/*
* Copyright 2016 Advanced Micro Devices, Inc.
* Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "dm_services.h"
#include "dc.h"
#include "dcn20_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn20_hubbub.h"
#include "dcn20_mpc.h"
#include "dcn20_hubp.h"
#include "irq/dcn20/irq_service_dcn20.h"
#include "dcn20_dpp.h"
#include "dcn20_optc.h"
#include "dcn20_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_resource.h"
#include "dcn20_opp.h"
#include "dcn20_dsc.h"
#include "dcn20_link_encoder.h"
#include "dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "dpcs/dpcs_2_0_0_offset.h"
#include "dpcs/dpcs_2_0_0_sh_mask.h"
#include "nbio/nbio_2_3_offset.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"
#include "link_enc_cfg.h"
#include "amdgpu_socbb.h"
#include "link.h"
#define DC_LOGGER_INIT(logger)
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#endif
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_PLL3,
DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_PLL5,
DCN20_CLK_SRC_TOTAL
};
/* begin *********************
* macros to expand register list macros defined in HW object header files */
/* DCN */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRI2_DWB(reg_name, block, id)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SF_DWB(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
.field_name = reg_name ## __ ## field_name ## post_fix
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
mm ## reg_name ## _ ## block ## id
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
MMHUB_BASE__INST0_SEG ## seg
#define MMHUB_BASE(seg) \
MMHUB_BASE_INNER(seg)
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
mmMM ## reg_name
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, C),
clk_src_regs(3, D),
clk_src_regs(4, E),
clk_src_regs(5, F)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCN10_REG_LIST()
};
static const struct dce_dmcu_shift dmcu_shift = {
DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dce_dmcu_mask dmcu_mask = {
DMCU_MASK_SH_LIST_DCN10(_MASK)
};
static const struct dce_abm_registers abm_regs = {
ABM_DCN20_REG_LIST()
};
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN20(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
audio_regs(2),
audio_regs(3),
audio_regs(4),
audio_regs(5),
audio_regs(6),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN2_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
stream_enc_regs(4),
stream_enc_regs(5),
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4),
aux_regs(5)
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5)
};
#define link_regs(id, phyid)\
[id] = {\
LE_DCN10_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
link_regs(2, C),
link_regs(3, D),
link_regs(4, E),
link_regs(5, F)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
DPCS_DCN2_MASK_SH_LIST(_MASK)
};
static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
{ DCN_PANEL_CNTL_REG_LIST() }
};
static const struct dce_panel_cntl_shift panel_cntl_shift = {
DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
};
static const struct dce_panel_cntl_mask panel_cntl_mask = {
DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
};
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN20(id),\
}
static const struct dcn10_ipp_registers ipp_regs[] = {
ipp_regs(0),
ipp_regs(1),
ipp_regs(2),
ipp_regs(3),
ipp_regs(4),
ipp_regs(5),
};
static const struct dcn10_ipp_shift ipp_shift = {
IPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn10_ipp_mask ipp_mask = {
IPP_MASK_SH_LIST_DCN20(_MASK),
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN20(id),\
}
static const struct dcn20_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
opp_regs(2),
opp_regs(3),
opp_regs(4),
opp_regs(5),
};
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST0(id), \
.AUXN_IMPCAL = 0, \
.AUXP_IMPCAL = 0, \
.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
tf_regs(0),
tf_regs(1),
tf_regs(2),
tf_regs(3),
tf_regs(4),
tf_regs(5),
};
static const struct dcn2_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN20(_MASK),
TF_DEBUG_REG_LIST_MASK_DCN20
};
#define dwbc_regs_dcn2(id)\
[id] = {\
DWBC_COMMON_REG_LIST_DCN2_0(id),\
}
static const struct dcn20_dwbc_registers dwbc20_regs[] = {
dwbc_regs_dcn2(0),
};
static const struct dcn20_dwbc_shift dwbc20_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dcn20_dwbc_mask dwbc20_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#define mcif_wb_regs_dcn2(id)\
[id] = {\
MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
}
static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
mcif_wb_regs_dcn2(0),
};
static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
static const struct dcn20_mpc_registers mpc_regs = {
MPC_REG_LIST_DCN2_0(0),
MPC_REG_LIST_DCN2_0(1),
MPC_REG_LIST_DCN2_0(2),
MPC_REG_LIST_DCN2_0(3),
MPC_REG_LIST_DCN2_0(4),
MPC_REG_LIST_DCN2_0(5),
MPC_OUT_MUX_REG_LIST_DCN2_0(0),
MPC_OUT_MUX_REG_LIST_DCN2_0(1),
MPC_OUT_MUX_REG_LIST_DCN2_0(2),
MPC_OUT_MUX_REG_LIST_DCN2_0(3),
MPC_OUT_MUX_REG_LIST_DCN2_0(4),
MPC_OUT_MUX_REG_LIST_DCN2_0(5),
MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
static const struct dcn_optc_registers tg_regs[] = {
tg_regs(0),
tg_regs(1),
tg_regs(2),
tg_regs(3),
tg_regs(4),
tg_regs(5)
};
static const struct dcn_optc_shift tg_shift = {
TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#define hubp_regs(id)\
[id] = {\
HUBP_REG_LIST_DCN20(id)\
}
static const struct dcn_hubp2_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
hubp_regs(2),
hubp_regs(3),
hubp_regs(4),
hubp_regs(5)
};
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN20(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN20(0)
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN20(_MASK)
};
#define vmid_regs(id)\
[id] = {\
DCN20_VMID_REG_LIST(id)\
}
static const struct dcn_vmid_registers vmid_regs[] = {
vmid_regs(0),
vmid_regs(1),
vmid_regs(2),
vmid_regs(3),
vmid_regs(4),
vmid_regs(5),
vmid_regs(6),
vmid_regs(7),
vmid_regs(8),
vmid_regs(9),
vmid_regs(10),
vmid_regs(11),
vmid_regs(12),
vmid_regs(13),
vmid_regs(14),
vmid_regs(15)
};
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
};
static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
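/*
* Map a transmitter enum (UNIPHY A-F) to its zero-based PHY instance, used to
* index link_enc_regs[] when constructing a link encoder.
*/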
static int map_transmitter_id_to_phy_instance(
enum transmitter transmitter)
{
switch (transmitter) {
case TRANSMITTER_UNIPHY_A:
return 0;
case TRANSMITTER_UNIPHY_B:
return 1;
case TRANSMITTER_UNIPHY_C:
return 2;
case TRANSMITTER_UNIPHY_D:
return 3;
case TRANSMITTER_UNIPHY_E:
return 4;
case TRANSMITTER_UNIPHY_F:
return 5;
default:
ASSERT(0);
return 0;
}
}
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
dsc_regsDCN20(0),
dsc_regsDCN20(1),
dsc_regsDCN20(2),
dsc_regsDCN20(3),
dsc_regsDCN20(4),
dsc_regsDCN20(5)
};
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN2()
};
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN2(__SHIFT)
};
static const struct dccg_mask dccg_mask = {
DCCG_MASK_SH_LIST_DCN2(_MASK)
};
static const struct resource_caps res_cap_nv10 = {
.num_timing_generator = 6,
.num_opp = 6,
.num_video_plane = 6,
.num_audio = 7,
.num_stream_encoder = 6,
.num_pll = 6,
.num_dwb = 1,
.num_ddc = 6,
.num_vmid = 16,
.num_dsc = 6,
};
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
.p010 = true
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 1
},
16,
16
};
static const struct resource_caps res_cap_nv14 = {
.num_timing_generator = 5,
.num_opp = 5,
.num_video_plane = 5,
.num_audio = 6,
.num_stream_encoder = 5,
.num_pll = 5,
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
.num_dsc = 5,
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.max_downscale_src_width = 5120, /* up to 5K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_legacy_fast_update = true,
};
void dcn20_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
}
struct dpp *dcn20_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_dpp *dpp =
kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
if (!dpp)
return NULL;
if (dpp2_construct(dpp, ctx, inst,
&tf_regs[inst], &tf_shift, &tf_mask))
return &dpp->base;
BREAK_TO_DEBUGGER();
kfree(dpp);
return NULL;
}
struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
if (!ipp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_ipp_construct(ipp, ctx, inst,
&ipp_regs[inst], &ipp_shift, &ipp_mask);
return &ipp->base;
}
struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
if (!opp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
struct dce_aux *dcn20_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
i2c_inst_regs(3),
i2c_inst_regs(4),
i2c_inst_regs(5),
i2c_inst_regs(6),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
struct dce_i2c_hw *dcn20_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
if (!dce_i2c_hw)
return NULL;
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
GFP_ATOMIC);
if (!mpc20)
return NULL;
dcn20_mpc_construct(mpc20, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
6);
return &mpc20->base;
}
struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
GFP_ATOMIC);
if (!hubbub)
return NULL;
hubbub2_construct(hubbub, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask);
for (i = 0; i < res_cap_nv10.num_vmid; i++) {
struct dcn20_vmid *vmid = &hubbub->vmid[i];
vmid->ctx = ctx;
vmid->regs = &vmid_regs[i];
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
return &hubbub->base;
}
struct timing_generator *dcn20_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_ATOMIC);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &tg_regs[instance];
tgn10->tg_shift = &tg_shift;
tgn10->tg_mask = &tg_mask;
dcn20_timing_generator_init(tgn10);
return &tgn10->base;
}
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
struct link_encoder *dcn20_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
int link_regs_id;
if (!enc20)
return NULL;
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
dcn20_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc20->enc10.base;
}
static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dce_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
if (!panel_cntl)
return NULL;
dce_panel_cntl_construct(panel_cntl,
init_data,
&panel_cntl_regs[init_data->inst],
&panel_cntl_shift,
&panel_cntl_mask);
return &panel_cntl->base;
}
static struct clock_source *dcn20_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
if (!clk_src)
return NULL;
if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct audio *dcn20_create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
struct stream_encoder *dcn20_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
if (!enc1)
return NULL;
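/*
* On Navi14 (_M revisions), engine ids at or above DIGD are shifted up by one
* when indexing stream_enc_regs, presumably because that DIG instance is not
* usable on this variant.
*/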
if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
if (eng_id >= ENGINE_ID_DIGD)
eng_id++;
}
dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN2_REG_LIST()
};
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN2_MASK_SH_LIST(_MASK)
};
struct dce_hwseq *dcn20_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn20_create_audio,
.create_stream_encoder = dcn20_stream_encoder_create,
.create_hwseq = dcn20_hwseq_create,
};
static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
}
struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
if (!dsc) {
BREAK_TO_DEBUGGER();
return NULL;
}
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
return &dsc->base;
}
void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
{
kfree(container_of(*dsc, struct dcn20_dsc, base));
*dsc = NULL;
}
static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
if (pool->base.hubbub != NULL) {
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.dpps[i] != NULL)
dcn20_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
}
if (pool->base.sw_i2cs[i] != NULL) {
kfree(pool->base.sw_i2cs[i]);
pool->base.sw_i2cs[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
if (pool->base.dwbc[i] != NULL) {
kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
pool->base.dwbc[i] = NULL;
}
if (pool->base.mcif_wb[i] != NULL) {
kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
pool->base.mcif_wb[i] = NULL;
}
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
if (pool->base.oem_device != NULL) {
struct dc *dc = pool->base.oem_device->ctx->dc;
dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
}
}
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
if (!hubp2)
return NULL;
if (hubp2_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
BREAK_TO_DEBUGGER();
kfree(hubp2);
return NULL;
}
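/*
* Derive the pixel clock request for a pipe: start from the stream timing,
* then divide for ODM combine / two-pixels-per-container and double for HW
* frame packing, so the clock source is programmed for the rate each OPP sees.
*/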
static void get_pixel_clock_parameters(
struct pipe_ctx *pipe_ctx,
struct pixel_clk_params *pixel_clk_params)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
struct dc_link *link = stream->link;
struct link_encoder *link_enc = NULL;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dce_hwseq *hws = dc->hwseq;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
pixel_clk_params->encoder_object_id = link_enc->id;
pixel_clk_params->signal_type = pipe_ctx->stream->signal;
pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
/* TODO: un-hardcode*/
/* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */
pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
LINK_RATE_REF_FREQ_IN_KHZ;
pixel_clk_params->flags.ENABLE_SS = 0;
pixel_clk_params->color_depth =
stream->timing.display_color_depth;
pixel_clk_params->flags.DISPLAY_BLANKED = 1;
pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
pixel_clk_params->color_depth = COLOR_DEPTH_888;
if (opp_cnt == 4)
pixel_clk_params->requested_pix_clk_100hz /= 4;
else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
pixel_clk_params->requested_pix_clk_100hz /= 2;
else if (hws->funcs.is_dp_dig_pixel_rate_div_policy) {
if (hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx))
pixel_clk_params->requested_pix_clk_100hz /= 2;
}
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
pixel_clk_params->requested_pix_clk_100hz *= 2;
}
static void build_clamping_params(struct dc_stream_state *stream)
{
stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
stream->clamping.c_depth = stream->timing.display_color_depth;
stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}
static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
&pipe_ctx->pll_settings);
pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
resource_build_bit_depth_reduction_params(pipe_ctx->stream,
&pipe_ctx->stream->bit_depth_params);
build_clamping_params(pipe_ctx->stream);
return DC_OK;
}
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
{
enum dc_status status = DC_OK;
struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
status = build_pipe_hw_param(pipe_ctx);
return status;
}
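/*
* Acquire a DSC for the given pipe: use 1-to-1 mapping when every pipe has its
* own DSC, otherwise try to keep the DSC used by the current state and fall
* back to the first free instance.
*/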
void dcn20_acquire_dsc(const struct dc *dc,
struct resource_context *res_ctx,
struct display_stream_compressor **dsc,
int pipe_idx)
{
int i;
const struct resource_pool *pool = dc->res_pool;
struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
*dsc = NULL;
/* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
*dsc = pool->dscs[pipe_idx];
res_ctx->is_dsc_acquired[pipe_idx] = true;
return;
}
/* Return old DSC to avoid the need for re-programming */
if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
*dsc = dsc_old;
res_ctx->is_dsc_acquired[dsc_old->inst] = true;
return;
}
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
*dsc = pool->dscs[i];
res_ctx->is_dsc_acquired[i] = true;
break;
}
}
void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc)
{
int i;
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (pool->dscs[i] == *dsc) {
res_ctx->is_dsc_acquired[i] = false;
*dsc = NULL;
break;
}
}
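/*
* Attach a DSC to the top pipe driving dc_stream when the timing requests DSC.
* Returns DC_NO_DSC_RESOURCE if no free DSC instance is available.
*/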
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
struct dc_state *dc_ctx,
struct dc_stream_state *dc_stream)
{
enum dc_status result = DC_OK;
int i;
/* Get a DSC if required and available */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
if (pipe_ctx->top_pipe)
continue;
if (pipe_ctx->stream != dc_stream)
continue;
if (pipe_ctx->stream_res.dsc)
continue;
dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
result = DC_NO_DSC_RESOURCE;
}
break;
}
return result;
}
static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream)
{
struct pipe_ctx *pipe_ctx = NULL;
int i;
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream_res.dsc)
dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
else
return DC_OK;
}
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
result = resource_map_pool_resources(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
/* Get a DSC if required and available */
if (result == DC_OK && dc_stream->timing.flags.DSC)
result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
return result;
}
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
{
enum dc_status result = DC_OK;
result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
return result;
}
/**
* dcn20_split_stream_for_odm - Check if stream can be split for ODM
*
* @dc: DC object with resource pool info required for pipe split
* @res_ctx: Persistent state of resources
* @prev_odm_pipe: Reference to the previous ODM pipe
* @next_odm_pipe: Reference to the next ODM pipe
*
* This function takes a logically active pipe and a logically free pipe and
* halves all the scaling parameters that need to be halved while populating
* the free pipe with the required resources and configuring the next/previous
* ODM pipe pointers.
*
* Return:
* Return true if split stream for ODM is possible, otherwise, return false.
*/
bool dcn20_split_stream_for_odm(
const struct dc *dc,
struct resource_context *res_ctx,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe)
{
int pipe_idx = next_odm_pipe->pipe_idx;
const struct resource_pool *pool = dc->res_pool;
*next_odm_pipe = *prev_odm_pipe;
next_odm_pipe->pipe_idx = pipe_idx;
next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
next_odm_pipe->stream_res.dsc = NULL;
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
}
if (prev_odm_pipe->top_pipe && prev_odm_pipe->top_pipe->next_odm_pipe) {
prev_odm_pipe->top_pipe->next_odm_pipe->bottom_pipe = next_odm_pipe;
next_odm_pipe->top_pipe = prev_odm_pipe->top_pipe->next_odm_pipe;
}
if (prev_odm_pipe->bottom_pipe && prev_odm_pipe->bottom_pipe->next_odm_pipe) {
prev_odm_pipe->bottom_pipe->next_odm_pipe->top_pipe = next_odm_pipe;
next_odm_pipe->bottom_pipe = prev_odm_pipe->bottom_pipe->next_odm_pipe;
}
prev_odm_pipe->next_odm_pipe = next_odm_pipe;
next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
if (prev_odm_pipe->plane_state) {
struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
int new_width;
/* HACTIVE halved for odm combine */
sd->h_active /= 2;
/* Calculate new vp and recout for left pipe */
/* Need at least 16 pixels width per side */
if (sd->recout.x + 16 >= sd->h_active)
return false;
new_width = sd->h_active - sd->recout.x;
sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz, sd->recout.width - new_width));
sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz_c, sd->recout.width - new_width));
sd->recout.width = new_width;
/* Calculate new vp and recout for right pipe */
sd = &next_odm_pipe->plane_res.scl_data;
/* HACTIVE halved for odm combine */
sd->h_active /= 2;
/* Need at least 16 pixels width per side */
if (new_width <= 16)
return false;
new_width = sd->recout.width + sd->recout.x - sd->h_active;
sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz, sd->recout.width - new_width));
sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz_c, sd->recout.width - new_width));
sd->recout.width = new_width;
sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz, sd->h_active - sd->recout.x));
sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz_c, sd->h_active - sd->recout.x));
sd->recout.x = 0;
}
if (!next_odm_pipe->top_pipe)
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp;
if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) {
dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
}
return true;
}
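/*
* Populate a free secondary pipe as the MPC-split copy of primary_pipe: plane
* resources come from the secondary pipe index while the stream resources are
* shared with the primary.
*/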
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
struct pipe_ctx *secondary_pipe)
{
int pipe_idx = secondary_pipe->pipe_idx;
struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
*secondary_pipe = *primary_pipe;
secondary_pipe->bottom_pipe = sec_bot_pipe;
secondary_pipe->pipe_idx = pipe_idx;
secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
secondary_pipe->stream_res.dsc = NULL;
if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
ASSERT(!secondary_pipe->bottom_pipe);
secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
}
primary_pipe->bottom_pipe = secondary_pipe;
secondary_pipe->top_pipe = primary_pipe;
ASSERT(primary_pipe->plane_state);
}
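/*
* Estimate the maximum scaled line time the writeback buffer can absorb for a
* given MCIF_WB packing mode: convert time-per-pixel to time-per-byte, scale
* by the free buffer entries, then subtract the urgent watermark.
*/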
unsigned int dcn20_calc_max_scaled_time(
unsigned int time_per_pixel,
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark)
{
unsigned int time_per_byte = 0;
unsigned int total_y_free_entry = 0x200; /* two memory pieces for luma */
unsigned int total_c_free_entry = 0x140; /* two memory pieces for chroma */
unsigned int small_free_entry, max_free_entry;
unsigned int buf_lh_capability;
unsigned int max_scaled_time;
if (mode == PACKED_444) /* packed mode */
time_per_byte = time_per_pixel/4;
else if (mode == PLANAR_420_8BPC)
time_per_byte = time_per_pixel;
else if (mode == PLANAR_420_10BPC) /* p010 */
time_per_byte = time_per_pixel * 819/1024;
if (time_per_byte == 0)
time_per_byte = 1;
small_free_entry = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
max_free_entry = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */
max_scaled_time = buf_lh_capability - urgent_watermark;
return max_scaled_time;
}
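/*
* Program MCIF_WB arbitration for every enabled writeback: pick the packing
* mode from the DWB output format, let the FPU helper fill in the remaining
* parameters, then cap max_scaled_time using the urgent watermark.
*/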
void dcn20_set_mcif_arb_params(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
enum mmhubbub_wbif_mode wbif_mode;
struct mcif_arb_params *wb_arb_params;
int i, j, dwb_pipe;
/* Writeback MCIF_WB arbitration parameters */
dwb_pipe = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
for (j = 0; j < MAX_DWB_PIPES; j++) {
if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
continue;
//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
wbif_mode = PLANAR_420_8BPC;
else
wbif_mode = PLANAR_420_10BPC;
} else
wbif_mode = PACKED_444;
DC_FP_START();
dcn20_fpu_set_wb_arb_params(wb_arb_params, context, pipes, pipe_cnt, i);
DC_FP_END();
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2;
wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
wbif_mode,
wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
dwb_pipe++;
if (dwb_pipe >= MAX_DWB_PIPES)
return;
}
if (dwb_pipe >= MAX_DWB_PIPES)
return;
}
}
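/*
* Validate the DSC configuration of each DSC-enabled stream against its
* acquired DSC instance, accounting for ODM combine (picture width and slice
* count are divided across the OPPs).
*/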
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
/* Validate DSC config, dsc count validation is already done */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx->stream;
struct dsc_config dsc_cfg;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
/* Only need to validate top pipe */
if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
continue;
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
+ stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
+ stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
return false;
}
return true;
}
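/*
* Pick a free pipe to use as the secondary for an MPC/ODM split, preferring
* the pipe that was already paired with this primary in the current state so
* that as few pipes as possible need to be reprogrammed.
*/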
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
{
struct pipe_ctx *secondary_pipe = NULL;
if (dc && primary_pipe) {
int j;
int preferred_pipe_idx = 0;
/* first check the prev dc state:
* if this primary pipe has a bottom pipe in prev. state
* and if the bottom pipe is still available (which it should be),
* pick that pipe as secondary
* Same logic applies for ODM pipes
*/
if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
if (secondary_pipe == NULL &&
dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
/*
* if this primary pipe does not have a bottom pipe in prev. state,
* search backward for a pipe that was not a bottom pipe in the prev. dc
* state. This way we make sure we keep the same assignment as the last
* state and will not have to reprogram every pipe
*/
if (secondary_pipe == NULL) {
for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
&& dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
preferred_pipe_idx = j;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
break;
}
}
}
}
/*
* We should never hit this assert unless assignments are shuffled around;
* if this happens we will probably hit a vsync TDR
*/
ASSERT(secondary_pipe);
/*
* search backwards for the second pipe to keep pipe
* assignment more consistent
*/
if (secondary_pipe == NULL) {
for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
preferred_pipe_idx = j;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
break;
}
}
}
}
return secondary_pipe;
}
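/*
* Undo any previous ODM and MPC splits before validation so that mode support
* can decide the split configuration from scratch.
*/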
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context)
{
int i;
/* merge previously split odm pipes since mode support needs to make the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;
if (pipe->prev_odm_pipe)
continue;
pipe->next_odm_pipe = NULL;
while (odm_pipe) {
struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;
odm_pipe->plane_state = NULL;
odm_pipe->stream = NULL;
odm_pipe->top_pipe = NULL;
odm_pipe->bottom_pipe = NULL;
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
if (odm_pipe->stream_res.dsc)
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
odm_pipe = next_odm_pipe;
}
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
/* merge previously mpc split pipes since mode support needs to make the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
continue;
pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
if (hsplit_pipe->bottom_pipe)
hsplit_pipe->bottom_pipe->top_pipe = pipe;
hsplit_pipe->plane_state = NULL;
hsplit_pipe->stream = NULL;
hsplit_pipe->top_pipe = NULL;
hsplit_pipe->bottom_pipe = NULL;
/* Clear plane_res and stream_res */
memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
}
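/*
* Decide, per pipe, whether it should be 2-way or 4-way split (or merged back)
* based on the DML voltage level results and dc debug policy. Returns the
* possibly adjusted voltage level.
*/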
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
int *split,
bool *merge)
{
int i, pipe_idx, vlevel_split;
int plane_count = 0;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
struct vba_vars_st *v = &context->bw_ctx.dml.vba;
int max_mpc_comb = v->maxMpcComb;
if (context->stream_count > 1) {
if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
avoid_split = true;
} else if (dc->debug.force_single_disp_pipe_split)
force_split = true;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
/**
* Workaround for avoiding pipe-split in cases where we'd split
* planes that are too small, resulting in splits that aren't
* valid for the scaler.
*/
if (pipe->plane_state &&
(pipe->plane_state->dst_rect.width <= 16 ||
pipe->plane_state->dst_rect.height <= 16 ||
pipe->plane_state->src_rect.width <= 16 ||
pipe->plane_state->src_rect.height <= 16))
avoid_split = true;
/* TODO: fix dc bugs and remove this split threshold thing */
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;
}
if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
/* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct dc_crtc_timing timing;
if (!pipe->stream)
continue;
else {
timing = pipe->stream->timing;
if (timing.h_border_left + timing.h_border_right
+ timing.v_border_top + timing.v_border_bottom > 0) {
avoid_split = true;
break;
}
}
}
/* The avoid-split loop looks for the lowest voltage level that keeps as many pipes unsplit as possible */
if (avoid_split) {
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
v->ModeSupport[vlevel][0])
break;
/* Impossible to not split this pipe */
if (vlevel > context->bw_ctx.dml.soc.num_states)
vlevel = vlevel_split;
else
max_mpc_comb = 0;
pipe_idx++;
}
v->maxMpcComb = max_mpc_comb;
}
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
int pipe_plane = v->pipe_plane[pipe_idx];
bool split4mpc = context->stream_count == 1 && plane_count == 1
&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
split[i] = 4;
else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
split[i] = 2;
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
pipe->stream->view_format ==
VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
(pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE))
split[i] = 2;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = 2;
v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
split[i] = 4;
v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
}
/*420 format workaround*/
if (pipe->stream->timing.h_addressable > 7680 &&
pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
split[i] = 4;
}
v->ODMCombineEnabled[pipe_plane] =
v->ODMCombineEnablePerState[vlevel][pipe_plane];
if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
if (resource_get_num_mpc_splits(pipe) == 1) {
/*If need split for mpc but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 MPC */
else if (split[i] == 2)
split[i] = 0; /* 2 -> 2 MPC */
else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 2 -> 1 MPC */
} else if (resource_get_num_mpc_splits(pipe) == 3) {
/*If need split for mpc but 4 way split already*/
if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
|| !pipe->bottom_pipe)) {
merge[i] = true; /* 4 -> 2 MPC */
} else if (split[i] == 0 && pipe->top_pipe &&
pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 4 -> 1 MPC */
split[i] = 0;
} else if (resource_get_num_odm_splits(pipe)) {
/* ODM -> MPC transition */
if (pipe->prev_odm_pipe) {
split[i] = 0;
merge[i] = true;
}
}
} else {
if (resource_get_num_odm_splits(pipe) == 1) {
/*If need split for odm but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 ODM */
else if (split[i] == 2)
split[i] = 0; /* 2 -> 2 ODM */
else if (pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* exit ODM */
}
} else if (resource_get_num_odm_splits(pipe) == 3) {
/*If need split for odm but 4 way split already*/
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
merge[i] = true; /* 4 -> 2 ODM */
} else if (split[i] == 0 && pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* exit ODM */
}
split[i] = 0;
} else if (resource_get_num_mpc_splits(pipe)) {
/* MPC -> ODM transition */
ASSERT(0); /* NOT expected yet */
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
split[i] = 0;
merge[i] = true;
}
}
}
/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) {
DC_FP_START();
dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false);
DC_FP_END();
}
pipe_idx++;
}
return vlevel;
}
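/*
* Fast bandwidth validation: merge previously split pipes, populate the DML
* pipes, look up the voltage level, then re-apply ODM/MPC splits according to
* the split flags before per-stream DSC validation.
*/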
bool dcn20_fast_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
bool fast_validate)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
if (!pipes)
return false;
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
if (!pipe_cnt) {
out = true;
goto validate_out;
}
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
/* initialize pipe_split_from to invalid idx */
for (i = 0; i < MAX_PIPES; i++)
pipe_split_from[i] = -1;
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
if (!pipe->stream || pipe_split_from[i] >= 0)
continue;
pipe_idx++;
if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!dcn20_split_stream_for_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
dcn20_build_mapped_resource(dc, context, pipe->stream);
}
if (!pipe->plane_state)
continue;
/* Skip 2nd half of already split pipe */
if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
continue;
/* We do not support mpo + odm at the moment */
if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
if (split[i] == 2) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!hsplit_pipe) {
DC_FP_START();
dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true);
DC_FP_END();
continue;
}
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
dcn20_build_mapped_resource(dc, context, pipe->stream);
} else {
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe);
resource_build_scaling_params(pipe);
resource_build_scaling_params(hsplit_pipe);
}
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* merge should already have been done */
ASSERT(0);
}
}
/* Validate the actual dsc count configured per stream */
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
*vlevel_out = vlevel;
out = true;
goto validate_out;
validate_fail:
out = false;
validate_out:
return out;
}
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
return voltage_supported;
}
struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *opp_head)
{
struct resource_context *res_ctx = &new_ctx->res_ctx;
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream);
struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master);
ASSERT(otg_master);
if (!sec_dpp_pipe)
return NULL;
sec_dpp_pipe->stream = opp_head->stream;
sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg;
sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp;
sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx];
sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx];
sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx];
sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst;
return sec_dpp_pipe;
}
bool dcn20_get_dcc_compression_cap(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
dc->res_pool->hubbub,
input,
output);
}
static void dcn20_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
dcn20_resource_destruct(dcn20_pool);
kfree(dcn20_pool);
*pool = NULL;
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
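/*
* When the plane's tiling is unknown, default to the 64KB _S swizzle and
* switch to the 64KB _D swizzle for 64bpp formats.
*/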
enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S;
if (bpp == 64)
plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D;
return DC_OK;
}
static const struct resource_funcs dcn20_res_pool_funcs = {
.destroy = dcn20_destroy_resource_pool,
.link_enc_create = dcn20_link_encoder_create,
.panel_cntl_create = dcn20_panel_cntl_create,
.validate_bandwidth = dcn20_validate_bandwidth,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
if (!dwbc20) {
dm_error("DC: failed to create dwbc20!\n");
return false;
}
dcn20_dwbc_construct(dwbc20, ctx,
&dwbc20_regs[i],
&dwbc20_shift,
&dwbc20_mask,
i);
pool->dwbc[i] = &dwbc20->base;
}
return true;
}
bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
ASSERT(pipe_count > 0);
for (i = 0; i < pipe_count; i++) {
struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
GFP_KERNEL);
if (!mcif_wb20) {
dm_error("DC: failed to create mcif_wb20!\n");
return false;
}
dcn20_mmhubbub_construct(mcif_wb20, ctx,
&mcif_wb20_regs[i],
&mcif_wb20_shift,
&mcif_wb20_mask,
i);
pool->mcif_wb[i] = &mcif_wb20->base;
}
return true;
}
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
if (!pp_smu)
return pp_smu;
dm_pp_get_funcs(ctx, pp_smu);
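	/* If the provider does not report the NV interface version, zero the
	 * struct rather than freeing it: callers such as init_soc_bounding_box()
	 * NULL-check the individual nv_funcs pointers and simply skip SMU
	 * interaction when they are cleared.
	 */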
if (pp_smu->ctx.ver != PP_SMU_VER_NV)
pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
return pp_smu;
}
static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
*pp_smu = NULL;
}
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
uint32_t hw_internal_rev)
{
if (ASICREV_IS_NAVI14_M(hw_internal_rev))
return &dcn2_0_nv14_soc;
if (ASICREV_IS_NAVI12_P(hw_internal_rev))
return &dcn2_0_nv12_soc;
return &dcn2_0_soc;
}
static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
uint32_t hw_internal_rev)
{
/* NV14 */
if (ASICREV_IS_NAVI14_M(hw_internal_rev))
return &dcn2_0_nv14_ip;
/* NV12 and NV10 */
return &dcn2_0_ip;
}
static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
{
return DML_PROJECT_NAVI10v2;
}
static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
struct _vcs_dpi_ip_params_st *loaded_ip =
get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
DC_LOGGER_INIT(dc->ctx->logger);
if (pool->base.pp_smu) {
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
unsigned int num_states = 0;
enum pp_smu_status status;
bool clock_limits_available = false;
bool uclk_states_available = false;
if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
uclk_states_available = (status == PP_SMU_RESULT_OK);
}
if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
/* SMU cannot set DCF clock to anything equal to or higher than SOC clock
*/
if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
clock_limits_available = (status == PP_SMU_RESULT_OK);
}
if (clock_limits_available && uclk_states_available && num_states) {
DC_FP_START();
dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
DC_FP_END();
} else if (clock_limits_available) {
DC_FP_START();
dcn20_cap_soc_clocks(loaded_bb, max_clocks);
DC_FP_END();
}
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
DC_FP_END();
return true;
}
static bool dcn20_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn20_resource_pool *pool)
{
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
struct ddc_service_init_data ddc_init_data = {0};
struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
struct _vcs_dpi_ip_params_st *loaded_ip =
get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
enum dml_project dml_project_version =
get_dml_project_version(ctx->asic_id.hw_internal_rev);
ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
pool->base.res_cap = &res_cap_nv14;
pool->base.pipe_count = 5;
pool->base.mpcc_count = 5;
} else {
pool->base.res_cap = &res_cap_nv10;
pool->base.pipe_count = 6;
pool->base.mpcc_count = 6;
}
/*************************************************
	 * Resource + asic cap hardcoding                *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 1;
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.dgam_rom_caps.pq = 0;
dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
dc->caps.color.dpp.post_csc = 0;
dc->caps.color.dpp.gamma_corr = 0;
dc->caps.color.dpp.dgam_rom_for_yuv = 1;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
// no OGAM ROM on DCN2, only MPC ROM
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 0;
dc->caps.color.mpc.gamut_remap = 0;
dc->caps.color.mpc.num_3dluts = 0;
dc->caps.color.mpc.shared_3d_lut = 0;
dc->caps.color.mpc.ogam_ram = 1;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
//dcn2.0x
dc->work_arounds.dedcn20_305_wa = true;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
/*************************************************
* Create resources *
*************************************************/
pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
dcn20_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
dcn20_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
dcn20_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
dcn20_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
dcn20_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
dcn20_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL5,
&clk_src_regs[5], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
	/* TODO: do not reuse phy_pll registers */
pool->base.dp_clock_source =
dcn20_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
if (pool->base.dccg == NULL) {
dm_error("DC: failed to create dccg!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
pool->base.dmcu = dcn20_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
pool->base.pp_smu = dcn20_pp_smu_create(ctx);
if (!init_soc_bounding_box(dc, pool)) {
dm_error("DC: failed to initialize soc bounding box!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);
if (!dc->debug.disable_pplib_wm_range) {
struct pp_smu_wm_range_sets ranges = {0};
int i = 0;
ranges.num_reader_wm_sets = 0;
if (loaded_bb->num_states == 1) {
ranges.reader_wm_sets[0].wm_inst = i;
ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
ranges.num_reader_wm_sets = 1;
} else if (loaded_bb->num_states > 1) {
for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
ranges.reader_wm_sets[i].wm_inst = i;
ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
DC_FP_START();
dcn20_fpu_set_wm_ranges(i, &ranges, loaded_bb);
DC_FP_END();
ranges.num_reader_wm_sets = i + 1;
}
ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
}
ranges.num_writer_wm_sets = 1;
ranges.writer_wm_sets[0].wm_inst = 0;
ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
}
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
if (!pool->base.irqs)
goto create_fail;
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create memory input!\n");
goto create_fail;
}
pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
if (pool->base.ipps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create input pixel processor!\n");
goto create_fail;
}
pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
if (pool->base.dpps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto create_fail;
}
pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create hw i2c!!\n");
goto create_fail;
}
pool->base.sw_i2cs[i] = NULL;
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool->base.opps[i] = dcn20_opp_create(ctx, i);
if (pool->base.opps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.timing_generators[i] = dcn20_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
}
}
pool->base.timing_generator_count = i;
pool->base.mpc = dcn20_mpc_create(ctx);
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
goto create_fail;
}
pool->base.hubbub = dcn20_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create hubbub!\n");
goto create_fail;
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create display stream compressor %d!\n", i);
goto create_fail;
}
}
if (!dcn20_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create dwbc!\n");
goto create_fail;
}
if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mcif_wb!\n");
goto create_fail;
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto create_fail;
dcn20_hw_sequencer_construct(dc);
// IF NV12, set PG function pointer to NULL. It's not that
// PG isn't supported for NV12, it's that we don't want to
// program the registers because that will cause more power
// to be consumed. We could have created dcn20_init_hw to get
// the same effect by checking ASIC rev, but there was a
// request at some point to not check ASIC rev on hw sequencer.
if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
dc->hwseq->funcs.enable_power_gating_plane = NULL;
dc->debug.disable_dpp_power_gate = true;
dc->debug.disable_hubp_power_gate = true;
}
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
ddc_init_data.ctx = dc->ctx;
ddc_init_data.link = NULL;
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
return true;
create_fail:
dcn20_resource_destruct(pool);
return false;
}
struct resource_pool *dcn20_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn20_resource_pool *pool =
kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
if (!pool)
return NULL;
if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
kfree(pool);
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "reg_helper.h"
#include "core_types.h"
#include "dcn20_dccg.h"
#define TO_DCN_DCCG(dccg)\
container_of(dccg, struct dcn_dccg, base)
#define REG(reg) \
(dccg_dcn->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
#define CTX \
dccg_dcn->base.ctx
#define DC_LOGGER \
dccg->ctx->logger
void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
int modulo, phase;
// phase / modulo = dpp pipe clk / dpp global clk
		modulo = 0xff; // fixed 8-bit DTO modulo (always 0xff)
phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
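		/* Illustrative example (values chosen for illustration only):
		 * with ref_dppclk = 600000 kHz and req_dppclk = 300000 kHz,
		 * phase = ceil(255 * 300000 / 600000) = 128, so the DTO output
		 * is 600000 * 128 / 255 ~= 301176 kHz, the smallest ratio that
		 * still meets the request.
		 */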
if (phase > 0xff) {
ASSERT(false);
phase = 0xff;
}
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, phase,
DPPCLK0_DTO_MODULO, modulo);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t clk_en = 0;
uint32_t clk_sel = 0;
REG_GET_2(REFCLK_CNTL, REFCLK_CLOCK_EN, &clk_en, REFCLK_SRC_SEL, &clk_sel);
if (clk_en != 0) {
// DCN20 has never been validated for non-xtalin as reference
// frequency. There's actually no way for DC to determine what
// frequency a non-xtalin source is.
ASSERT_CRITICAL(false);
}
*dccg_ref_freq_inKhz = xtalin_freq_inKhz;
return;
}
void dccg2_set_fifo_errdet_ovr_en(struct dccg *dccg,
bool en)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
REG_UPDATE(DISPCLK_FREQ_CHANGE_CNTL,
DCCG_FIFO_ERRDET_OVR_EN, en ? 1 : 0);
}
void dccg2_otg_add_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[otg_inst],
OTG_ADD_PIXEL[otg_inst], 0,
OTG_DROP_PIXEL[otg_inst], 0);
REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
OTG_ADD_PIXEL[otg_inst], 1);
}
void dccg2_otg_drop_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[otg_inst],
OTG_ADD_PIXEL[otg_inst], 0,
OTG_DROP_PIXEL[otg_inst], 0);
REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
OTG_DROP_PIXEL[otg_inst], 1);
}
void dccg2_init(struct dccg *dccg)
{
}
static const struct dccg_funcs dccg2_funcs = {
.update_dpp_dto = dccg2_update_dpp_dto,
.get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
.otg_add_pixel = dccg2_otg_add_pixel,
.otg_drop_pixel = dccg2_otg_drop_pixel,
.dccg_init = dccg2_init
};
struct dccg *dccg2_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
struct dccg *base;
if (dccg_dcn == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
base = &dccg_dcn->base;
base->ctx = ctx;
base->funcs = &dccg2_funcs;
dccg_dcn->regs = regs;
dccg_dcn->dccg_shift = dccg_shift;
dccg_dcn->dccg_mask = dccg_mask;
return &dccg_dcn->base;
}
void dcn_dccg_destroy(struct dccg **dccg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(*dccg);
kfree(dccg_dcn);
*dccg = NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn20_optc.h"
#include "dc.h"
#define REG(reg)\
optc1->tg_regs->reg
#define CTX \
optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
/**
* optc2_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator.
*
* @optc: timing_generator instance.
*
* Return: If CRTC is enabled, return true.
*
*/
bool optc2_enable_crtc(struct timing_generator *optc)
{
/* TODO FPGA wait for answer
* OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
* OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
*/
struct optc *optc1 = DCN10TG_FROM_TG(optc);
	/* OPP instance for OTG. For DCN1.0, ODM is removed;
	 * OPP and OPTC should be a 1:1 mapping
*/
REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
OPTC_SEG0_SRC_SEL, optc->inst);
/* VTG enable first is for HW workaround */
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
REG_SEQ_START();
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
REG_SEQ_SUBMIT();
REG_SEQ_WAIT_DONE();
return true;
}
/**
* optc2_set_gsl() - Assign OTG to GSL groups,
* set one of the OTGs to be master & rest are slaves
*
* @optc: timing_generator instance.
* @params: pointer to gsl_params
*/
void optc2_set_gsl(struct timing_generator *optc,
const struct gsl_params *params)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/*
* There are (MAX_OPTC+1)/2 gsl groups available for use.
* In each group (assign an OTG to a group by setting OTG_GSLX_EN = 1,
* set one of the OTGs to be the master (OTG_GSL_MASTER_EN = 1) and the rest are slaves.
*/
REG_UPDATE_5(OTG_GSL_CONTROL,
OTG_GSL0_EN, params->gsl0_en,
OTG_GSL1_EN, params->gsl1_en,
OTG_GSL2_EN, params->gsl2_en,
OTG_GSL_MASTER_EN, params->gsl_master_en,
OTG_GSL_MASTER_MODE, params->gsl_master_mode);
}
void optc2_set_gsl_source_select(
struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
switch (group_idx) {
case 1:
REG_UPDATE(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, gsl_ready_signal);
break;
case 2:
REG_UPDATE(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, gsl_ready_signal);
break;
case 3:
REG_UPDATE(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, gsl_ready_signal);
break;
default:
break;
}
}
/* Set DSC-related configuration.
* dsc_mode: 0 disables DSC, other values enable DSC in specified format
 * dsc_bytes_per_pixel: Bytes per pixel in u3.28 fixed-point format
* dsc_slice_width: Slice width in pixels
*/
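/* For illustration of the u3.28 encoding (example value, not from the HW
 * spec): a DSC target of 1.5 bytes per pixel would be programmed as
 * 1.5 * 2^28 = 0x18000000.
 */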
void optc2_set_dsc_config(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL,
OPTC_DSC_MODE, dsc_mode);
REG_SET(OPTC_BYTES_PER_PIXEL, 0,
OPTC_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
/* Get DSC-related configuration.
* dsc_mode: 0 disables DSC, other values enable DSC in specified format
*/
void optc2_get_dsc_status(struct timing_generator *optc,
uint32_t *dsc_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OPTC_DATA_FORMAT_CONTROL,
OPTC_DSC_MODE, dsc_mode);
}
/*TEMP: Need to figure out inheritance model here.*/
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
return optc1_is_two_pixels_per_containter(timing);
}
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t h_div_2 = 0;
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 0,
OPTC_SEG0_SRC_SEL, optc->inst,
OPTC_SEG1_SRC_SEL, 0xf);
REG_WRITE(OTG_H_TIMING_CNTL, 0);
h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, 0);
optc1->opp_count = 1;
}
void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
/ opp_cnt;
uint32_t memory_mask;
ASSERT(opp_cnt == 2);
	/* TODO: in the pseudocode but does not affect Maximus; delete this comment if not needed on ASIC
* REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
* Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
* REG_SET_2(OTG_GLOBAL_CONTROL1, 0,
* MASTER_UPDATE_LOCK_DB_X, 160,
* MASTER_UPDATE_LOCK_DB_Y, 240);
*/
	/* 2 pieces of memory are required for displays up to 5120 pixels wide, 4 for up to 8192;
* however, for ODM combine we can simplify by always using 4.
* To make sure there's no overlap, each instance "reserves" 2 memories and
* they are uniquely combined here.
*/
memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
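	/* Example with illustrative instances: opp_id[0] = 1 and opp_id[1] = 2
	 * give memory_mask = (0x3 << 2) | (0x3 << 4) = 0x3c, i.e. memories 2-5
	 * are reserved for this ODM pair.
	 */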
if (REG(OPTC_MEMORY_CONFIG))
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, memory_mask);
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
OPTC_SEG0_SRC_SEL, opp_id[0],
OPTC_SEG1_SRC_SEL, opp_id[1]);
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_BY2, 1);
optc1->opp_count = opp_cnt;
}
void optc2_get_optc_source(struct timing_generator *optc,
uint32_t *num_of_src_opp,
uint32_t *src_opp_id_0,
uint32_t *src_opp_id_1)
{
uint32_t num_of_input_segments;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET_3(OPTC_DATA_SOURCE_SELECT,
OPTC_NUM_OF_INPUT_SEGMENT, &num_of_input_segments,
OPTC_SEG0_SRC_SEL, src_opp_id_0,
OPTC_SEG1_SRC_SEL, src_opp_id_1);
if (num_of_input_segments == 1)
*num_of_src_opp = 2;
else
*num_of_src_opp = 1;
/* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
if (*src_opp_id_1 == 0xf)
*num_of_src_opp = 1;
}
static void optc2_set_dwb_source(struct timing_generator *optc,
uint32_t dwb_pipe_inst)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (dwb_pipe_inst == 0)
REG_UPDATE(DWB_SOURCE_SELECT,
OPTC_DWB0_SOURCE_SELECT, optc->inst);
else if (dwb_pipe_inst == 1)
REG_UPDATE(DWB_SOURCE_SELECT,
OPTC_DWB1_SOURCE_SELECT, optc->inst);
}
static void optc2_align_vblanks(
struct timing_generator *optc_master,
struct timing_generator *optc_slave,
uint32_t master_pixel_clock_100Hz,
uint32_t slave_pixel_clock_100Hz,
uint8_t master_clock_divider,
uint8_t slave_clock_divider)
{
/* accessing slave OTG registers */
struct optc *optc1 = DCN10TG_FROM_TG(optc_slave);
uint32_t master_v_active = 0;
uint32_t master_h_total = 0;
uint32_t slave_h_total = 0;
uint64_t L, XY;
uint32_t X, Y, p = 10000;
uint32_t master_update_lock;
/* disable slave OTG */
REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
/* wait until disabled */
REG_WAIT(OTG_CONTROL,
OTG_CURRENT_MASTER_EN_STATE,
0, 10, 5000);
REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &slave_h_total);
/* assign slave OTG to be controlled by master update lock */
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc_master->inst);
/* accessing master OTG registers */
optc1 = DCN10TG_FROM_TG(optc_master);
/* saving update lock state, not sure if it's needed */
REG_GET(OTG_MASTER_UPDATE_LOCK,
OTG_MASTER_UPDATE_LOCK, &master_update_lock);
/* unlocking master OTG */
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
REG_GET(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, &master_v_active);
REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &master_h_total);
/* calculate when to enable slave OTG */
L = (uint64_t)p * slave_h_total * master_pixel_clock_100Hz;
L = div_u64(L, master_h_total);
L = div_u64(L, slave_pixel_clock_100Hz);
XY = div_u64(L, p);
Y = master_v_active - XY - 1;
X = div_u64(((XY + 1) * p - L) * master_h_total, p * master_clock_divider);
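	/* Rough reading of the arithmetic above (an interpretation, not from a
	 * HW document): L/p is the slave line period expressed in master line
	 * periods, with p = 10000 giving fixed-point precision, and XY is its
	 * integer part. Y places the unlock point XY + 1 lines before the end
	 * of the master's active region, while X converts the fractional
	 * remainder into a horizontal pixel offset, so the released slave OTG
	 * reaches its vblank aligned with the master's.
	 */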
/*
* set master OTG to unlock when V/H
* counters reach calculated values
*/
REG_UPDATE(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_EN, 1);
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
X,
MASTER_UPDATE_LOCK_DB_Y,
Y);
/* lock master OTG */
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1, 1, 10);
/* accessing slave OTG registers */
optc1 = DCN10TG_FROM_TG(optc_slave);
/*
* enable slave OTG, the OTG is locked with
* master's update lock, so it will not run
*/
REG_UPDATE(OTG_CONTROL,
OTG_MASTER_EN, 1);
/* accessing master OTG registers */
optc1 = DCN10TG_FROM_TG(optc_master);
/*
* unlock master OTG. When master H/V counters reach
* DB_XY point, slave OTG will start
*/
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
/* accessing slave OTG registers */
optc1 = DCN10TG_FROM_TG(optc_slave);
/* wait for slave OTG to start running*/
REG_WAIT(OTG_CONTROL,
OTG_CURRENT_MASTER_EN_STATE,
1, 10, 5000);
/* accessing master OTG registers */
optc1 = DCN10TG_FROM_TG(optc_master);
/* disable the XY point*/
REG_UPDATE(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_EN, 0);
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
0,
MASTER_UPDATE_LOCK_DB_Y,
0);
/*restore master update lock*/
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, master_update_lock);
/* accessing slave OTG registers */
optc1 = DCN10TG_FROM_TG(optc_slave);
/* restore slave to be controlled by it's own */
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc_slave->inst);
}
void optc2_triplebuffer_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
REG_SET(OTG_VUPDATE_KEEPOUT, 0,
OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
}
void optc2_triplebuffer_unlock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
REG_SET(OTG_VUPDATE_KEEPOUT, 0,
OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0);
}
void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t v_blank_start = 0;
uint32_t h_blank_start = 0;
REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 1);
REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1,
DIG_UPDATE_LOCATION, 20);
REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START, &v_blank_start);
REG_GET(OTG_H_BLANK_START_END, OTG_H_BLANK_START, &h_blank_start);
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
(h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
}
void optc2_lock_doublebuffer_disable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
0,
MASTER_UPDATE_LOCK_DB_Y,
0);
REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0,
DIG_UPDATE_LOCATION, 0);
REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 0);
}
void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* Set the min/max selectors unconditionally so that
* DMCUB fw may change OTG timings when necessary
* TODO: Remove the w/a after fixing the issue in DMCUB firmware
*/
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
OTG_TRIGA_POLARITY_SELECT, 0,
OTG_TRIGA_FREQUENCY_SELECT, 0,
OTG_TRIGA_DELAY, 0,
OTG_TRIGA_CLEAR, 1);
}
void optc2_program_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_TRIGA_MANUAL_TRIG, 0,
OTG_TRIGA_MANUAL_TRIG, 1);
}
bool optc2_configure_crc(struct timing_generator *optc,
const struct crc_params *params)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET_2(OTG_CRC_CNTL2, 0,
OTG_CRC_DSC_MODE, params->dsc_mode,
OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
return optc1_configure_crc(optc, params);
}
void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *refresh_rate)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate);
}
static struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc2_enable_crtc,
.disable_crtc = optc1_disable_crtc,
	/* used by enable_timing_synchronization. Not needed for FPGA */
.is_counter_moving = optc1_is_counter_moving,
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
	/* used by enable_timing_synchronization. Not needed for FPGA */
.wait_for_state = optc1_wait_for_state,
.set_blank = optc1_set_blank,
.is_blanked = optc1_is_blanked,
.set_blank_color = optc1_program_blank_color,
.enable_reset_trigger = optc1_enable_reset_trigger,
.enable_crtc_reset = optc1_enable_crtc_reset,
.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
.triplebuffer_lock = optc2_triplebuffer_lock,
.triplebuffer_unlock = optc2_triplebuffer_unlock,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc2_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc2_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
.tg_init = optc1_tg_init,
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
.set_gsl = optc2_set_gsl,
.set_gsl_source_select = optc2_set_gsl_source_select,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.align_vblanks = optc2_align_vblanks,
};
void dcn20_timing_generator_init(struct optc *optc1)
{
optc1->base.funcs = &dcn20_tg_funcs;
optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
	optc1->min_h_sync_width = 4; /* HW originally asked for a minimum HSYNC width of 8 pixels
				      * for no actual reason; the Oculus Rift S will not light up
				      * with 8 since its hsync width is 6, so use 4 instead. */
optc1->min_v_sync_width = 1;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "dm_services.h"
#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dcn20_resource.h"
#include "dcn20_hwseq.h"
#include "dce/dce_hwseq.h"
#include "dcn20_dsc.h"
#include "dcn20_optc.h"
#include "abm.h"
#include "clk_mgr.h"
#include "dmcu.h"
#include "hubp.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
#include "vm_helper.h"
#include "dccg.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "hw_sequencer.h"
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
#include "link.h"
#define DC_LOGGER_INIT(logger)
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
#undef FN
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
static int find_free_gsl_group(const struct dc *dc)
{
if (dc->res_pool->gsl_groups.gsl_0 == 0)
return 1;
if (dc->res_pool->gsl_groups.gsl_1 == 0)
return 2;
if (dc->res_pool->gsl_groups.gsl_2 == 0)
return 3;
return 0;
}
/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
* This is only used to lock pipes in pipe splitting case with immediate flip
* Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
* so we get tearing with freesync since we cannot flip multiple pipes
* atomically.
* We use GSL for this:
* - immediate flip: find first available GSL group if not already assigned
* program gsl with that group, set current OTG as master
 * and always use 0x4 = AND of flip_ready from all pipes
* - vsync flip: disable GSL if used
*
* Groups in stream_res are stored as +1 from HW registers, i.e.
* gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
* Using a magic value like -1 would require tracking all inits/resets
*/
static void dcn20_setup_gsl_group_as_lock(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool enable)
{
struct gsl_params gsl;
int group_idx;
memset(&gsl, 0, sizeof(struct gsl_params));
if (enable) {
		/* return if a group is already assigned; had GSL been set up for
		 * a vsync flip we would have unassigned it, so it cannot be "left over"
*/
if (pipe_ctx->stream_res.gsl_group > 0)
return;
group_idx = find_free_gsl_group(dc);
ASSERT(group_idx != 0);
pipe_ctx->stream_res.gsl_group = group_idx;
/* set gsl group reg field and mark resource used */
switch (group_idx) {
case 1:
gsl.gsl0_en = 1;
dc->res_pool->gsl_groups.gsl_0 = 1;
break;
case 2:
gsl.gsl1_en = 1;
dc->res_pool->gsl_groups.gsl_1 = 1;
break;
case 3:
gsl.gsl2_en = 1;
dc->res_pool->gsl_groups.gsl_2 = 1;
break;
default:
BREAK_TO_DEBUGGER();
return; // invalid case
}
gsl.gsl_master_en = 1;
} else {
group_idx = pipe_ctx->stream_res.gsl_group;
if (group_idx == 0)
return; // if not in use, just return
pipe_ctx->stream_res.gsl_group = 0;
/* unset gsl group reg field and mark resource free */
switch (group_idx) {
case 1:
gsl.gsl0_en = 0;
dc->res_pool->gsl_groups.gsl_0 = 0;
break;
case 2:
gsl.gsl1_en = 0;
dc->res_pool->gsl_groups.gsl_1 = 0;
break;
case 3:
gsl.gsl2_en = 0;
dc->res_pool->gsl_groups.gsl_2 = 0;
break;
default:
BREAK_TO_DEBUGGER();
return;
}
gsl.gsl_master_en = 0;
}
/* at this point we want to program whether it's to enable or disable */
if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
pipe_ctx->stream_res.tg->funcs->set_gsl(
pipe_ctx->stream_res.tg,
&gsl);
pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
} else
BREAK_TO_DEBUGGER();
}
void dcn20_set_flip_control_gsl(
struct pipe_ctx *pipe_ctx,
bool flip_immediate)
{
if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
pipe_ctx->plane_res.hubp, flip_immediate);
}
void dcn20_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
bool force_on = true; /* disable power gating */
uint32_t org_ip_request_cntl = 0;
if (enable)
force_on = false;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
if (REG(DOMAIN8_PG_CONFIG))
REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
if (REG(DOMAIN10_PG_CONFIG))
REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
if (REG(DOMAIN9_PG_CONFIG))
REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
if (REG(DOMAIN11_PG_CONFIG))
REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
/* DCS0/1/2/3/4/5 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on);
if (REG(DOMAIN19_PG_CONFIG))
REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
if (REG(DOMAIN20_PG_CONFIG))
REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
if (REG(DOMAIN21_PG_CONFIG))
REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn20_dccg_init(struct dce_hwseq *hws)
{
/*
* set MICROSECOND_TIME_BASE_DIV
* 100Mhz refclk -> 0x120264
* 27Mhz refclk -> 0x12021b
* 48Mhz refclk -> 0x120230
*
*/
REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264);
/*
* set MILLISECOND_TIME_BASE_DIV
* 100Mhz refclk -> 0x1186a0
* 27Mhz refclk -> 0x106978
* 48Mhz refclk -> 0x10bb80
*
*/
REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
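	/* The constants above appear to encode the refclk tick counts in their
	 * low bits: 0x64/0x1b/0x30 are 100/27/48 ticks per microsecond and
	 * 0x186a0/0x6978/0xbb80 are 100000/27000/48000 ticks per millisecond,
	 * with the upper bits identical across refclk values.
	 */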
/* This value is dependent on the hardware pipeline delay so set once per SOC */
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
}
void dcn20_disable_vga(
struct dce_hwseq *hws)
{
REG_WRITE(D1VGA_CONTROL, 0);
REG_WRITE(D2VGA_CONTROL, 0);
REG_WRITE(D3VGA_CONTROL, 0);
REG_WRITE(D4VGA_CONTROL, 0);
REG_WRITE(D5VGA_CONTROL, 0);
REG_WRITE(D6VGA_CONTROL, 0);
}
void dcn20_program_triple_buffer(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool enable_triple_buffer)
{
if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
pipe_ctx->plane_res.hubp,
enable_triple_buffer);
}
}
/* Blank pixel data during initialization */
void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg)
{
struct dce_hwseq *hws = dc->hwseq;
enum dc_color_space color_space;
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
struct output_pixel_processor *bottom_opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
uint32_t otg_active_width, otg_active_height;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
color_space_to_black_color(dc, color_space, &black_color);
/* get the OTG active size */
tg->funcs->get_otg_active_size(tg,
&otg_active_width,
&otg_active_height);
/* get the OPTC source */
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
ASSERT(false);
return;
}
opp = dc->res_pool->opps[opp_id_src0];
/* don't override the blank pattern if already enabled with the correct one. */
if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp))
return;
if (num_opps == 2) {
otg_active_width = otg_active_width / 2;
if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
ASSERT(false);
return;
}
bottom_opp = dc->res_pool->opps[opp_id_src1];
}
opp->funcs->opp_set_disp_pattern_generator(
opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
otg_active_height,
0);
if (num_opps == 2) {
bottom_opp->funcs->opp_set_disp_pattern_generator(
bottom_opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
otg_active_height,
0);
}
hws->funcs.wait_for_blank_complete(opp);
}
void dcn20_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on)
{
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl = 0;
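	/* Encoding used by the *_pg_control helpers in this file: writing 1 to
	 * the POWER_GATE field requests gating, and the PGFSM status field is
	 * polled for 0 (powered on) or 2 (power gated) by the REG_WAIT calls
	 * below.
	 */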
if (hws->ctx->dc->debug.disable_dsc_power_gate)
return;
if (REG(DOMAIN16_PG_CONFIG) == 0)
return;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
switch (dsc_inst) {
case 0: /* DSC0 */
REG_UPDATE(DOMAIN16_PG_CONFIG,
DOMAIN16_POWER_GATE, power_gate);
REG_WAIT(DOMAIN16_PG_STATUS,
DOMAIN16_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 1: /* DSC1 */
REG_UPDATE(DOMAIN17_PG_CONFIG,
DOMAIN17_POWER_GATE, power_gate);
REG_WAIT(DOMAIN17_PG_STATUS,
DOMAIN17_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 2: /* DSC2 */
REG_UPDATE(DOMAIN18_PG_CONFIG,
DOMAIN18_POWER_GATE, power_gate);
REG_WAIT(DOMAIN18_PG_STATUS,
DOMAIN18_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 3: /* DSC3 */
REG_UPDATE(DOMAIN19_PG_CONFIG,
DOMAIN19_POWER_GATE, power_gate);
REG_WAIT(DOMAIN19_PG_STATUS,
DOMAIN19_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 4: /* DSC4 */
REG_UPDATE(DOMAIN20_PG_CONFIG,
DOMAIN20_POWER_GATE, power_gate);
REG_WAIT(DOMAIN20_PG_STATUS,
DOMAIN20_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 5: /* DSC5 */
REG_UPDATE(DOMAIN21_PG_CONFIG,
DOMAIN21_POWER_GATE, power_gate);
REG_WAIT(DOMAIN21_PG_STATUS,
DOMAIN21_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn20_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
{
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
if (hws->ctx->dc->debug.disable_dpp_power_gate)
return;
if (REG(DOMAIN1_PG_CONFIG) == 0)
return;
switch (dpp_inst) {
case 0: /* DPP0 */
REG_UPDATE(DOMAIN1_PG_CONFIG,
DOMAIN1_POWER_GATE, power_gate);
REG_WAIT(DOMAIN1_PG_STATUS,
DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 1: /* DPP1 */
REG_UPDATE(DOMAIN3_PG_CONFIG,
DOMAIN3_POWER_GATE, power_gate);
REG_WAIT(DOMAIN3_PG_STATUS,
DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 2: /* DPP2 */
REG_UPDATE(DOMAIN5_PG_CONFIG,
DOMAIN5_POWER_GATE, power_gate);
REG_WAIT(DOMAIN5_PG_STATUS,
DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 3: /* DPP3 */
REG_UPDATE(DOMAIN7_PG_CONFIG,
DOMAIN7_POWER_GATE, power_gate);
REG_WAIT(DOMAIN7_PG_STATUS,
DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 4: /* DPP4 */
REG_UPDATE(DOMAIN9_PG_CONFIG,
DOMAIN9_POWER_GATE, power_gate);
REG_WAIT(DOMAIN9_PG_STATUS,
DOMAIN9_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 5: /* DPP5 */
/*
* Do not power gate DPP5, should be left at HW default, power on permanently.
* PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
* reset.
* REG_UPDATE(DOMAIN11_PG_CONFIG,
* DOMAIN11_POWER_GATE, power_gate);
*
* REG_WAIT(DOMAIN11_PG_STATUS,
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/
break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
void dcn20_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
{
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
if (hws->ctx->dc->debug.disable_hubp_power_gate)
return;
if (REG(DOMAIN0_PG_CONFIG) == 0)
return;
switch (hubp_inst) {
case 0: /* DCHUBP0 */
REG_UPDATE(DOMAIN0_PG_CONFIG,
DOMAIN0_POWER_GATE, power_gate);
REG_WAIT(DOMAIN0_PG_STATUS,
DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 1: /* DCHUBP1 */
REG_UPDATE(DOMAIN2_PG_CONFIG,
DOMAIN2_POWER_GATE, power_gate);
REG_WAIT(DOMAIN2_PG_STATUS,
DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 2: /* DCHUBP2 */
REG_UPDATE(DOMAIN4_PG_CONFIG,
DOMAIN4_POWER_GATE, power_gate);
REG_WAIT(DOMAIN4_PG_STATUS,
DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 3: /* DCHUBP3 */
REG_UPDATE(DOMAIN6_PG_CONFIG,
DOMAIN6_POWER_GATE, power_gate);
REG_WAIT(DOMAIN6_PG_STATUS,
DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 4: /* DCHUBP4 */
REG_UPDATE(DOMAIN8_PG_CONFIG,
DOMAIN8_POWER_GATE, power_gate);
REG_WAIT(DOMAIN8_PG_STATUS,
DOMAIN8_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 5: /* DCHUBP5 */
/*
* Do not power gate DCHUB5, should be left at HW default, power on permanently.
* PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
* reset.
* REG_UPDATE(DOMAIN10_PG_CONFIG,
* DOMAIN10_POWER_GATE, power_gate);
*
* REG_WAIT(DOMAIN10_PG_STATUS,
* DOMAIN10_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/
break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
/* In flip immediate with pipe splitting case GSL is used for
* synchronization so we must disable it when the plane is disabled.
*/
if (pipe_ctx->stream_res.gsl_group != 0)
dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
if (hubp->funcs->hubp_update_mall_sel)
hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
dc->hwss.set_flip_control_gsl(pipe_ctx, false);
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
pipe_ctx->top_pipe = NULL;
pipe_ctx->bottom_pipe = NULL;
pipe_ctx->plane_state = NULL;
}
void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
dcn20_plane_atomic_disable(dc, pipe_ctx);
/* Turn back off the phantom OTG after the phantom plane is fully disabled
*/
if (is_phantom)
if (tg && tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
DC_LOG_DC("Power down front end %d\n",
pipe_ctx->pipe_idx);
}
void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank)
{
dcn20_blank_pixel_data(dc, pipe_ctx, blank);
}
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
int opp_cnt)
{
bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
int flow_ctrl_cnt;
if (opp_cnt >= 2)
hblank_halved = true;
flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
stream->timing.h_border_left -
stream->timing.h_border_right;
if (hblank_halved)
flow_ctrl_cnt /= 2;
/* ODM combine 4:1 case */
if (opp_cnt == 4)
flow_ctrl_cnt /= 2;
return flow_ctrl_cnt;
}
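/*
 * Worked example with illustrative 1080p CEA timing numbers (h_total = 2200,
 * h_addressable = 1920, no borders): flow_ctrl_cnt starts at 280, is halved
 * to 140 when two pixels per container are used or opp_cnt >= 2, and is
 * halved once more to 70 in the ODM 4:1 case.
 */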
enum dc_status dcn20_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
struct dc_stream_state *stream = pipe_ctx->stream;
struct drr_params params = {0};
unsigned int event_triggers = 0;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
bool interlace = stream->timing.flags.INTERLACE;
int i;
struct mpc_dwb_flow_control flow_control;
struct mpc *mpc = dc->res_pool->mpc;
bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing));
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
pipe_ctx->stream_res.tg->inst,
k1_div, k2_div);
}
	/* By the caller's loop, pipe0 is the parent pipe and is called first.
	 * The back end is set up for pipe0; other child pipes share the back
	 * end with pipe0, so no programming is needed for them.
*/
if (pipe_ctx->top_pipe != NULL)
return DC_OK;
/* TODO check if timing_changed, disable stream if timing changed */
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
opp_cnt++;
}
if (opp_cnt > 1)
pipe_ctx->stream_res.tg->funcs->set_odm_combine(
pipe_ctx->stream_res.tg,
opp_inst, opp_cnt,
&pipe_ctx->stream->timing);
	/* The HW programming guide assumes the display has already been
	 * disabled by the unplug sequence and that the OTG is stopped.
*/
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
}
if (dc_is_hdmi_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
else
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
pipe_ctx->pipe_dlg_param.vready_offset,
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
pipe_ctx->stream->signal,
true);
rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
flow_control.flow_ctrl_mode = 0;
flow_control.flow_ctrl_cnt0 = 0x80;
flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt);
if (mpc->funcs->set_out_rate_control) {
for (i = 0; i < opp_cnt; ++i) {
mpc->funcs->set_out_rate_control(
mpc, opp_inst[i],
true,
rate_control_2x_pclk,
&flow_control);
}
}
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
true);
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
true);
hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
}
hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
params.vertical_total_mid = stream->adjust.v_total_mid;
params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, &params);
// For DRR, set the trigger event to monitor the surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
/* Event triggers and the number of frames are initialized for DRR, but can
 * later be updated for PSR use. Note that DRR trigger events are generated
 * regardless of whether the frame count is met.
 */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
/* TODO program crtc source select for non-virtual signal*/
/* TODO program FMT */
/* TODO setup link_enc */
/* TODO set stream attributes */
/* TODO program audio */
/* TODO enable stream if timing changed */
/* TODO unblank stream if DP */
if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
return DC_OK;
}
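/*
 * Program the MPC output CSC: use the caller-provided matrix when the stream
 * requests a CSC adjustment, otherwise fall back to the default coefficients
 * for the given color space.
 */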
void dcn20_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
int opp_id)
{
struct mpc *mpc = dc->res_pool->mpc;
enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
if (mpc->funcs->power_on_mpc_mem_pwr)
mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
if (mpc->funcs->set_output_csc != NULL)
mpc->funcs->set_output_csc(mpc,
opp_id,
matrix,
ocsc_mode);
} else {
if (mpc->funcs->set_ocsc_default != NULL)
mpc->funcs->set_ocsc_default(mpc,
opp_id,
colorspace,
ocsc_mode);
}
}
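/*
 * Program the MPC output gamma (OGAM) from the stream's output transfer
 * function; passing NULL params below leaves the block in bypass.
 */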
bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
struct pwl_params *params = NULL;
/*
 * Program OGAM only for the top pipe.
 * If there is a pipe split, the diagnostic needs fixing: decide how to pass
 * the OGAM parameters for the stream. If programming all pipes is required,
 * remove the pipe_ctx->top_pipe == NULL condition, but then fix the
 * diagnostic as well.
 */
if (mpc->funcs->power_on_mpc_mem_pwr)
mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
if (pipe_ctx->top_pipe == NULL
&& mpc->funcs->set_output_gamma && stream->out_transfer_func) {
if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
params = &stream->out_transfer_func->pwl;
else if (pipe_ctx->stream->out_transfer_func->type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/*
* there is no ROM
*/
if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
/*
 * If the condition above was not met, 'params' is still NULL and the
 * output gamma is set to bypass.
 */
mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
return true;
}
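/*
 * Program the DPP blend LUT from the plane's blend transfer function;
 * distributed-points curves are first translated into the HW PWL format.
 */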
bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
struct pwl_params *blend_lut = NULL;
if (plane_state->blend_tf) {
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
blend_lut = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->blend_tf,
&dpp_base->regamma_params, false);
blend_lut = &dpp_base->regamma_params;
}
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
return result;
}
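/*
 * Program the DPP shaper LUT and, when an initialized 3D LUT is attached to
 * the plane, the 3D LUT as well; otherwise the 3D LUT is put in bypass.
 */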
bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
struct pwl_params *shaper_lut = NULL;
if (plane_state->in_shaper_func) {
if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
shaper_lut = &plane_state->in_shaper_func->pwl;
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
}
}
result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
if (plane_state->lut3d_func &&
plane_state->lut3d_func->state.bits.initialized == 1)
result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
&plane_state->lut3d_func->lut_3d);
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
return result;
}
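/*
 * Program the DPP degamma from the plane's input transfer function, using the
 * degamma RAM for PWL/distributed-points curves and the HW ROM, a user PWL or
 * bypass for the predefined/bypass cases. Also kicks off the shaper/3D LUT
 * and blend LUT programming for the plane.
 */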
bool dcn20_set_input_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
bool result = true;
bool use_degamma_ram = false;
if (dpp_base == NULL || plane_state == NULL)
return false;
hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
hws->funcs.set_blend_lut(pipe_ctx, plane_state);
if (plane_state->in_transfer_func)
tf = plane_state->in_transfer_func;
if (tf == NULL) {
dpp_base->funcs->dpp_set_degamma(dpp_base,
IPP_DEGAMMA_MODE_BYPASS);
return true;
}
if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
use_degamma_ram = true;
if (use_degamma_ram == true) {
if (tf->type == TF_TYPE_HWPWL)
dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
&tf->pwl);
else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_degamma_hw_format(tf,
&dpp_base->degamma_params);
dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
&dpp_base->degamma_params);
}
return true;
}
/* Handle the optimized cases where the de-gamma ROM can be used. */
if (tf->type == TF_TYPE_PREDEFINED) {
switch (tf->tf) {
case TRANSFER_FUNCTION_SRGB:
dpp_base->funcs->dpp_set_degamma(dpp_base,
IPP_DEGAMMA_MODE_HW_sRGB);
break;
case TRANSFER_FUNCTION_BT709:
dpp_base->funcs->dpp_set_degamma(dpp_base,
IPP_DEGAMMA_MODE_HW_xvYCC);
break;
case TRANSFER_FUNCTION_LINEAR:
dpp_base->funcs->dpp_set_degamma(dpp_base,
IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
result = true;
break;
default:
result = false;
break;
}
} else if (tf->type == TF_TYPE_BYPASS)
dpp_base->funcs->dpp_set_degamma(dpp_base,
IPP_DEGAMMA_MODE_BYPASS);
else {
/*
 * If we get here, the transfer function type was not handled correctly;
 * a fix is required for this use case.
 */
BREAK_TO_DEBUGGER();
dpp_base->funcs->dpp_set_degamma(dpp_base,
IPP_DEGAMMA_MODE_BYPASS);
}
return result;
}
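/*
 * Reprogram ODM combine on the OTG to match the current ODM pipe topology,
 * or fall back to ODM bypass when only one OPP is in use.
 */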
void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
opp_cnt++;
}
if (opp_cnt > 1)
pipe_ctx->stream_res.tg->funcs->set_odm_combine(
pipe_ctx->stream_res.tg,
opp_inst, opp_cnt,
&pipe_ctx->stream->timing);
else
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
}
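/*
 * Blank or unblank pixel data by driving the OPP display pattern generators,
 * splitting the horizontal active region across all ODM slices; ABM is
 * disabled while blanked and restored on unblank.
 */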
void dcn20_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank)
{
struct tg_color black_color = {0};
struct stream_resource *stream_res = &pipe_ctx->stream_res;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space = stream->output_color_space;
enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
struct pipe_ctx *odm_pipe;
int odm_cnt = 1;
int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
int odm_slice_width, last_odm_slice_width, offset = 0;
if (stream->link->test_pattern_enabled)
return;
/* get opp dpg blank color */
color_space_to_black_color(dc, color_space, &black_color);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_cnt++;
odm_slice_width = h_active / odm_cnt;
last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
if (blank) {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
}
} else {
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
odm_pipe = pipe_ctx;
while (odm_pipe->next_odm_pipe) {
dc->hwss.set_disp_pattern_generator(dc,
odm_pipe,
test_pattern,
test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
odm_slice_width,
v_active,
offset);
offset += odm_slice_width;
odm_pipe = odm_pipe->next_odm_pipe;
}
dc->hwss.set_disp_pattern_generator(dc,
odm_pipe,
test_pattern,
test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
last_odm_slice_width,
v_active,
offset);
if (!blank)
if (stream_res->abm) {
dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
}
static void dcn20_power_on_plane_resources(
struct dce_hwseq *hws,
struct pipe_ctx *pipe_ctx)
{
DC_LOGGER_INIT(hws->ctx->logger);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
}
}
static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
//}
dcn20_power_on_plane_resources(dc->hwseq, pipe_ctx);
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
true);
/* TODO: enable/disable in dm as per update type.
if (plane_state) {
DC_LOG_DC(dc->ctx->logger,
"Pipe:%d 0x%x: addr hi:0x%x, "
"addr low:0x%x, "
"src: %d, %d, %d,"
" %d; dst: %d, %d, %d, %d;\n",
pipe_ctx->pipe_idx,
plane_state,
plane_state->address.grph.addr.high_part,
plane_state->address.grph.addr.low_part,
plane_state->src_rect.x,
plane_state->src_rect.y,
plane_state->src_rect.width,
plane_state->src_rect.height,
plane_state->dst_rect.x,
plane_state->dst_rect.y,
plane_state->dst_rect.width,
plane_state->dst_rect.height);
DC_LOG_DC(dc->ctx->logger,
"Pipe %d: width, height, x, y format:%d\n"
"viewport:%d, %d, %d, %d\n"
"recout: %d, %d, %d, %d\n",
pipe_ctx->pipe_idx,
plane_state->format,
pipe_ctx->plane_res.scl_data.viewport.width,
pipe_ctx->plane_res.scl_data.viewport.height,
pipe_ctx->plane_res.scl_data.viewport.x,
pipe_ctx->plane_res.scl_data.viewport.y,
pipe_ctx->plane_res.scl_data.recout.width,
pipe_ctx->plane_res.scl_data.recout.height,
pipe_ctx->plane_res.scl_data.recout.x,
pipe_ctx->plane_res.scl_data.recout.y);
print_rq_dlg_ttu(dc, pipe_ctx);
}
*/
if (dc->vm_pa_config.valid) {
struct vm_system_aperture_param apt;
apt.sys_default.quad_part = 0;
apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
if (!pipe_ctx->top_pipe
&& pipe_ctx->plane_state
&& pipe_ctx->plane_state->flip_int_enabled
&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
// if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
// }
}
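/*
 * Lock or unlock front-end programming for a pipe tree. The TG master update
 * lock covers the whole tree, so only the top pipe takes it; immediate flips
 * with pipe splitting additionally use GSL for synchronization, and the DMUB
 * lock manager or triple-buffer lock is used where applicable.
 */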
void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock)
{
struct pipe_ctx *temp_pipe;
bool flip_immediate = false;
/* Use the TG master update lock to lock everything on the TG;
 * therefore only the top pipe needs to take the lock.
 */
if (!pipe || pipe->top_pipe)
return;
if (pipe->plane_state != NULL)
flip_immediate = pipe->plane_state->flip_immediate;
if (pipe->stream_res.gsl_group > 0) {
temp_pipe = pipe->bottom_pipe;
while (!flip_immediate && temp_pipe) {
if (temp_pipe->plane_state != NULL)
flip_immediate = temp_pipe->plane_state->flip_immediate;
temp_pipe = temp_pipe->bottom_pipe;
}
}
if (flip_immediate && lock) {
const int TIMEOUT_FOR_FLIP_PENDING_US = 100000;
unsigned int polling_interval_us = 1;
int i;
temp_pipe = pipe;
while (temp_pipe) {
if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) {
for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING_US / polling_interval_us; ++i) {
if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp))
break;
udelay(polling_interval_us);
}
/* no reason it should take this long for immediate flips */
ASSERT(i != TIMEOUT_FOR_FLIP_PENDING_US / polling_interval_us);
}
temp_pipe = temp_pipe->bottom_pipe;
}
}
/* In flip immediate and pipe splitting case, we need to use GSL
* for synchronization. Only do setup on locking and on flip type change.
*/
if (lock && (pipe->bottom_pipe != NULL || !flip_immediate))
if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
(!flip_immediate && pipe->stream_res.gsl_group > 0))
dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
if (pipe->plane_state != NULL)
flip_immediate = pipe->plane_state->flip_immediate;
temp_pipe = pipe->bottom_pipe;
while (flip_immediate && temp_pipe) {
if (temp_pipe->plane_state != NULL)
flip_immediate = temp_pipe->plane_state->flip_immediate;
temp_pipe = temp_pipe->bottom_pipe;
}
if (!lock && pipe->stream_res.gsl_group > 0 && pipe->plane_state &&
!flip_immediate)
dcn20_setup_gsl_group_as_lock(dc, pipe, false);
if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
hw_locks.bits.lock_pipe = 1;
inst_flags.otg_inst = pipe->stream_res.tg->inst;
if (pipe->plane_state != NULL)
hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;
dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
lock,
&hw_locks,
&inst_flags);
} else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
if (lock)
pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
else
pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
} else {
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
else
pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
}
}
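/*
 * Compare the old and new pipe contexts and set the per-pipe update flags
 * that drive front-end programming: enable/disable, ODM, global sync, MPCC,
 * scaler, viewport, DET size and DLG/TTU/RQ updates.
 */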
static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
new_pipe->update_flags.raw = 0;
/* If non-phantom pipe is being transitioned to a phantom pipe,
* set disable and return immediately. This is because the pipe
* that was previously in use must be fully disabled before we
* can "enable" it as a phantom pipe (since the OTG will certainly
* be different). The post_unlock sequence will set the correct
* update flags to enable the phantom pipe.
*/
if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
new_pipe->update_flags.bits.disable = 1;
return;
}
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
/* Detect pipe enable/disable */
if (!old_pipe->plane_state && new_pipe->plane_state) {
new_pipe->update_flags.bits.enable = 1;
new_pipe->update_flags.bits.mpcc = 1;
new_pipe->update_flags.bits.dppclk = 1;
new_pipe->update_flags.bits.hubp_interdependent = 1;
new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
new_pipe->update_flags.bits.unbounded_req = 1;
new_pipe->update_flags.bits.gamut_remap = 1;
new_pipe->update_flags.bits.scaler = 1;
new_pipe->update_flags.bits.viewport = 1;
new_pipe->update_flags.bits.det_size = 1;
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
new_pipe->update_flags.bits.odm = 1;
new_pipe->update_flags.bits.global_sync = 1;
}
return;
}
/* For SubVP we need to unconditionally enable because phantom pipes are
 * always removed and then re-added on every full update whenever SubVP is
 * in use. The remove-add sequence of the phantom pipe always results in
 * the pipe being blanked in enable_stream_timing (DPG).
 */
if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
new_pipe->update_flags.bits.enable = 1;
/* Phantom pipes are effectively disabled; if the pipe was previously a
 * phantom pipe we have to enable it.
 */
if (old_pipe->plane_state && old_pipe->plane_state->is_phantom &&
new_pipe->plane_state && !new_pipe->plane_state->is_phantom)
new_pipe->update_flags.bits.enable = 1;
if (old_pipe->plane_state && !new_pipe->plane_state) {
new_pipe->update_flags.bits.disable = 1;
return;
}
/* Detect plane change */
if (old_pipe->plane_state != new_pipe->plane_state) {
new_pipe->update_flags.bits.plane_changed = true;
}
/* Detect top pipe only changes */
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
/* Detect odm changes */
if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
&& old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
|| (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
|| (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.odm = 1;
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
|| old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
|| old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
new_pipe->update_flags.bits.global_sync = 1;
}
if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
new_pipe->update_flags.bits.det_size = 1;
/*
 * Detect OPP / TG change; only set on change, not on enable.
 * Assume mpcc inst == pipe index; if not, this code needs to be updated,
 * since the MPCC is what is affected by these changes. In fact, the whole
 * sequence currently makes this assumption, matching the HUBP reset to the
 * MPCC reset of the same index.
 */
if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.opp_changed = 1;
if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
new_pipe->update_flags.bits.tg_changed = 1;
/*
 * Detect MPCC blending changes; only the DPP inst and OPP matter here.
 * MPCCs being removed/inserted update the connected ones during their
 * own programming.
 */
if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.mpcc = 1;
/* Detect dppclk change */
if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
new_pipe->update_flags.bits.dppclk = 1;
/* Check for scl update */
if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
new_pipe->update_flags.bits.scaler = 1;
/* Check for vp update */
if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
new_pipe->update_flags.bits.viewport = 1;
/* Detect dlg/ttu/rq updates */
{
struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
/* Detect pipe interdependent updates */
if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
new_pipe->update_flags.bits.hubp_interdependent = 1;
}
/* Detect any other updates to ttu/rq/dlg */
if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
}
if (old_pipe->unbounded_req != new_pipe->unbounded_req)
new_pipe->update_flags.bits.unbounded_req = 1;
}
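/*
 * Apply the detected updates to HUBP and DPP for one pipe: DPP clock/DTO,
 * DLG/TTU/RQ registers, input CSC, MPCC blending, scaler and viewport, cursor,
 * gamut remap / output CSC, surface config and finally the surface address.
 */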
static void dcn20_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dccg *dccg = dc->res_pool->dccg;
bool viewport_changed = false;
if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
if (pipe_ctx->update_flags.bits.enable)
dccg->funcs->update_dpp_dto(dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe
 * is tied to. The VTG is within DCHUBBUB, a common block shared by each
 * pipe's HUBP. VTGs map 1:1 to OTGs; each pipe's HUBP selects which VTG
 * it uses.
 */
if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
hubp->funcs->hubp_setup(
hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
}
if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
if (pipe_ctx->update_flags.bits.hubp_interdependent)
hubp->funcs->hubp_setup_interdependent(
hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
plane_state->update_flags.bits.bpp_change ||
plane_state->update_flags.bits.input_csc_change ||
plane_state->update_flags.bits.color_space_change ||
plane_state->update_flags.bits.coeff_reduction_change) {
struct dc_bias_and_scale bns_params = {0};
// program the input csc
dpp->funcs->dpp_setup(dpp,
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
plane_state->color_space,
NULL);
if (dpp->funcs->dpp_program_bias_and_scale) {
//TODO :for CNVC set scale and bias registers if necessary
build_prescale_params(&bns_params, plane_state);
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
}
if (pipe_ctx->update_flags.bits.mpcc
|| pipe_ctx->update_flags.bits.plane_changed
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
hws->funcs.update_mpcc(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
/* scaler configuration */
pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
&pipe_ctx->plane_res.scl_data.viewport_c);
viewport_changed = true;
}
/* Any updates are handled in the dc interface; here we just need to apply the existing cursor state for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
/* Any updates are handled in the dc interface; here we just need to
 * apply the existing state for plane enable / OPP change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|| pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| plane_state->update_flags.bits.gamut_remap_change
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
dc->hwss.program_gamut_remap(pipe_ctx);
/*call the dcn2 method which uses mpc csc*/
dc->hwss.program_output_csc(dc,
pipe_ctx,
pipe_ctx->stream->output_color_space,
pipe_ctx->stream->csc_color_matrix.matrix,
hubp->opp_id);
}
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
pipe_ctx->update_flags.bits.opp_changed ||
plane_state->update_flags.bits.pixel_format_change ||
plane_state->update_flags.bits.horizontal_mirror_change ||
plane_state->update_flags.bits.rotation_change ||
plane_state->update_flags.bits.swizzle_change ||
plane_state->update_flags.bits.dcc_change ||
plane_state->update_flags.bits.bpp_change ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.plane_size_change) {
struct plane_size size = plane_state->plane_size;
size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
hubp->funcs->hubp_program_surface_config(
hubp,
plane_state->format,
&plane_state->tiling_info,
&size,
plane_state->rotation,
&plane_state->dcc,
plane_state->horizontal_mirror,
0);
hubp->power_gated = false;
}
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
plane_state->update_flags.bits.addr_update)
hws->funcs.update_plane_addr(dc, pipe_ctx);
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
/* If the stream paired with this plane is phantom, the plane is also phantom */
if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM
&& hubp->funcs->phantom_hubp_post_enable)
hubp->funcs->phantom_hubp_post_enable(hubp);
}
static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
struct pipe_ctx *other_pipe;
int vready_offset = pipe->pipe_dlg_param.vready_offset;
/* Always use the largest vready_offset of all connected pipes */
for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
return vready_offset;
}
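/*
 * Program a single pipe according to its update flags: blank/unblank on the
 * OTG master, global sync and VTG parameters, ODM, plane enable, DET size,
 * HUBP/DPP updates, HDR multiplier, input/output transfer functions, FMT on
 * OPP change, and the ABM level last.
 */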
static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
/* Only need to unblank on top pipe */
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.odm ||
pipe_ctx->stream->update_flags.bits.abm_level)
hws->funcs.blank_pixel_data(dc, pipe_ctx,
!pipe_ctx->plane_state ||
!pipe_ctx->plane_state->visible);
}
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
&& !pipe_ctx->prev_odm_pipe) {
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM)
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
if (pipe_ctx->update_flags.bits.enable) {
dcn20_enable_plane(dc, pipe_ctx, context);
if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
}
if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
dc->res_pool->hubbub->funcs->program_det_size(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->update_flags.bits.enable
|| pipe_ctx->plane_state->update_flags.bits.hdr_mult)
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
pipe_ctx->plane_state->update_flags.bits.lut_3d)
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so only do
 * gamma programming when powering on; an internal memcmp avoids updating
 * on slave planes.
 */
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
pipe_ctx->stream->update_flags.bits.out_tf ||
pipe_ctx->plane_state->update_flags.bits.output_tf_change)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different OPP, we
 * should reprogram the fmt. This deals with cases where the
 * interaction between MPC and ODM combine on different streams
 * causes a different pipe to be chosen to ODM combine with.
 */
if (pipe_ctx->update_flags.bits.enable
|| pipe_ctx->update_flags.bits.opp_changed) {
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
pipe_ctx->stream->timing.display_color_depth,
pipe_ctx->stream->signal);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
&pipe_ctx->stream->bit_depth_params,
&pipe_ctx->stream->clamping);
}
/* Set ABM pipe after other pipe configurations done */
if (pipe_ctx->plane_state->visible) {
if (pipe_ctx->stream_res.abm) {
dc->hwss.set_pipe(pipe_ctx);
pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
pipe_ctx->stream->abm_level);
}
}
}
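/*
 * Full front-end programming pass for a new context: carry over GSL groups,
 * detect per-pipe changes, blank and disconnect pipes being disabled, then
 * program all updated pipes top-down so MPCC trees are built in order,
 * followed by any writeback pipes.
 */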
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
int i;
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
/* Carry over GSL groups in case the context is changing. */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream == old_pipe_ctx->stream)
pipe_ctx->stream_res.gsl_group = old_pipe_ctx->stream_res.gsl_group;
}
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
}
}
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
* buffer updates properly)
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc)
tg->funcs->enable_crtc(tg);
}
}
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
&& !context->res_ctx.pipe_ctx[i].top_pipe
&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
struct hubbub *hubbub = dc->res_pool->hubbub;
/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
* then we want to do the programming here (effectively it's being disabled). If we do
* the programming later the DET won't be updated until the OTG for the phantom pipe is
* turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
* DET allocation.
*/
if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
(context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom)))
hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
if (hws->funcs.program_pipe)
hws->funcs.program_pipe(dc, pipe, context);
else {
/* Don't program phantom pipes in the regular front end programming sequence.
* There is an MPO transition case where a pipe being used by a video plane is
* transitioned directly to be a phantom pipe when closing the MPO video. However
* the phantom pipe will program a new HUBP_VTG_SEL (update takes place right away),
* but the MPO still exists until the double buffered update of the main pipe so we
* will get a frame of underflow if the phantom pipe is programmed here.
*/
if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
pipe = pipe->bottom_pipe;
}
}
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe
&& pipe->stream && pipe->stream->num_wb_info > 0
&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
|| pipe->stream->update_flags.raw)
&& hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
/* Avoid underflow by checking that the pipe line read has started when adding a 2nd plane. */
if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
!pipe->top_pipe &&
pipe->stream &&
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
dc->current_state->stream_status[0].plane_count == 1 &&
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
/* when dynamic ODM is active, pipes must be reconfigured when all planes are
* disabled, as some transitions will leave software and hardware state
* mismatched.
*/
if (dc->debug.enable_single_display_2to1_odm_policy &&
pipe->stream &&
pipe->update_flags.bits.disable &&
!pipe->prev_odm_pipe &&
hws->funcs.update_odm)
hws->funcs.update_odm(dc, context, pipe);
}
}
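/*
 * Work done after the pipes are unlocked: disable planes flagged for
 * disable, wait for pending flips on newly enabled pipes, program phantom
 * (SubVP) pipes, then apply forced P-state, MALL configuration and the
 * stutter/watermark workarounds.
 */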
void dcn20_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
int i;
const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
unsigned int polling_interval_us = 1;
struct dce_hwseq *hwseq = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
 * If we are enabling a pipe, we need to wait for the pending clear as this
 * is a critical part of the enable operation; otherwise, DM may request an
 * immediate flip which will cause HW to perform an "immediate enable"
 * (as opposed to "vsync enable"), which is unsupported on DCN.
 */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
udelay(polling_interval_us);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
* case (if a pipe being used for a video plane transitions to a phantom pipe, it
* can underflow due to HUBP_VTG_SEL programming if done in the regular front end
* programming sequence).
*/
while (pipe) {
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
/* When turning on the phantom pipe we want to run through the
* entire enable sequence, so apply all the "enable" flags.
*/
if (dc->hwss.apply_update_flags_for_phantom)
dc->hwss.apply_update_flags_for_phantom(pipe);
if (dc->hwss.update_phantom_vp_position)
dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
}
pipe = pipe->bottom_pipe;
}
}
}
/* P-State support transitions:
* Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
* FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
* Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
* FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
* FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
*/
if (hwseq && hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
/* Only program the MALL registers after all the main and phantom pipes
* are done programming.
*/
if (hwseq->funcs.program_mall_pipe_config)
hwseq->funcs.program_mall_pipe_config(dc, context);
/* WA to apply WM settings */
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
/* WA for stutter underflow during MPO transitions when adding 2nd plane */
if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
if (dc->current_state->stream_status[0].plane_count == 1 &&
context->stream_status[0].plane_count > 1) {
struct timing_generator *tg = dc->res_pool->timing_generators[0];
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
}
}
}
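/*
 * Called before a state commit: update clocks (not yet safe to lower) and
 * program conservative DCHUBBUB watermarks, temporarily forcing a very large
 * P-state change watermark while SubVP is in use before restoring the
 * original value for DMCUB, and shrink the compressed buffer; it is grown
 * back in dcn20_optimize_bandwidth.
 */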
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
unsigned int compbuf_size_kb = 0;
unsigned int cache_wm_a = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns;
unsigned int i;
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't restore the original watermark value
if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
}
/* program dchubbub watermarks:
* For assigning wm_optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
// Restore the real watermark so we can commit the value to DMCUB
// DMCUB uses the "original" watermark value in SubVP MCLK switch
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = cache_wm_a;
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
}
}
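/*
 * Called after a state commit: program the final watermarks, grow the
 * compressed buffer back, hand FPO P-state control to DMUB firmware when
 * enabled, and update clocks now that it is safe to lower them.
 */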
void dcn20_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't need to restore the original watermark value
if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
}
/* program dchubbub watermarks */
hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
/* increase compbuf size */
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dc_dmub_srv_p_state_delegate(dc,
true, context);
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
dc->clk_mgr->clks.fw_based_mclk_switching = true;
} else {
dc->clk_mgr->clks.fw_based_mclk_switching = false;
}
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
pipe_ctx->dlg_regs.min_dst_y_next_start);
}
}
}
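/*
 * Revalidate bandwidth for the given context and reapply global sync, VTG
 * and DLG/TTU/RQ programming to every pipe that has a plane; returns false
 * if bandwidth validation fails.
 */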
bool dcn20_update_bandwidth(
struct dc *dc,
struct dc_state *context)
{
int i;
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
return false;
/* apply updated bandwidth parameters */
dc->hwss.prepare_bandwidth(dc, context);
/* update hubp configs for all pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state == NULL)
continue;
if (pipe_ctx->top_pipe == NULL) {
bool blank = !is_pipe_tree_visible(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
if (pipe_ctx->prev_odm_pipe == NULL)
hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
}
return true;
}
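/*
 * Enable a writeback pipe: route the owning OTG into the DWB, configure the
 * MCIF_WB buffers and arbitration, then enable MCIF_WB and the DWB itself.
 */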
void dcn20_enable_writeback(
struct dc *dc,
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
struct timing_generator *optc;
ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
ASSERT(wb_info->wb_enabled);
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
/* set the OPTC source mux */
optc = dc->res_pool->timing_generators[dwb->otg_inst];
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
dwb->funcs->enable(dwb, &wb_info->dwb_params);
/* TODO: add sequence to enable/disable warmup */
}
void dcn20_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
dwb = dc->res_pool->dwbc[dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
dwb->funcs->disable(dwb);
mcif_wb->funcs->disable_mcif(mcif_wb);
}
bool dcn20_wait_for_blank_complete(
struct output_pixel_processor *opp)
{
int counter;
for (counter = 0; counter < 1000; counter++) {
if (opp->funcs->dpg_is_blanked(opp))
break;
udelay(100);
}
if (counter == 1000) {
dm_error("DC: failed to blank crtc!\n");
return false;
}
return true;
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
if (!hubp)
return false;
return hubp->funcs->dmdata_status_done(hubp);
}
void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
if (pipe_ctx->stream_res.dsc) {
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
while (odm_pipe) {
hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
odm_pipe = odm_pipe->next_odm_pipe;
}
}
}
void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
if (pipe_ctx->stream_res.dsc) {
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
while (odm_pipe) {
hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
odm_pipe = odm_pipe->next_odm_pipe;
}
}
}
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
{
struct dc_dmdata_attributes attr = { 0 };
struct hubp *hubp = pipe_ctx->plane_res.hubp;
attr.dmdata_mode = DMDATA_HW_MODE;
attr.dmdata_size =
dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36;
attr.address.quad_part =
pipe_ctx->stream->dmdata_address.quad_part;
attr.dmdata_dl_delta = 0;
attr.dmdata_qos_mode = 0;
attr.dmdata_qos_level = 0;
attr.dmdata_repeat = 1; /* always repeat */
attr.dmdata_updated = 1;
attr.dmdata_sw_data = NULL;
hubp->funcs->dmdata_set_attributes(hubp, &attr);
}
void dcn20_init_vm_ctx(
struct dce_hwseq *hws,
struct dc *dc,
struct dc_virtual_addr_space_config *va_config,
int vmid)
{
struct dcn_hubbub_virt_addr_config config;
if (vmid == 0) {
ASSERT(0); /* VMID cannot be 0 for vm context */
return;
}
config.page_table_start_addr = va_config->page_table_start_addr;
config.page_table_end_addr = va_config->page_table_end_addr;
config.page_table_block_size = va_config->page_table_block_size_in_bytes;
config.page_table_depth = va_config->page_table_depth;
config.page_table_base_addr = va_config->page_table_base_addr;
dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
}
int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
config.page_table_default_page_addr = pa_config->page_table_default_page_addr;
return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
}
static bool patch_address_for_sbs_tb_stereo(
struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
(pipe_ctx->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE ||
pipe_ctx->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
*addr = plane_state->address.grph_stereo.left_addr;
plane_state->address.grph_stereo.left_addr =
plane_state->address.grph_stereo.right_addr;
return true;
}
if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
plane_state->address.grph_stereo.right_addr =
plane_state->address.grph_stereo.left_addr;
plane_state->address.grph_stereo.right_meta_addr =
plane_state->address.grph_stereo.left_meta_addr;
}
return false;
}
void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
if (plane_state == NULL)
return;
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
// Call Helper to track VMID use
vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst);
pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
pipe_ctx->plane_res.hubp,
&plane_state->address,
plane_state->flip_immediate);
plane_state->status.requested_address = plane_state->address;
if (plane_state->flip_immediate)
plane_state->status.current_address = plane_state->address;
if (addr_patched)
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
params.opp_cnt = 1;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
params.opp_cnt++;
}
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
pipe_ctx->stream_res.tg->inst);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, true);
}
}
void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0)
start_line = 0;
if (tg->funcs->setup_vertical_interrupt2)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
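/*
 * dcn20_reset_back_end_for_pipe() - tear down the stream back end for one
 * pipe: turn off DPMS (or just the audio stream), release the audio
 * endpoint when dynamic audio is used, and, for the parent pipe only,
 * disable ABM, the CRTC and the OTG clock, set ODM bypass, clear DRR, and
 * disable the link output when symclk_state is SYMCLK_ON_TX_OFF.
 * pipe_ctx->stream is cleared once the pipe is located in the current
 * state.
 */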
static void dcn20_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
int i;
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
}
/* DPMS may already be disabled, or the dpms_off status may be
* incorrect due to the fastboot feature: when the system resumes
* from S4 with only a second screen, dpms_off is true even though
* VBIOS lit up eDP, so check the link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
/* free acquired resources */
if (pipe_ctx->stream_res.audio) {
/*disable az_endpoint*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
/*free audio*/
if (dc->caps.dynamic_audio == true) {
/* audio endpoints are dynamically arbitrated */
/* free the resource; is_audio_acquired needs to be reset */
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
}
/* The caller's loop resets the parent pipe (pipe 0) last. The back end
* is shared by all pipes and is disabled only when the parent pipe is
* disabled.
*/
if (pipe_ctx->top_pipe == NULL) {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
*/
link->phy_state.symclk_ref_cnts.otg = 0;
if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
link_hwss->disable_link_output(link,
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
break;
if (i == dc->res_pool->pipe_count)
return;
pipe_ctx->stream = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
void dcn20_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx_old->stream)
continue;
if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
continue;
if (!pipe_ctx->stream ||
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
}
}
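/*
 * dcn20_update_mpcc() - build the MPCC blend configuration (per-pixel vs
 * global alpha, global gain and top/bottom gains) for the plane. When no
 * full update is pending only the blending is refreshed; otherwise the
 * MPCC is removed from the OPP's MPC tree (if present) and re-inserted
 * with the new configuration.
 */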
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
int mpcc_id;
struct mpcc *new_mpcc;
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
if (per_pixel_alpha) {
blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
if (pipe_ctx->plane_state->global_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
} else {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
}
} else {
blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
blnd_cfg.global_alpha = 0xff;
blnd_cfg.background_color_bpc = 4;
blnd_cfg.bottom_gain_mode = 0;
blnd_cfg.top_gain = 0x1f000;
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
if (pipe_ctx->plane_state->format
== SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
blnd_cfg.pre_multiplied_alpha = false;
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
* on resume from hibernate, BIOS sets up MPCC0, and
* we do mpcc_remove but the mpcc cannot go to idle
* after remove. This causes us to pick mpcc1 here,
* which causes a pstate hang for a yet unknown reason.
*/
mpcc_id = hubp->inst;
/* If there is no full update, don't need to touch MPC tree*/
if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
!pipe_ctx->update_flags.bits.mpcc) {
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
return;
}
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
if (new_mpcc != NULL)
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
else
if (dc->debug.sanity_checks)
mpc->funcs->assert_mpcc_idle_before_connect(
dc->res_pool->mpc, mpcc_id);
/* Call MPC to insert new plane */
new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
mpc_tree_params,
&blnd_cfg,
NULL,
NULL,
hubp->inst,
mpcc_id);
dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
ASSERT(new_mpcc != NULL);
hubp->opp_id = pipe_ctx->stream_res.opp->inst;
hubp->mpcc_id = mpcc_id;
}
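/*
 * get_phyd32clk_src() - map the link's UNIPHY transmitter (A..E) to the
 * matching PHYD32CLK source, falling back to PHYD32CLKA for anything else.
 */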
static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
{
switch (link->link_enc->transmitter) {
case TRANSMITTER_UNIPHY_A:
return PHYD32CLKA;
case TRANSMITTER_UNIPHY_B:
return PHYD32CLKB;
case TRANSMITTER_UNIPHY_C:
return PHYD32CLKC;
case TRANSMITTER_UNIPHY_D:
return PHYD32CLKD;
case TRANSMITTER_UNIPHY_E:
return PHYD32CLKE;
default:
return PHYD32CLKA;
}
}
static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
int count = 1;
while (odm_pipe != NULL) {
count++;
odm_pipe = odm_pipe->next_odm_pipe;
}
return count;
}
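/*
 * dcn20_enable_stream() - enable the stream path on the back end. For
 * 128b/132b signals this sets up HPO hardware control, the DP stream
 * clock, symclk32 SE and the DTBCLK DTO; pixel rate dividers are
 * programmed when the hooks exist, the stream encoder and info frames are
 * set up, and "early control" is computed as the active width (plus
 * borders) modulo the lane count, or the lane count itself when the
 * remainder is 0. For example, 1920 active pixels with no borders on a
 * 4-lane link gives 1920 % 4 == 0, so early_control is programmed as 4.
 */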
void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
pipe_ctx->stream->link->cur_link_settings.lane_count;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct dc_link *link = pipe_ctx->stream->link;
uint32_t active_total_with_borders;
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dtbclk_dto_params dto_params = {0};
struct dccg *dccg = dc->res_pool->dccg;
enum phyd32clk_clock_source phyd32clk;
int dp_hpo_inst;
struct dce_hwseq *hws = dc->hwseq;
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
}
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
phyd32clk = get_phyd32clk_src(link);
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
} else {
}
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
pipe_ctx->stream_res.tg->inst,
k1_div, k2_div);
}
link_hwss->setup_stream_encoder(pipe_ctx);
if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
if (dc->hwss.program_dmdata_engine)
dc->hwss.program_dmdata_engine(pipe_ctx);
}
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
timing->h_addressable
+ timing->h_border_left
+ timing->h_border_right;
if (lane_count != 0)
early_control = active_total_with_borders % lane_count;
if (early_control == 0)
early_control = lane_count;
tg->funcs->set_early_control(tg, early_control);
if (dc->hwseq->funcs.set_pixels_per_cycle)
dc->hwseq->funcs.set_pixels_per_cycle(pipe_ctx);
}
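/*
 * dcn20_program_dmdata_engine() - select DP or HDMI dynamic metadata mode
 * for the stream encoder. Dynamic metadata is only enabled when the
 * stream carries a dmdata address, in which case the generic HDR SMD
 * infopacket is suppressed.
 */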
void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
bool enable = false;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
? dmdata_dp
: dmdata_hdmi;
/* if using dynamic meta, don't set up generic infopackets */
if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
enable = true;
}
if (!hubp)
return;
if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
return;
stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
hubp->inst, mode);
}
void dcn20_fpga_init_hw(struct dc *dc)
{
int i, j;
struct dce_hwseq *hws = dc->hwseq;
struct resource_pool *res_pool = dc->res_pool;
struct dc_state *context = dc->current_state;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
hws->funcs.dccg_init(hws);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
if (REG(REFCLK_CNTL))
REG_WRITE(REFCLK_CNTL, 0);
/* Blank pixel data with OPP DPG */
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg))
dcn20_init_blank(dc, tg);
}
for (i = 0; i < res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->lock(tg);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dpp *dpp = res_pool->dpps[i];
dpp->funcs->dpp_reset(dpp);
}
/* Reset all MPCC muxes */
res_pool->mpc->funcs->mpc_init(res_pool->mpc);
/* initialize OPP mpc_tree parameter */
for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
for (j = 0; j < MAX_PIPES; j++)
res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = dc->res_pool->hubps[i];
struct dpp *dpp = dc->res_pool->dpps[i];
pipe_ctx->stream_res.tg = tg;
pipe_ctx->pipe_idx = i;
pipe_ctx->plane_res.hubp = hubp;
pipe_ctx->plane_res.dpp = dpp;
pipe_ctx->plane_res.mpcc_inst = dpp->inst;
hubp->mpcc_id = dpp->inst;
hubp->opp_id = OPP_ID_INVALID;
hubp->power_gated = false;
pipe_ctx->stream_res.opp = NULL;
hubp->funcs->hubp_init(hubp);
//dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
//dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
/*to do*/
hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
for (i = 0; i < res_pool->res_cap->num_dwb; i++)
res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
dc->hwss.disable_plane(dc, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
}
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
tg->funcs->tg_init(tg);
}
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
}
#ifndef TRIM_FSFT
bool dcn20_optimize_timing_for_fsft(struct dc *dc,
struct dc_crtc_timing *timing,
unsigned int max_input_rate_in_khz)
{
unsigned int old_v_front_porch;
unsigned int old_v_total;
unsigned int max_input_rate_in_100hz;
unsigned long long new_v_total;
max_input_rate_in_100hz = max_input_rate_in_khz * 10;
if (max_input_rate_in_100hz < timing->pix_clk_100hz)
return false;
old_v_total = timing->v_total;
old_v_front_porch = timing->v_front_porch;
timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
timing->pix_clk_100hz = max_input_rate_in_100hz;
/* scale v_total by the ratio of the new input rate to the original pixel clock */
new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->fast_transport_output_rate_100hz);
timing->v_total = new_v_total;
timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
return true;
}
#endif
void dcn20_set_disp_pattern_generator(const struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum controller_dp_test_pattern test_pattern,
enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width, int height, int offset)
{
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
color_space, color_depth, solid_color, width, height, offset);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |
/*
* Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn20_hubp.h"
#include "dm_services.h"
#include "dce_calcs.h"
#include "reg_helper.h"
#include "basics/conversion.h"
#define DC_LOGGER_INIT(logger)
#define REG(reg)\
hubp2->hubp_regs->reg
#define CTX \
hubp2->base.ctx
#undef FN
#define FN(reg_name, field_name) \
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
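/*
 * hubp2_set_vm_system_aperture_settings() - program the VM system aperture
 * default/low/high addresses (shifted to the register formats noted
 * below) and enable the L1 TLB with system access mode 0x3.
 */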
void hubp2_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
// The format of default addr is 48:12 of the 48 bit addr
mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
REG_UPDATE_2(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, 1, /* 1 = system physical memory */
DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mc_vm_apt_default.high_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 0,
DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mc_vm_apt_default.low_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, 0,
MC_VM_SYSTEM_APERTURE_LOW_ADDR, mc_vm_apt_low.quad_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, 0,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mc_vm_apt_high.quad_part);
REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
ENABLE_L1_TLB, 1,
SYSTEM_ACCESS_MODE, 0x3);
}
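/*
 * hubp2_program_deadline() - write the DML-computed DLG and TTU values:
 * per-HUBP blank offsets, destination dimensions and ref-to-pixel clock
 * ratio, per-luma/chroma PTE and meta chunk cadence, and the QoS
 * watermarks and per-request delivery settings for surface 0/1 and the
 * cursor.
 */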
void hubp2_program_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
/* DLG - Per hubp */
REG_SET_2(BLANK_OFFSET_0, 0,
REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
REG_SET(BLANK_OFFSET_1, 0,
MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
REG_SET(DST_DIMENSIONS, 0,
REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
REG_SET_2(DST_AFTER_SCALER, 0,
REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
/* DLG - Per luma/chroma */
REG_SET(VBLANK_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
if (REG(NOM_PARAMETERS_0))
REG_SET(NOM_PARAMETERS_0, 0,
DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
if (REG(NOM_PARAMETERS_1))
REG_SET(NOM_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
REG_SET(NOM_PARAMETERS_4, 0,
DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
REG_SET(NOM_PARAMETERS_5, 0,
REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
REG_SET_2(PER_LINE_DELIVERY, 0,
REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
REG_SET(VBLANK_PARAMETERS_2, 0,
REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
if (REG(NOM_PARAMETERS_2))
REG_SET(NOM_PARAMETERS_2, 0,
DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
if (REG(NOM_PARAMETERS_3))
REG_SET(NOM_PARAMETERS_3, 0,
REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
REG_SET(NOM_PARAMETERS_6, 0,
DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
REG_SET(NOM_PARAMETERS_7, 0,
REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
/* TTU - per hubp */
REG_SET_2(DCN_TTU_QOS_WM, 0,
QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
REG_SET(FLIP_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_FLIP_L, dlg_attr->refcyc_per_pte_group_flip_l);
}
void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
uint32_t value = 0;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
/* disable_dlg_test_mode Set 9th bit to 1 to disable "dv" mode */
REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
/*
if (VSTARTUP_START - (VREADY_OFFSET+VUPDATE_WIDTH+VUPDATE_OFFSET)/htotal)
<= OTG_V_BLANK_END
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 1
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
if (pipe_dest->htotal != 0) {
if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
value = 1;
} else
value = 0;
}
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
static void hubp2_program_requestor(struct hubp *hubp,
struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
REG_SET_4(DCN_EXPANSION_MODE, 0,
DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
}
static void hubp2_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
/* The OTG is locked when this function is called and the registers are
* double buffered, so disabling the requestors is not needed.
*/
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp2_program_requestor(hubp, rq_regs);
hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
}
void hubp2_setup_interdependent(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_SET_2(PREFETCH_SETTINGS, 0,
DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
REG_SET(PREFETCH_SETTINGS_C, 0,
VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
REG_SET_2(VBLANK_PARAMETERS_0, 0,
DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
REG_SET_2(FLIP_PARAMETERS_0, 0,
DST_Y_PER_VM_FLIP, dlg_attr->dst_y_per_vm_flip,
DST_Y_PER_ROW_FLIP, dlg_attr->dst_y_per_row_flip);
REG_SET(VBLANK_PARAMETERS_3, 0,
REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
REG_SET(VBLANK_PARAMETERS_4, 0,
REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
REG_SET(FLIP_PARAMETERS_2, 0,
REFCYC_PER_META_CHUNK_FLIP_L, dlg_attr->refcyc_per_meta_chunk_flip_l);
REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
REG_SET(DCN_SURF0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
ttu_attr->refcyc_per_req_delivery_pre_l);
REG_SET(DCN_SURF1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
ttu_attr->refcyc_per_req_delivery_pre_c);
REG_SET(DCN_CUR0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
REG_SET(DCN_CUR1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur1);
REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
}
/* DCN2 (GFX10), the following GFX fields are deprecated. They can be set but they will not be used:
* NUM_BANKS
* NUM_SE
* NUM_RB_PER_SE
* RB_ALIGNED
* Other things can be defaulted, since they never change:
* PIPE_ALIGNED = 0
* META_LINEAR = 0
* In GFX10, only these apply:
* PIPE_INTERLEAVE
* NUM_PIPES
* MAX_COMPRESSED_FRAGS
* SW_MODE
*/
static void hubp2_program_tiling(
struct dcn20_hubp *hubp2,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_3(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
REG_UPDATE_4(DCSURF_TILING_CONFIG,
SW_MODE, info->gfx9.swizzle,
META_LINEAR, 0,
RB_ALIGNED, 0,
PIPE_ALIGNED, 0);
}
void hubp2_program_size(
struct hubp *hubp,
enum surface_pixel_format format,
const struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
bool use_pitch_c = false;
/* Program data and meta surface pitch (calculation from addrlib)
* 444 or 420 luma
*/
use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
&& format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END;
use_pitch_c = use_pitch_c
|| (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
if (use_pitch_c) {
ASSERT(plane_size->chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
pitch = plane_size->surface_pitch - 1;
meta_pitch = dcc->meta_pitch - 1;
pitch_c = plane_size->chroma_pitch - 1;
meta_pitch_c = dcc->meta_pitch_c - 1;
} else {
pitch = plane_size->surface_pitch - 1;
meta_pitch = dcc->meta_pitch - 1;
pitch_c = 0;
meta_pitch_c = 0;
}
if (!dcc->enable) {
meta_pitch = 0;
meta_pitch_c = 0;
}
REG_UPDATE_2(DCSURF_SURFACE_PITCH,
PITCH, pitch, META_PITCH, meta_pitch);
use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN;
use_pitch_c = use_pitch_c
|| (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
if (use_pitch_c)
REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
}
void hubp2_program_rotation(
struct hubp *hubp,
enum dc_rotation_angle rotation,
bool horizontal_mirror)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t mirror;
if (horizontal_mirror)
mirror = 1;
else
mirror = 0;
/* Program rotation angle and horz mirror - no mirror */
if (rotation == ROTATION_ANGLE_0)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 0,
H_MIRROR_EN, mirror);
else if (rotation == ROTATION_ANGLE_90)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 1,
H_MIRROR_EN, mirror);
else if (rotation == ROTATION_ANGLE_180)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 2,
H_MIRROR_EN, mirror);
else if (rotation == ROTATION_ANGLE_270)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 3,
H_MIRROR_EN, mirror);
}
void hubp2_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
uint32_t dcc_en = enable ? 1 : 0;
uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, dcc_en,
PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
SECONDARY_SURFACE_DCC_EN, dcc_en,
SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
}
void hubp2_program_pixel_format(
struct hubp *hubp,
enum surface_pixel_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t red_bar = 3;
uint32_t blue_bar = 2;
/* swap for ABGR format */
if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
red_bar = 2;
blue_bar = 3;
}
REG_UPDATE_2(HUBPRET_CONTROL,
CROSSBAR_SRC_CB_B, blue_bar,
CROSSBAR_SRC_CR_R, red_bar);
/* Mapping is same as ipp programming (cnvc) */
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 1);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 3);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 8);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 24);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 65);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 64);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 67);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 112);
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 113);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 114);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 118);
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 116,
ALPHA_PLANE_EN, 0);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 116,
ALPHA_PLANE_EN, 1);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
/* don't see the need to program the xbar in DCN 1.0 */
}
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp2_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
hubp2_program_tiling(hubp2, tiling_info, format);
hubp2_program_size(hubp, format, plane_size, dcc);
hubp2_program_rotation(hubp, rotation, horizontal_mirror);
hubp2_program_pixel_format(hubp, format);
}
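/*
 * hubp2_get_lines_per_chunk() - pick the cursor lines-per-chunk setting
 * from the cursor width and color format; wider cursors and 64-bit FP
 * formats fetch fewer lines per chunk. For example, a 64 pixel wide
 * pre-multiplied-alpha color cursor falls in the 33..64 bucket and uses
 * CURSOR_LINE_PER_CHUNK_8.
 */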
enum cursor_lines_per_chunk hubp2_get_lines_per_chunk(
unsigned int cursor_width,
enum dc_cursor_color_format cursor_mode)
{
enum cursor_lines_per_chunk line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
if (cursor_mode == CURSOR_MODE_MONO)
line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
else if (cursor_mode == CURSOR_MODE_COLOR_1BIT_AND ||
cursor_mode == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
cursor_mode == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
if (cursor_width >= 1 && cursor_width <= 32)
line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
else if (cursor_width >= 33 && cursor_width <= 64)
line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
else if (cursor_width >= 65 && cursor_width <= 128)
line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
else if (cursor_width >= 129 && cursor_width <= 256)
line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
} else if (cursor_mode == CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED ||
cursor_mode == CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED) {
if (cursor_width >= 1 && cursor_width <= 16)
line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
else if (cursor_width >= 17 && cursor_width <= 32)
line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
else if (cursor_width >= 33 && cursor_width <= 64)
line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
else if (cursor_width >= 65 && cursor_width <= 128)
line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
else if (cursor_width >= 129 && cursor_width <= 256)
line_per_chunk = CURSOR_LINE_PER_CHUNK_1;
}
return line_per_chunk;
}
void hubp2_cursor_set_attributes(
struct hubp *hubp,
const struct dc_cursor_attributes *attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
attr->width, attr->color_format);
hubp->curs_attr = *attr;
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
REG_UPDATE(CURSOR_SURFACE_ADDRESS,
CURSOR_SURFACE_ADDRESS, attr->address.low_part);
REG_UPDATE_2(CURSOR_SIZE,
CURSOR_WIDTH, attr->width,
CURSOR_HEIGHT, attr->height);
REG_UPDATE_4(CURSOR_CONTROL,
CURSOR_MODE, attr->color_format,
CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
CURSOR_PITCH, hw_pitch,
CURSOR_LINES_PER_CHUNK, lpc);
REG_SET_2(CURSOR_SETTINGS, 0,
/* no shift of the cursor HDL schedule */
CURSOR0_DST_Y_OFFSET, 0,
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
hubp->att.SURFACE_ADDR = attr->address.low_part;
hubp->att.size.bits.width = attr->width;
hubp->att.size.bits.height = attr->height;
hubp->att.cur_ctl.bits.mode = attr->color_format;
hubp->cur_rect.w = attr->width;
hubp->cur_rect.h = attr->height;
hubp->att.cur_ctl.bits.pitch = hw_pitch;
hubp->att.cur_ctl.bits.line_per_chunk = lpc;
hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
hubp->att.settings.bits.dst_y_offset = 0;
hubp->att.settings.bits.chunk_hdl_adjust = 3;
}
void hubp2_dmdata_set_attributes(
struct hubp *hubp,
const struct dc_dmdata_attributes *attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
if (attr->dmdata_mode == DMDATA_HW_MODE) {
/* set to HW mode */
REG_UPDATE(DMDATA_CNTL,
DMDATA_MODE, 1);
/* for DMDATA flip, need to use SURFACE_UPDATE_LOCK */
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 1);
/* toggle DMDATA_UPDATED and set repeat and size */
REG_UPDATE(DMDATA_CNTL,
DMDATA_UPDATED, 0);
REG_UPDATE_3(DMDATA_CNTL,
DMDATA_UPDATED, 1,
DMDATA_REPEAT, attr->dmdata_repeat,
DMDATA_SIZE, attr->dmdata_size);
/* set DMDATA address */
REG_WRITE(DMDATA_ADDRESS_LOW, attr->address.low_part);
REG_UPDATE(DMDATA_ADDRESS_HIGH,
DMDATA_ADDRESS_HIGH, attr->address.high_part);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 0);
} else {
/* set to SW mode before loading data */
REG_SET(DMDATA_CNTL, 0,
DMDATA_MODE, 0);
/* toggle DMDATA_SW_UPDATED to start loading sequence */
REG_UPDATE(DMDATA_SW_CNTL,
DMDATA_SW_UPDATED, 0);
REG_UPDATE_3(DMDATA_SW_CNTL,
DMDATA_SW_UPDATED, 1,
DMDATA_SW_REPEAT, attr->dmdata_repeat,
DMDATA_SW_SIZE, attr->dmdata_size);
/* load data into hubp dmdata buffer */
hubp2_dmdata_load(hubp, attr->dmdata_size, attr->dmdata_sw_data);
}
/* Note that DL_DELTA must be programmed if we want to use TTU mode */
REG_SET_3(DMDATA_QOS_CNTL, 0,
DMDATA_QOS_MODE, attr->dmdata_qos_mode,
DMDATA_QOS_LEVEL, attr->dmdata_qos_level,
DMDATA_DL_DELTA, attr->dmdata_dl_delta);
}
void hubp2_dmdata_load(
struct hubp *hubp,
uint32_t dmdata_sw_size,
const uint32_t *dmdata_sw_data)
{
int i;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
/* load dmdata into HUBP buffer in SW mode */
for (i = 0; i < dmdata_sw_size / 4; i++)
REG_WRITE(DMDATA_SW_DATA, dmdata_sw_data[i]);
}
bool hubp2_dmdata_status_done(struct hubp *hubp)
{
uint32_t status;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_GET(DMDATA_STATUS, DMDATA_DONE, &status);
return (status == 1);
}
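/*
 * hubp2_program_surface_flip_and_addr() - program the flip type, VMID and
 * the primary/secondary (and meta) surface addresses for graphics,
 * progressive video or stereo address types. High address parts are
 * written before the low parts because the low-part write latches the
 * whole address.
 */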
bool hubp2_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
//program flip type
REG_UPDATE(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_TYPE, flip_immediate);
// Program VMID reg
REG_UPDATE(VMID_SETTINGS_0,
VMID, address->vmid);
/* HW automatically latches the rest of the address registers on a write
* to DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used
*
* program high first and then the low addr, order matters!
*/
switch (address->type) {
case PLN_ADDR_TYPE_GRAPHICS:
/* DCN1.0 does not support const color
* TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
* base on address->grph.dcc_const_color
* x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
* x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
*/
if (address->grph.addr.quad_part == 0)
break;
REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
if (address->grph.meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->grph.meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->grph.meta_addr.low_part);
}
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->grph.addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->grph.addr.low_part);
break;
case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
if (address->video_progressive.luma_addr.quad_part == 0
|| address->video_progressive.chroma_addr.quad_part == 0)
break;
REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->video_progressive.luma_meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
address->video_progressive.chroma_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
PRIMARY_META_SURFACE_ADDRESS_C,
address->video_progressive.chroma_meta_addr.low_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->video_progressive.luma_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->video_progressive.luma_meta_addr.low_part);
}
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_SURFACE_ADDRESS_HIGH_C,
address->video_progressive.chroma_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
PRIMARY_SURFACE_ADDRESS_C,
address->video_progressive.chroma_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->video_progressive.luma_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->video_progressive.luma_addr.low_part);
break;
case PLN_ADDR_TYPE_GRPH_STEREO:
if (address->grph_stereo.left_addr.quad_part == 0)
break;
if (address->grph_stereo.right_addr.quad_part == 0)
break;
REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
SECONDARY_SURFACE_TMZ, address->tmz_surface,
SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->grph_stereo.right_meta_addr.quad_part != 0) {
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_META_SURFACE_ADDRESS_HIGH,
address->grph_stereo.right_meta_addr.high_part);
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
SECONDARY_META_SURFACE_ADDRESS,
address->grph_stereo.right_meta_addr.low_part);
}
if (address->grph_stereo.left_meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->grph_stereo.left_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->grph_stereo.left_meta_addr.low_part);
}
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_SURFACE_ADDRESS_HIGH,
address->grph_stereo.right_addr.high_part);
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
SECONDARY_SURFACE_ADDRESS,
address->grph_stereo.right_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->grph_stereo.left_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->grph_stereo.left_addr.low_part);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
hubp->request_address = *address;
return true;
}
void hubp2_enable_triplebuffer(
struct hubp *hubp,
bool enable)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t triple_buffer_en = 0;
bool tri_buffer_en;
REG_GET(DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, &triple_buffer_en);
tri_buffer_en = (triple_buffer_en == 1);
if (tri_buffer_en != enable) {
REG_UPDATE(DCSURF_FLIP_CONTROL2,
SURFACE_TRIPLE_BUFFER_ENABLE, enable ? DC_TRIPLEBUFFER_ENABLE : DC_TRIPLEBUFFER_DISABLE);
}
}
bool hubp2_is_triplebuffer_enabled(
struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t triple_buffer_en = 0;
REG_GET(DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, &triple_buffer_en);
return (bool)triple_buffer_en;
}
void hubp2_set_flip_control_surface_gsl(struct hubp *hubp, bool enable)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, enable ? 1 : 0);
}
bool hubp2_is_flip_pending(struct hubp *hubp)
{
uint32_t flip_pending = 0;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
struct dc_plane_address earliest_inuse_address;
if (hubp && hubp->power_gated)
return false;
REG_GET(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_PENDING, &flip_pending);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
if (flip_pending)
return true;
if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
return false;
}
void hubp2_set_blank(struct hubp *hubp, bool blank)
{
hubp2_set_blank_regs(hubp, blank);
if (blank) {
hubp->mpcc_id = 0xf;
hubp->opp_id = OPP_ID_INVALID;
}
}
void hubp2_set_blank_regs(struct hubp *hubp, bool blank)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t blank_en = blank ? 1 : 0;
if (blank) {
uint32_t reg_val = REG_READ(DCHUBP_CNTL);
if (reg_val) {
/* init sequence workaround: in case HUBP is
* power gated, this wait would time out.
*
* reg_val was just read back as non-zero; had it stayed 0,
* the HUBP would be power gated and the wait is skipped.
*/
REG_WAIT(DCHUBP_CNTL,
HUBP_NO_OUTSTANDING_REQ, 1,
1, 100000);
}
}
REG_UPDATE_2(DCHUBP_CNTL,
HUBP_BLANK_EN, blank_en,
HUBP_TTU_DISABLE, 0);
}
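/*
 * hubp2_cursor_set_position() - translate the cursor position into
 * viewport-relative offsets, adjust the hotspot for rotation/mirror,
 * disable the cursor when it is fully outside the viewport, scale the
 * destination X offset by the ref/pixel clock ratio and the horizontal
 * scale ratio, then program the position registers and cache the values.
 */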
void hubp2_cursor_set_position(
struct hubp *hubp,
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
int x_pos = pos->x - param->viewport.x;
int y_pos = pos->y - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
int src_x_offset = x_pos - pos->x_hotspot;
int src_y_offset = y_pos - pos->y_hotspot;
int cursor_height = (int)hubp->curs_attr.height;
int cursor_width = (int)hubp->curs_attr.width;
uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
hubp->curs_pos = *pos;
/*
* Guard against cursor_set_position() being called with invalid
* attributes
*
* TODO: Look at combining cursor_set_position() and
* cursor_set_attributes() into cursor_update()
*/
if (hubp->curs_attr.address.quad_part == 0)
return;
// Transform cursor width / height and hotspots for offset calculations
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
swap(cursor_height, cursor_width);
swap(x_hotspot, y_hotspot);
if (param->rotation == ROTATION_ANGLE_90) {
// hotspot = (-y, x)
src_x_offset = x_pos - (cursor_width - x_hotspot);
src_y_offset = y_pos - y_hotspot;
} else if (param->rotation == ROTATION_ANGLE_270) {
// hotspot = (y, -x)
src_x_offset = x_pos - x_hotspot;
src_y_offset = y_pos - (cursor_height - y_hotspot);
}
} else if (param->rotation == ROTATION_ANGLE_180) {
// hotspot = (-x, -y)
if (!param->mirror)
src_x_offset = x_pos - (cursor_width - x_hotspot);
src_y_offset = y_pos - (cursor_height - y_hotspot);
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
dst_x_offset *= param->ref_clk_khz;
dst_x_offset /= param->pixel_clk_khz;
ASSERT(param->h_scale_ratio.value);
if (param->h_scale_ratio.value)
dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
CURSOR_HOT_SPOT_X, pos->x_hotspot,
CURSOR_HOT_SPOT_Y, pos->y_hotspot);
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
hubp->pos.position.bits.x_pos = pos->x;
hubp->pos.position.bits.y_pos = pos->y;
hubp->pos.hot_spot.bits.x_hot = pos->x_hotspot;
hubp->pos.hot_spot.bits.y_hot = pos->y_hotspot;
hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
/* Cursor Rectangle Cache
* Cursor bitmaps have different hotspot values
* There's a possibility that the above logic returns a negative value,
* so we clamp them to 0
*/
if (src_x_offset < 0)
src_x_offset = 0;
if (src_y_offset < 0)
src_y_offset = 0;
/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
hubp->cur_rect.x = src_x_offset + param->viewport.x;
hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t clk_enable = enable ? 1 : 0;
REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable);
}
void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
void hubp2_clear_underflow(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
}
void hubp2_read_state_common(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
struct dcn_hubp_state *s = &hubp2->state;
struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
/* Requester */
REG_GET(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
REG_GET_4(DCN_EXPANSION_MODE,
DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR, &rq_regs->aperture_high_addr);
REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR,
MC_VM_SYSTEM_APERTURE_LOW_ADDR, &rq_regs->aperture_low_addr);
/* DLG - Per hubp */
REG_GET_2(BLANK_OFFSET_0,
REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
REG_GET(BLANK_OFFSET_1,
MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
REG_GET(DST_DIMENSIONS,
REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
REG_GET_2(DST_AFTER_SCALER,
REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
if (REG(PREFETCH_SETTINS))
REG_GET_2(PREFETCH_SETTINS,
DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
else
REG_GET_2(PREFETCH_SETTINGS,
DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
REG_GET_2(VBLANK_PARAMETERS_0,
DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
REG_GET(REF_FREQ_TO_PIX_FREQ,
REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
/* DLG - Per luma/chroma */
REG_GET(VBLANK_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
REG_GET(VBLANK_PARAMETERS_3,
REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
if (REG(NOM_PARAMETERS_0))
REG_GET(NOM_PARAMETERS_0,
DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
if (REG(NOM_PARAMETERS_1))
REG_GET(NOM_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
REG_GET(NOM_PARAMETERS_4,
DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
REG_GET(NOM_PARAMETERS_5,
REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
REG_GET_2(PER_LINE_DELIVERY_PRE,
REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
REG_GET_2(PER_LINE_DELIVERY,
REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
if (REG(PREFETCH_SETTINS_C))
REG_GET(PREFETCH_SETTINS_C,
VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
else
REG_GET(PREFETCH_SETTINGS_C,
VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
REG_GET(VBLANK_PARAMETERS_2,
REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
REG_GET(VBLANK_PARAMETERS_4,
REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
if (REG(NOM_PARAMETERS_2))
REG_GET(NOM_PARAMETERS_2,
DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
if (REG(NOM_PARAMETERS_3))
REG_GET(NOM_PARAMETERS_3,
REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
REG_GET(NOM_PARAMETERS_6,
DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
REG_GET(NOM_PARAMETERS_7,
REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
/* TTU - per hubp */
REG_GET_2(DCN_TTU_QOS_WM,
QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
REG_GET_2(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
REG_GET_3(DCN_SURF0_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
REG_GET(DCN_SURF0_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE,
&ttu_attr->refcyc_per_req_delivery_pre_l);
REG_GET_3(DCN_SURF1_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
REG_GET(DCN_SURF1_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE,
&ttu_attr->refcyc_per_req_delivery_pre_c);
/* Rest of hubp */
REG_GET(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, &s->pixel_format);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo);
REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
PRI_VIEWPORT_WIDTH, &s->viewport_width,
PRI_VIEWPORT_HEIGHT, &s->viewport_height);
REG_GET_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, &s->rotation_angle,
H_MIRROR_EN, &s->h_mirror_en);
REG_GET(DCSURF_TILING_CONFIG,
SW_MODE, &s->sw_mode);
REG_GET(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
REG_GET_3(DCHUBP_CNTL,
HUBP_BLANK_EN, &s->blank_en,
HUBP_TTU_DISABLE, &s->ttu_disable,
HUBP_UNDERFLOW_STATUS, &s->underflow_status);
REG_GET(HUBP_CLK_CNTL,
HUBP_CLOCK_ENABLE, &s->clock_en);
REG_GET(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &s->min_ttu_vblank);
REG_GET_2(DCN_TTU_QOS_WM,
QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS,
PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_lo);
REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH,
PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_hi);
REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS,
PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_lo);
REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH,
PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_hi);
}
void hubp2_read_state(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
struct dcn_hubp_state *s = &hubp2->state;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
hubp2_read_state_common(hubp);
REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
}
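/*
 * hubp2_validate_dml_output() - read back the programmed requestor, DLG
 * and TTU registers and log every field that differs from the
 * DML-computed values ("Expected" vs "Actual") for debugging.
 */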
static void hubp2_validate_dml_output(struct hubp *hubp,
struct dc_context *ctx,
struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
DC_LOGGER_INIT(ctx->logger);
DC_LOG_DEBUG("DML Validation | Running Validation");
/* Requestor Regs */
REG_GET(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
REG_GET_4(DCN_EXPANSION_MODE,
DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size,
SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size);
if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
/* DLG - Per hubp */
REG_GET_2(BLANK_OFFSET_0,
REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
REG_GET(BLANK_OFFSET_1,
MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
REG_GET(DST_DIMENSIONS,
REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
REG_GET_2(DST_AFTER_SCALER,
REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
REG_GET(REF_FREQ_TO_PIX_FREQ,
REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
/* DLG - Per luma/chroma */
REG_GET(VBLANK_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
if (REG(NOM_PARAMETERS_0))
REG_GET(NOM_PARAMETERS_0,
DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
if (REG(NOM_PARAMETERS_1))
REG_GET(NOM_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
REG_GET(NOM_PARAMETERS_4,
DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
REG_GET(NOM_PARAMETERS_5,
REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
REG_GET_2(PER_LINE_DELIVERY,
REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
REG_GET_2(PER_LINE_DELIVERY_PRE,
REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
REG_GET(VBLANK_PARAMETERS_2,
REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
if (REG(NOM_PARAMETERS_2))
REG_GET(NOM_PARAMETERS_2,
DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
if (REG(NOM_PARAMETERS_3))
REG_GET(NOM_PARAMETERS_3,
REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
REG_GET(NOM_PARAMETERS_6,
DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
REG_GET(NOM_PARAMETERS_7,
REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
REG_GET(VBLANK_PARAMETERS_3,
REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
REG_GET(VBLANK_PARAMETERS_4,
REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
/* TTU - per hubp */
REG_GET_2(DCN_TTU_QOS_WM,
QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
REG_GET_3(DCN_SURF0_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
REG_GET_3(DCN_SURF1_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
REG_GET_3(DCN_CUR0_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
REG_GET(FLIP_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
REG_GET(DCN_CUR0_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
REG_GET(DCN_CUR1_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
REG_GET(DCN_SURF0_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
REG_GET(DCN_SURF1_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
}
static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp2_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
.hubp_setup = hubp2_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp2_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
.hubp_clk_cntl = hubp2_clk_cntl,
.hubp_vtg_sel = hubp2_vtg_sel,
.dmdata_set_attributes = hubp2_dmdata_set_attributes,
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp2_read_state,
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
.validate_dml_output = hubp2_validate_dml_output,
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
};
bool hubp2_construct(
struct dcn20_hubp *hubp2,
struct dc_context *ctx,
uint32_t inst,
const struct dcn_hubp2_registers *hubp_regs,
const struct dcn_hubp2_shift *hubp_shift,
const struct dcn_hubp2_mask *hubp_mask)
{
hubp2->base.funcs = &dcn20_hubp_funcs;
hubp2->base.ctx = ctx;
hubp2->hubp_regs = hubp_regs;
hubp2->hubp_shift = hubp_shift;
hubp2->hubp_mask = hubp_mask;
hubp2->base.inst = inst;
hubp2->base.opp_id = OPP_ID_INVALID;
hubp2->base.mpcc_id = 0xf;
return true;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "link_encoder.h"
#include "dcn20_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
enc10->base.ctx->logger
#define REG(reg)\
(enc10->link_regs->reg)
#undef FN
#define FN(reg_name, field_name) \
enc10->link_shift->field_name, enc10->link_mask->field_name
#define IND_REG(index) \
(enc10->link_regs->index)
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
static struct mpll_cfg dcn2_mpll_cfg[] = {
// RBR
{
.hdmimode_enable = 1,
.ref_range = 3,
.ref_clk_mpllb_div = 2,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 226,
.mpllb_fracn_en = 1,
.mpllb_fracn_quot = 39321,
.mpllb_fracn_rem = 3,
.mpllb_fracn_den = 5,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 38221,
.mpllb_ssc_stepsize = 49314,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 2,
.tx_vboost_lvl = 4,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 2,
.mpllb_ana_cp_int = 7,
.mpllb_ana_cp_prop = 18,
.hdmi_pixel_clk_div = 0,
},
// HBR
{
.hdmimode_enable = 1,
.ref_range = 3,
.ref_clk_mpllb_div = 2,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 184,
.mpllb_fracn_en = 0,
.mpllb_fracn_quot = 0,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 31850,
.mpllb_ssc_stepsize = 41095,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 1,
.tx_vboost_lvl = 4,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 3,
.mpllb_ana_cp_int = 7,
.mpllb_ana_cp_prop = 18,
.hdmi_pixel_clk_div = 0,
},
//HBR2
{
.hdmimode_enable = 1,
.ref_range = 3,
.ref_clk_mpllb_div = 2,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 184,
.mpllb_fracn_en = 0,
.mpllb_fracn_quot = 0,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 31850,
.mpllb_ssc_stepsize = 41095,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 0,
.tx_vboost_lvl = 4,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 3,
.mpllb_ana_cp_int = 7,
.mpllb_ana_cp_prop = 18,
.hdmi_pixel_clk_div = 0,
},
//HBR3
{
.hdmimode_enable = 1,
.ref_range = 3,
.ref_clk_mpllb_div = 2,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 292,
.mpllb_fracn_en = 0,
.mpllb_fracn_quot = 0,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 47776,
.mpllb_ssc_stepsize = 61642,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 0,
.tx_vboost_lvl = 4,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 0,
.mpllb_ana_cp_int = 7,
.mpllb_ana_cp_prop = 18,
.hdmi_pixel_clk_div = 0,
},
};
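/* dcn2_mpll_cfg[] is indexed by DP link rate: [0] RBR, [1] HBR, [2] HBR2,
 * [3] HBR3, matching the switch on link_settings->link_rate in
 * update_cfg_data() below.
 */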
void enc2_fec_set_enable(struct link_encoder *enc, bool enable)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
DC_LOG_DSC("%s FEC at link encoder inst %d",
enable ? "Enabling" : "Disabling", enc->id.enum_id);
REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);
}
void enc2_fec_set_ready(struct link_encoder *enc, bool ready)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, ready);
}
bool enc2_fec_is_active(struct link_encoder *enc)
{
uint32_t active = 0;
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &active);
return (active != 0);
}
/* this function reads link encoder register fields to be logged later in dcn10_log_hw_state
* into a link_enc_state struct.
*/
void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status);
REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);
}
static bool update_cfg_data(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings,
struct dpcssys_phy_seq_cfg *cfg)
{
int i;
cfg->load_sram_fw = false;
for (i = 0; i < link_settings->lane_count; i++)
cfg->lane_en[i] = true;
switch (link_settings->link_rate) {
case LINK_RATE_LOW:
cfg->mpll_cfg = dcn2_mpll_cfg[0];
break;
case LINK_RATE_HIGH:
cfg->mpll_cfg = dcn2_mpll_cfg[1];
break;
case LINK_RATE_HIGH2:
cfg->mpll_cfg = dcn2_mpll_cfg[2];
break;
case LINK_RATE_HIGH3:
cfg->mpll_cfg = dcn2_mpll_cfg[3];
break;
default:
DC_LOG_ERROR("%s: No supported link rate found %X!\n",
__func__, link_settings->link_rate);
return false;
}
return true;
}
void dcn20_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct dcn20_link_encoder *enc20 = (struct dcn20_link_encoder *) enc10;
struct dpcssys_phy_seq_cfg *cfg = &enc20->phy_seq_cfg;
if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
return;
}
if (!update_cfg_data(enc10, link_settings, cfg))
return;
enc1_configure_encoder(enc10, link_settings);
dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
}
void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t is_in_usb_c_dp4_mode = 0;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
/* in usb c dp2 mode, max lane count is 2 */
if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
if (!is_in_usb_c_dp4_mode)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
}
bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t dp_alt_mode_disable = 0;
bool is_usb_c_alt_mode = false;
if (enc->features.flags.bits.DP_IS_USB_C) {
/* if value == 1 alt mode is disabled, otherwise it is enabled */
REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
}
return is_usb_c_alt_mode;
}
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
#define AUX_REG_READ(reg_name) \
dm_read_reg(CTX, AUX_REG(reg_name))
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
void enc2_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
/*
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
*/
/*
AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
AUX_RX_START_WINDOW = 1 [6:4]
AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
} else {
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
}
// AUX_DPHY_TX_REF_CONTROL:AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
// 27MHz -> 0xd
// 100MHz -> 0x32
// 48MHz -> 0x18
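// The table above is consistent with divider ~= refclk_MHz / 2 for a ~2 MHz
// reference: 100 / 2 = 50 = 0x32, 48 / 2 = 24 = 0x18, and 27 MHz rounds to
// 13 = 0xd.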
// Set TMDS_CTL0 to 1. This is a legacy setting.
REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);
dcn10_aux_initialize(enc10);
}
static const struct link_encoder_funcs dcn20_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dcn10_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dcn10_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
.enable_hpd = dcn10_link_encoder_enable_hpd,
.disable_hpd = dcn10_link_encoder_disable_hpd,
.is_dig_enabled = dcn10_is_dig_enabled,
.destroy = dcn10_link_encoder_destroy,
.fec_set_enable = enc2_fec_set_enable,
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
.get_dig_mode = dcn10_get_dig_mode,
.get_dig_frontend = dcn10_get_dig_frontend,
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn20_link_encoder_construct(
struct dcn20_link_encoder *enc20,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask)
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
struct dcn10_link_encoder *enc10 = &enc20->enc10;
enc10->base.funcs = &dcn20_link_enc_funcs;
enc10->base.ctx = init_data->ctx;
enc10->base.id = init_data->encoder;
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
/* set the flag to indicate whether driver poll the I2C data pin
* while doing the DP sink detect
*/
/* if (dal_adapter_service_is_feature_supported(as,
FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
enc10->base.features.flags.bits.
DP_SINK_DETECT_POLL_DATA_PIN = true;*/
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
SIGNAL_TYPE_LVDS |
SIGNAL_TYPE_DISPLAY_PORT |
SIGNAL_TYPE_DISPLAY_PORT_MST |
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
* SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
* preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI and LVDS.
* The preferred DIG assignment is decided by board design.
* For DCE 8.0 there are only up to 6 UNIPHYs; we assume board design
* and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
* By this, adding DIGG should not hurt DCE 8.0.
* This lets DCE 8.1 share as much as possible with DCE 8.0.
*/
enc10->link_regs = link_regs;
enc10->aux_regs = aux_regs;
enc10->hpd_regs = hpd_regs;
enc10->link_shift = link_shift;
enc10->link_mask = link_mask;
switch (enc10->base.transmitter) {
case TRANSMITTER_UNIPHY_A:
enc10->base.preferred_engine = ENGINE_ID_DIGA;
break;
case TRANSMITTER_UNIPHY_B:
enc10->base.preferred_engine = ENGINE_ID_DIGB;
break;
case TRANSMITTER_UNIPHY_C:
enc10->base.preferred_engine = ENGINE_ID_DIGC;
break;
case TRANSMITTER_UNIPHY_D:
enc10->base.preferred_engine = ENGINE_ID_DIGD;
break;
case TRANSMITTER_UNIPHY_E:
enc10->base.preferred_engine = ENGINE_ID_DIGE;
break;
case TRANSMITTER_UNIPHY_F:
enc10->base.preferred_engine = ENGINE_ID_DIGF;
break;
case TRANSMITTER_UNIPHY_G:
enc10->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
ASSERT_CRITICAL(false);
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
/* default to one to mirror Windows behavior */
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
enc10->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
if (result == BP_RESULT_OK) {
enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
if (enc10->base.ctx->dc->debug.hdmi20_disable) {
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <drm/display/drm_dsc_helper.h>
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
/* Object I/F functions */
static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
static void dsc2_disconnect(struct display_stream_compressor *dsc);
static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
.dsc_enable = dsc2_enable,
.dsc_disable = dsc2_disable,
.dsc_disconnect = dsc2_disconnect,
};
/* Macro definitions for REG_SET macros */
#define CTX \
dsc20->base.ctx
#define REG(reg)\
dsc20->dsc_regs->reg
#undef FN
#define FN(reg_name, field_name) \
dsc20->dsc_shift->field_name, dsc20->dsc_mask->field_name
#define DC_LOGGER \
dsc->ctx->logger
enum dsc_bits_per_comp {
DSC_BPC_8 = 8,
DSC_BPC_10 = 10,
DSC_BPC_12 = 12,
DSC_BPC_UNKNOWN
};
/* API functions (external or via structure->function_pointer) */
void dsc2_construct(struct dcn20_dsc *dsc,
struct dc_context *ctx,
int inst,
const struct dcn20_dsc_registers *dsc_regs,
const struct dcn20_dsc_shift *dsc_shift,
const struct dcn20_dsc_mask *dsc_mask)
{
dsc->base.ctx = ctx;
dsc->base.inst = inst;
dsc->base.funcs = &dcn20_dsc_funcs;
dsc->dsc_regs = dsc_regs;
dsc->dsc_shift = dsc_shift;
dsc->dsc_mask = dsc_mask;
dsc->max_image_width = 5184;
}
#define DCN20_MAX_PIXEL_CLOCK_Mhz 1188
#define DCN20_MAX_DISPLAY_CLOCK_Mhz 1200
/* This returns the capabilities for a single DSC encoder engine. Number of slices and total throughput
* can be doubled, tripled etc. by using additional DSC engines.
*/
void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
{
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
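/* 0x21 packs the major revision in the low nibble (1) and the minor in the
 * high nibble (2), i.e. DSC 1.2 in the DPCD-style reversed layout noted above.
 */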
dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
dsc_enc_caps->lb_bit_depth = 13;
dsc_enc_caps->is_block_pred_supported = true;
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
/* Maximum total throughput with all the slices combined. This is different from how DP spec specifies it.
* Our decoder's total throughput in Pix/s is equal to DISPCLK. This is then shared between slices.
* The value below is the absolute maximum value. The actual throughput may be lower, but it'll always
* be sufficient to process the input pixel rate fed into a single DSC engine.
*/
dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz;
/* For pixel clock bigger than a single-pipe limit we'll need two engines, which then doubles our
* throughput and number of slices, but also introduces a lower limit of 2 slices
*/
if (pixel_clock_100Hz >= DCN20_MAX_PIXEL_CLOCK_Mhz*10000) {
dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 0;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 2;
}
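/* Example: a timing whose pixel clock reaches 1188 MHz or more cannot be fed
 * through a single DSC engine here, so 1-slice support is dropped, 8-slice
 * support is added, and the advertised throughput assumes two engines
 * (ODM combine) running in parallel.
 */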
// TODO DSC: This is actually an image width limitation, not a slice width limitation. It should be added to the criteria for using ODM.
dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
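/* With bpp_increment_div = 16, a target such as 8.5 bpp is expressed as
 * 8.5 * 16 = 136 in the 1/16-bpp fixed-point encoding used throughout DSC.
 */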
}
/* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
struct dsc_optc_config dsc_optc_cfg;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
if (dsc_cfg->pic_width > dsc20->max_image_width)
return false;
return dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, &dsc_optc_cfg);
}
void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config)
{
DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h);
DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v);
DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)",
config->dc_dsc_cfg.bits_per_pixel,
config->dc_dsc_cfg.bits_per_pixel / 16,
((config->dc_dsc_cfg.bits_per_pixel % 16) * 10000) / 16);
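/* bits_per_pixel is u6.4 fixed point (1/16-bpp units), so e.g. a raw value of
 * 128 logs as "128 (8.0000)" and 137 logs as "137 (8.5625)".
 */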
DC_LOG_DSC("\tcolor_depth %d", config->color_depth);
}
static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg)
{
bool is_config_ok;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
dsc_config_log(dsc, dsc_cfg);
is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
ASSERT(is_config_ok);
DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):");
dsc_log_pps(dsc, &dsc20->reg_vals.pps);
dsc_write_to_registers(dsc, &dsc20->reg_vals);
}
bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps)
{
bool is_config_ok;
struct dsc_reg_values dsc_reg_vals;
struct dsc_optc_config dsc_optc_cfg;
memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
dsc_config_log(dsc, dsc_cfg);
DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
is_config_ok = dsc_prepare_config(dsc_cfg, &dsc_reg_vals, &dsc_optc_cfg);
ASSERT(is_config_ok);
drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc_reg_vals.pps);
dsc_log_pps(dsc, &dsc_reg_vals.pps);
return is_config_ok;
}
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
int dsc_clock_en;
int dsc_fw_config;
int enabled_opp_pipe;
DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
ASSERT(0);
}
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 1);
REG_UPDATE_2(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 1,
DSCRM_DSC_OPP_PIPE_SOURCE, opp_pipe);
}
static void dsc2_disable(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
int dsc_clock_en;
int dsc_fw_config;
int enabled_opp_pipe;
DC_LOG_DSC("disable DSC %d", dsc->inst);
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
if (!dsc_clock_en || !dsc_fw_config) {
DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
ASSERT(0);
}
REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 0);
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 0);
}
static void dsc2_disconnect(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
DC_LOG_DSC("disconnect DSC %d", dsc->inst);
REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 0);
}
/* This module's internal functions */
void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps)
{
int i;
int bits_per_pixel = pps->bits_per_pixel;
DC_LOG_DSC("\tdsc_version_major %d", pps->dsc_version_major);
DC_LOG_DSC("\tdsc_version_minor %d", pps->dsc_version_minor);
DC_LOG_DSC("\tbits_per_component %d", pps->bits_per_component);
DC_LOG_DSC("\tline_buf_depth %d", pps->line_buf_depth);
DC_LOG_DSC("\tblock_pred_enable %d", pps->block_pred_enable);
DC_LOG_DSC("\tconvert_rgb %d", pps->convert_rgb);
DC_LOG_DSC("\tsimple_422 %d", pps->simple_422);
DC_LOG_DSC("\tvbr_enable %d", pps->vbr_enable);
DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)", bits_per_pixel, bits_per_pixel / 16, ((bits_per_pixel % 16) * 10000) / 16);
DC_LOG_DSC("\tpic_height %d", pps->pic_height);
DC_LOG_DSC("\tpic_width %d", pps->pic_width);
DC_LOG_DSC("\tslice_height %d", pps->slice_height);
DC_LOG_DSC("\tslice_width %d", pps->slice_width);
DC_LOG_DSC("\tslice_chunk_size %d", pps->slice_chunk_size);
DC_LOG_DSC("\tinitial_xmit_delay %d", pps->initial_xmit_delay);
DC_LOG_DSC("\tinitial_dec_delay %d", pps->initial_dec_delay);
DC_LOG_DSC("\tinitial_scale_value %d", pps->initial_scale_value);
DC_LOG_DSC("\tscale_increment_interval %d", pps->scale_increment_interval);
DC_LOG_DSC("\tscale_decrement_interval %d", pps->scale_decrement_interval);
DC_LOG_DSC("\tfirst_line_bpg_offset %d", pps->first_line_bpg_offset);
DC_LOG_DSC("\tnfl_bpg_offset %d", pps->nfl_bpg_offset);
DC_LOG_DSC("\tslice_bpg_offset %d", pps->slice_bpg_offset);
DC_LOG_DSC("\tinitial_offset %d", pps->initial_offset);
DC_LOG_DSC("\tfinal_offset %d", pps->final_offset);
DC_LOG_DSC("\tflatness_min_qp %d", pps->flatness_min_qp);
DC_LOG_DSC("\tflatness_max_qp %d", pps->flatness_max_qp);
/* DC_LOG_DSC("\trc_parameter_set %d", pps->rc_parameter_set); */
DC_LOG_DSC("\tnative_420 %d", pps->native_420);
DC_LOG_DSC("\tnative_422 %d", pps->native_422);
DC_LOG_DSC("\tsecond_line_bpg_offset %d", pps->second_line_bpg_offset);
DC_LOG_DSC("\tnsl_bpg_offset %d", pps->nsl_bpg_offset);
DC_LOG_DSC("\tsecond_line_offset_adj %d", pps->second_line_offset_adj);
DC_LOG_DSC("\trc_model_size %d", pps->rc_model_size);
DC_LOG_DSC("\trc_edge_factor %d", pps->rc_edge_factor);
DC_LOG_DSC("\trc_quant_incr_limit0 %d", pps->rc_quant_incr_limit0);
DC_LOG_DSC("\trc_quant_incr_limit1 %d", pps->rc_quant_incr_limit1);
DC_LOG_DSC("\trc_tgt_offset_high %d", pps->rc_tgt_offset_high);
DC_LOG_DSC("\trc_tgt_offset_low %d", pps->rc_tgt_offset_low);
for (i = 0; i < NUM_BUF_RANGES - 1; i++)
DC_LOG_DSC("\trc_buf_thresh[%d] %d", i, pps->rc_buf_thresh[i]);
for (i = 0; i < NUM_BUF_RANGES; i++) {
DC_LOG_DSC("\trc_range_parameters[%d].range_min_qp %d", i, pps->rc_range_params[i].range_min_qp);
DC_LOG_DSC("\trc_range_parameters[%d].range_max_qp %d", i, pps->rc_range_params[i].range_max_qp);
DC_LOG_DSC("\trc_range_parameters[%d].range_bpg_offset %d", i, pps->rc_range_params[i].range_bpg_offset);
}
}
void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
{
uint8_t i;
rc->rc_model_size = override->rc_model_size;
for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++)
rc->rc_buf_thresh[i] = override->rc_buf_thresh[i];
for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) {
rc->qp_min[i] = override->rc_minqp[i];
rc->qp_max[i] = override->rc_maxqp[i];
rc->ofs[i] = override->rc_offset[i];
}
rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi;
rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo;
rc->rc_edge_factor = override->rc_edge_factor;
rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0;
rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1;
rc->initial_fullness_offset = override->initial_fullness_offset;
rc->initial_xmit_delay = override->initial_delay;
rc->flatness_min_qp = override->flatness_min_qp;
rc->flatness_max_qp = override->flatness_max_qp;
rc->flatness_det_thresh = override->flatness_det_thresh;
}
bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg)
{
struct dsc_parameters dsc_params;
struct rc_params rc;
/* Validate input parameters */
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_v);
ASSERT(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2);
ASSERT(dsc_cfg->pic_width);
ASSERT(dsc_cfg->pic_height);
ASSERT((dsc_cfg->dc_dsc_cfg.version_minor == 1 &&
(8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13)) ||
(dsc_cfg->dc_dsc_cfg.version_minor == 2 &&
((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) ||
dsc_cfg->dc_dsc_cfg.linebuf_depth == 0)));
ASSERT(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff); // 6.0 <= bits_per_pixel <= 63.9375
if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_h ||
!(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2) ||
!dsc_cfg->pic_width || !dsc_cfg->pic_height ||
!((dsc_cfg->dc_dsc_cfg.version_minor == 1 && // v1.1 line buffer depth range:
8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13) ||
(dsc_cfg->dc_dsc_cfg.version_minor == 2 && // v1.2 line buffer depth range:
((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) ||
dsc_cfg->dc_dsc_cfg.linebuf_depth == 0))) ||
!(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff)) {
dm_output_to_console("%s: Invalid parameters\n", __func__);
return false;
}
dsc_init_reg_values(dsc_reg_vals);
/* Copy input config */
dsc_reg_vals->pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple);
dsc_reg_vals->num_slices_h = dsc_cfg->dc_dsc_cfg.num_slices_h;
dsc_reg_vals->num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v;
dsc_reg_vals->pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor;
dsc_reg_vals->pps.pic_width = dsc_cfg->pic_width;
dsc_reg_vals->pps.pic_height = dsc_cfg->pic_height;
dsc_reg_vals->pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth);
dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
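/* 0xF appears to be a per-slice enable mask: reset the ICH at end-of-line on
 * all slices whenever ODM combine or more than one horizontal slice is used,
 * otherwise leave the reset disabled.
 */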
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
if (!(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) {
dm_output_to_console("%s: pix height %d not divisible by num_slices_v %d\n\n", __func__, dsc_cfg->pic_height, dsc_cfg->dc_dsc_cfg.num_slices_v);
return false;
}
dsc_reg_vals->bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1;
if (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32;
else
dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32 >> 1;
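/* bpp_x32 holds the target rate in 1/32-bpp units (the 1/16-bpp input shifted
 * left by one). Native 4:2:0/4:2:2 program the PPS field with this doubled
 * value, presumably per the DSC 1.2 native-format convention; other pixel
 * formats shift it back down to the usual 1/16-bpp units.
 */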
dsc_reg_vals->pps.convert_rgb = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ? 1 : 0;
dsc_reg_vals->pps.native_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422);
dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
calc_rc_params(&rc, &dsc_reg_vals->pps);
if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd)
dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd);
if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) {
dm_output_to_console("%s: DSC config failed\n", __func__);
return false;
}
dsc_update_from_dsc_parameters(dsc_reg_vals, &dsc_params);
dsc_optc_cfg->bytes_per_pixel = dsc_params.bytes_per_pixel;
dsc_optc_cfg->slice_width = dsc_reg_vals->pps.slice_width;
dsc_optc_cfg->is_pixel_format_444 = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ||
dsc_reg_vals->pixel_format == DSC_PIXFMT_YCBCR444 ||
dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422;
return true;
}
enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple)
{
enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN;
/* NOTE: We don't support DSC_PIXFMT_SIMPLE_YCBCR422 */
switch (dc_pix_enc) {
case PIXEL_ENCODING_RGB:
dsc_pix_fmt = DSC_PIXFMT_RGB;
break;
case PIXEL_ENCODING_YCBCR422:
if (is_ycbcr422_simple)
dsc_pix_fmt = DSC_PIXFMT_SIMPLE_YCBCR422;
else
dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR422;
break;
case PIXEL_ENCODING_YCBCR444:
dsc_pix_fmt = DSC_PIXFMT_YCBCR444;
break;
case PIXEL_ENCODING_YCBCR420:
dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR420;
break;
default:
dsc_pix_fmt = DSC_PIXFMT_UNKNOWN;
break;
}
ASSERT(dsc_pix_fmt != DSC_PIXFMT_UNKNOWN);
return dsc_pix_fmt;
}
enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth)
{
enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN;
switch (dc_color_depth) {
case COLOR_DEPTH_888:
bpc = DSC_BPC_8;
break;
case COLOR_DEPTH_101010:
bpc = DSC_BPC_10;
break;
case COLOR_DEPTH_121212:
bpc = DSC_BPC_12;
break;
default:
bpc = DSC_BPC_UNKNOWN;
break;
}
return bpc;
}
void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
{
int i;
memset(reg_vals, 0, sizeof(struct dsc_reg_values));
/* Non-PPS values */
reg_vals->dsc_clock_enable = 1;
reg_vals->dsc_clock_gating_disable = 0;
reg_vals->underflow_recovery_en = 0;
reg_vals->underflow_occurred_int_en = 0;
reg_vals->underflow_occurred_status = 0;
reg_vals->ich_reset_at_eol = 0;
reg_vals->alternate_ich_encoding_en = 0;
reg_vals->rc_buffer_model_size = 0;
/*reg_vals->disable_ich = 0;*/
reg_vals->dsc_dbg_en = 0;
for (i = 0; i < 4; i++)
reg_vals->rc_buffer_model_overflow_int_en[i] = 0;
/* PPS values */
reg_vals->pps.dsc_version_minor = 2;
reg_vals->pps.dsc_version_major = 1;
reg_vals->pps.line_buf_depth = 9;
reg_vals->pps.bits_per_component = 8;
reg_vals->pps.block_pred_enable = 1;
reg_vals->pps.slice_chunk_size = 0;
reg_vals->pps.pic_width = 0;
reg_vals->pps.pic_height = 0;
reg_vals->pps.slice_width = 0;
reg_vals->pps.slice_height = 0;
reg_vals->pps.initial_xmit_delay = 170;
reg_vals->pps.initial_dec_delay = 0;
reg_vals->pps.initial_scale_value = 0;
reg_vals->pps.scale_increment_interval = 0;
reg_vals->pps.scale_decrement_interval = 0;
reg_vals->pps.nfl_bpg_offset = 0;
reg_vals->pps.slice_bpg_offset = 0;
reg_vals->pps.nsl_bpg_offset = 0;
reg_vals->pps.initial_offset = 6144;
reg_vals->pps.final_offset = 0;
reg_vals->pps.flatness_min_qp = 3;
reg_vals->pps.flatness_max_qp = 12;
reg_vals->pps.rc_model_size = 8192;
reg_vals->pps.rc_edge_factor = 6;
reg_vals->pps.rc_quant_incr_limit0 = 11;
reg_vals->pps.rc_quant_incr_limit1 = 11;
reg_vals->pps.rc_tgt_offset_low = 3;
reg_vals->pps.rc_tgt_offset_high = 3;
}
/* Update the struct dsc_reg_values fields from the computed DSC parameters.
* This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn
* affects non-PPS register values.
*/
void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params)
{
int i;
reg_vals->pps = dsc_params->pps;
// pps_computed will have the "expanded" values; need to shift them to make them fit for regs.
for (i = 0; i < NUM_BUF_RANGES - 1; i++)
reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
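/* Example: a computed threshold of 896 bits is written as 896 >> 6 = 14; the
 * register field holds rc_buf_thresh in units of 64 bits, matching how the
 * value is packed into the DPCD PPS payload.
 */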
reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
}
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
{
uint32_t temp_int;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
REG_SET(DSC_DEBUG_CONTROL, 0,
DSC_DBG_EN, reg_vals->dsc_dbg_en);
// dsccif registers
REG_SET_5(DSCCIF_CONFIG0, 0,
INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, reg_vals->underflow_recovery_en,
INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, reg_vals->underflow_occurred_int_en,
INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, reg_vals->underflow_occurred_status,
INPUT_PIXEL_FORMAT, reg_vals->pixel_format,
DSCCIF_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component);
REG_SET_2(DSCCIF_CONFIG1, 0,
PIC_WIDTH, reg_vals->pps.pic_width,
PIC_HEIGHT, reg_vals->pps.pic_height);
// dscc registers
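/* ICH_RESET_AT_END_OF_LINE is only exposed on some DSC variants; program it
 * only when its field mask is non-zero.
 */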
if (dsc20->dsc_mask->ICH_RESET_AT_END_OF_LINE == 0) {
REG_SET_3(DSCC_CONFIG0, 0,
NUMBER_OF_SLICES_PER_LINE, reg_vals->num_slices_h - 1,
ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en,
NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1);
} else {
REG_SET_4(DSCC_CONFIG0, 0, ICH_RESET_AT_END_OF_LINE,
reg_vals->ich_reset_at_eol, NUMBER_OF_SLICES_PER_LINE,
reg_vals->num_slices_h - 1, ALTERNATE_ICH_ENCODING_EN,
reg_vals->alternate_ich_encoding_en, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION,
reg_vals->num_slices_v - 1);
}
REG_SET(DSCC_CONFIG1, 0,
DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size);
/*REG_SET_2(DSCC_CONFIG1, 0,
DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size,
DSCC_DISABLE_ICH, reg_vals->disable_ich);*/
REG_SET_4(DSCC_INTERRUPT_CONTROL_STATUS, 0,
DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[0],
DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[1],
DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[2],
DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[3]);
REG_SET_3(DSCC_PPS_CONFIG0, 0,
DSC_VERSION_MINOR, reg_vals->pps.dsc_version_minor,
LINEBUF_DEPTH, reg_vals->pps.line_buf_depth,
DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component);
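/* BITS_PER_PIXEL is programmed with bpp_x32 for native 4:2:0/4:2:2 and with
 * bpp_x32 / 2 for all other pixel formats.
 */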
if (reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
temp_int = reg_vals->bpp_x32;
else
temp_int = reg_vals->bpp_x32 >> 1;
REG_SET_7(DSCC_PPS_CONFIG1, 0,
BITS_PER_PIXEL, temp_int,
SIMPLE_422, reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422,
CONVERT_RGB, reg_vals->pixel_format == DSC_PIXFMT_RGB,
BLOCK_PRED_ENABLE, reg_vals->pps.block_pred_enable,
NATIVE_422, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422,
NATIVE_420, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420,
CHUNK_SIZE, reg_vals->pps.slice_chunk_size);
REG_SET_2(DSCC_PPS_CONFIG2, 0,
PIC_WIDTH, reg_vals->pps.pic_width,
PIC_HEIGHT, reg_vals->pps.pic_height);
REG_SET_2(DSCC_PPS_CONFIG3, 0,
SLICE_WIDTH, reg_vals->pps.slice_width,
SLICE_HEIGHT, reg_vals->pps.slice_height);
REG_SET(DSCC_PPS_CONFIG4, 0,
INITIAL_XMIT_DELAY, reg_vals->pps.initial_xmit_delay);
REG_SET_2(DSCC_PPS_CONFIG5, 0,
INITIAL_SCALE_VALUE, reg_vals->pps.initial_scale_value,
SCALE_INCREMENT_INTERVAL, reg_vals->pps.scale_increment_interval);
REG_SET_3(DSCC_PPS_CONFIG6, 0,
SCALE_DECREMENT_INTERVAL, reg_vals->pps.scale_decrement_interval,
FIRST_LINE_BPG_OFFSET, reg_vals->pps.first_line_bpg_offset,
SECOND_LINE_BPG_OFFSET, reg_vals->pps.second_line_bpg_offset);
REG_SET_2(DSCC_PPS_CONFIG7, 0,
NFL_BPG_OFFSET, reg_vals->pps.nfl_bpg_offset,
SLICE_BPG_OFFSET, reg_vals->pps.slice_bpg_offset);
REG_SET_2(DSCC_PPS_CONFIG8, 0,
NSL_BPG_OFFSET, reg_vals->pps.nsl_bpg_offset,
SECOND_LINE_OFFSET_ADJ, reg_vals->pps.second_line_offset_adj);
REG_SET_2(DSCC_PPS_CONFIG9, 0,
INITIAL_OFFSET, reg_vals->pps.initial_offset,
FINAL_OFFSET, reg_vals->pps.final_offset);
REG_SET_3(DSCC_PPS_CONFIG10, 0,
FLATNESS_MIN_QP, reg_vals->pps.flatness_min_qp,
FLATNESS_MAX_QP, reg_vals->pps.flatness_max_qp,
RC_MODEL_SIZE, reg_vals->pps.rc_model_size);
REG_SET_5(DSCC_PPS_CONFIG11, 0,
RC_EDGE_FACTOR, reg_vals->pps.rc_edge_factor,
RC_QUANT_INCR_LIMIT0, reg_vals->pps.rc_quant_incr_limit0,
RC_QUANT_INCR_LIMIT1, reg_vals->pps.rc_quant_incr_limit1,
RC_TGT_OFFSET_LO, reg_vals->pps.rc_tgt_offset_low,
RC_TGT_OFFSET_HI, reg_vals->pps.rc_tgt_offset_high);
REG_SET_4(DSCC_PPS_CONFIG12, 0,
RC_BUF_THRESH0, reg_vals->pps.rc_buf_thresh[0],
RC_BUF_THRESH1, reg_vals->pps.rc_buf_thresh[1],
RC_BUF_THRESH2, reg_vals->pps.rc_buf_thresh[2],
RC_BUF_THRESH3, reg_vals->pps.rc_buf_thresh[3]);
REG_SET_4(DSCC_PPS_CONFIG13, 0,
RC_BUF_THRESH4, reg_vals->pps.rc_buf_thresh[4],
RC_BUF_THRESH5, reg_vals->pps.rc_buf_thresh[5],
RC_BUF_THRESH6, reg_vals->pps.rc_buf_thresh[6],
RC_BUF_THRESH7, reg_vals->pps.rc_buf_thresh[7]);
REG_SET_4(DSCC_PPS_CONFIG14, 0,
RC_BUF_THRESH8, reg_vals->pps.rc_buf_thresh[8],
RC_BUF_THRESH9, reg_vals->pps.rc_buf_thresh[9],
RC_BUF_THRESH10, reg_vals->pps.rc_buf_thresh[10],
RC_BUF_THRESH11, reg_vals->pps.rc_buf_thresh[11]);
REG_SET_5(DSCC_PPS_CONFIG15, 0,
RC_BUF_THRESH12, reg_vals->pps.rc_buf_thresh[12],
RC_BUF_THRESH13, reg_vals->pps.rc_buf_thresh[13],
RANGE_MIN_QP0, reg_vals->pps.rc_range_params[0].range_min_qp,
RANGE_MAX_QP0, reg_vals->pps.rc_range_params[0].range_max_qp,
RANGE_BPG_OFFSET0, reg_vals->pps.rc_range_params[0].range_bpg_offset);
REG_SET_6(DSCC_PPS_CONFIG16, 0,
RANGE_MIN_QP1, reg_vals->pps.rc_range_params[1].range_min_qp,
RANGE_MAX_QP1, reg_vals->pps.rc_range_params[1].range_max_qp,
RANGE_BPG_OFFSET1, reg_vals->pps.rc_range_params[1].range_bpg_offset,
RANGE_MIN_QP2, reg_vals->pps.rc_range_params[2].range_min_qp,
RANGE_MAX_QP2, reg_vals->pps.rc_range_params[2].range_max_qp,
RANGE_BPG_OFFSET2, reg_vals->pps.rc_range_params[2].range_bpg_offset);
REG_SET_6(DSCC_PPS_CONFIG17, 0,
RANGE_MIN_QP3, reg_vals->pps.rc_range_params[3].range_min_qp,
RANGE_MAX_QP3, reg_vals->pps.rc_range_params[3].range_max_qp,
RANGE_BPG_OFFSET3, reg_vals->pps.rc_range_params[3].range_bpg_offset,
RANGE_MIN_QP4, reg_vals->pps.rc_range_params[4].range_min_qp,
RANGE_MAX_QP4, reg_vals->pps.rc_range_params[4].range_max_qp,
RANGE_BPG_OFFSET4, reg_vals->pps.rc_range_params[4].range_bpg_offset);
REG_SET_6(DSCC_PPS_CONFIG18, 0,
RANGE_MIN_QP5, reg_vals->pps.rc_range_params[5].range_min_qp,
RANGE_MAX_QP5, reg_vals->pps.rc_range_params[5].range_max_qp,
RANGE_BPG_OFFSET5, reg_vals->pps.rc_range_params[5].range_bpg_offset,
RANGE_MIN_QP6, reg_vals->pps.rc_range_params[6].range_min_qp,
RANGE_MAX_QP6, reg_vals->pps.rc_range_params[6].range_max_qp,
RANGE_BPG_OFFSET6, reg_vals->pps.rc_range_params[6].range_bpg_offset);
REG_SET_6(DSCC_PPS_CONFIG19, 0,
RANGE_MIN_QP7, reg_vals->pps.rc_range_params[7].range_min_qp,
RANGE_MAX_QP7, reg_vals->pps.rc_range_params[7].range_max_qp,
RANGE_BPG_OFFSET7, reg_vals->pps.rc_range_params[7].range_bpg_offset,
RANGE_MIN_QP8, reg_vals->pps.rc_range_params[8].range_min_qp,
RANGE_MAX_QP8, reg_vals->pps.rc_range_params[8].range_max_qp,
RANGE_BPG_OFFSET8, reg_vals->pps.rc_range_params[8].range_bpg_offset);
REG_SET_6(DSCC_PPS_CONFIG20, 0,
RANGE_MIN_QP9, reg_vals->pps.rc_range_params[9].range_min_qp,
RANGE_MAX_QP9, reg_vals->pps.rc_range_params[9].range_max_qp,
RANGE_BPG_OFFSET9, reg_vals->pps.rc_range_params[9].range_bpg_offset,
RANGE_MIN_QP10, reg_vals->pps.rc_range_params[10].range_min_qp,
RANGE_MAX_QP10, reg_vals->pps.rc_range_params[10].range_max_qp,
RANGE_BPG_OFFSET10, reg_vals->pps.rc_range_params[10].range_bpg_offset);
REG_SET_6(DSCC_PPS_CONFIG21, 0,
RANGE_MIN_QP11, reg_vals->pps.rc_range_params[11].range_min_qp,
RANGE_MAX_QP11, reg_vals->pps.rc_range_params[11].range_max_qp,
RANGE_BPG_OFFSET11, reg_vals->pps.rc_range_params[11].range_bpg_offset,
RANGE_MIN_QP12, reg_vals->pps.rc_range_params[12].range_min_qp,
RANGE_MAX_QP12, reg_vals->pps.rc_range_params[12].range_max_qp,
RANGE_BPG_OFFSET12, reg_vals->pps.rc_range_params[12].range_bpg_offset);
REG_SET_6(DSCC_PPS_CONFIG22, 0,
RANGE_MIN_QP13, reg_vals->pps.rc_range_params[13].range_min_qp,
RANGE_MAX_QP13, reg_vals->pps.rc_range_params[13].range_max_qp,
RANGE_BPG_OFFSET13, reg_vals->pps.rc_range_params[13].range_bpg_offset,
RANGE_MIN_QP14, reg_vals->pps.rc_range_params[14].range_min_qp,
RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp,
RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn20_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
void dpp20_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
REG_GET(DPP_CONTROL,
DPP_CLOCK_ENABLE, &s->is_enabled);
REG_GET(CM_DGAM_CONTROL,
CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
// BGAM has no ROM, and definition is different, can't reuse same dump
//REG_GET(CM_BLNDGAM_CONTROL,
// CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode);
REG_GET(CM_GAMUT_REMAP_CONTROL,
CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
if (s->gamut_remap_mode) {
s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
}
}
void dpp2_power_on_obuf(
struct dpp *dpp_base,
bool power_on)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0);
REG_UPDATE(OBUF_MEM_PWR_CTRL,
OBUF_MEM_PWR_FORCE, power_on == true ? 0:1);
REG_UPDATE(DSCL_MEM_PWR_CTRL,
LUT_MEM_PWR_FORCE, power_on == true ? 0:1);
}
void dpp2_dummy_program_input_lut(
struct dpp *dpp_base,
const struct dc_gamma *gamma)
{}
static void dpp2_cnv_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
uint32_t pixel_format = 0;
uint32_t alpha_en = 1;
enum dc_color_space color_space = COLOR_SPACE_SRGB;
enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
bool force_disable_cursor = false;
struct out_csc_color_matrix tbl_entry;
uint32_t is_2bit = 0;
int i = 0;
REG_SET_2(FORMAT_CONTROL, 0,
CNVC_BYPASS, 0,
FORMAT_EXPANSION_MODE, mode);
//hardcode default
//FORMAT_CONTROL. FORMAT_CNV16 default 0: U0.16/S.1.15; 1: U1.15/ S.1.14
//FORMAT_CONTROL. CNVC_BYPASS_MSB_ALIGN default 0: disabled 1: enabled
//FORMAT_CONTROL. CLAMP_POSITIVE default 0: disabled 1: enabled
//FORMAT_CONTROL. CLAMP_POSITIVE_C default 0: disabled 1: enabled
REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
pixel_format = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
pixel_format = 3;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
pixel_format = 8;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
pixel_format = 10;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
pixel_format = 24;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
pixel_format = 25;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
pixel_format = 12;
color_space = COLOR_SPACE_YCBCR709;
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
color_space = COLOR_SPACE_YCBCR709;
select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
pixel_format = 115;
color_space = COLOR_SPACE_YCBCR709;
select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
alpha_en = 0;
break;
default:
break;
}
/* Set default color space based on format if none is given. */
color_space = input_color_space ? input_color_space : color_space;
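/* Formats carrying a 2-bit alpha channel use the 4-entry alpha expansion LUT
 * below, when one is provided.
 */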
if (is_2bit == 1 && alpha_2bit_lut != NULL) {
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
}
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
// if input adjustments exist, program icsc with those values
if (input_csc_color_matrix.enable_adjustment
== true) {
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
tbl_entry.color_space = input_color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = DCN2_ICSC_SELECT_ICSC_A;
else
select = DCN2_ICSC_SELECT_BYPASS;
dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
} else
dpp2_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, 0);
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, 0);
}
dpp2_power_on_obuf(dpp_base, true);
}
/* Compute the maximum number of lines that we can fit in the line buffer */
void dscl2_calc_lb_num_partitions(
const struct scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
int *num_part_c)
{
int memory_line_size_y, memory_line_size_c, memory_line_size_a,
lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
int line_size = scl_data->viewport.width < scl_data->recout.width ?
scl_data->viewport.width : scl_data->recout.width;
int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
scl_data->viewport_c.width : scl_data->recout.width;
if (line_size == 0)
line_size = 1;
if (line_size_c == 0)
line_size_c = 1;
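/* Convert line widths to line-buffer memory entries: one entry covers six
 * pixels, so divide by 6 rounding up.
 */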
memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */
memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */
memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
if (lb_config == LB_MEMORY_CONFIG_1) {
lb_memory_size = 970;
lb_memory_size_c = 970;
lb_memory_size_a = 970;
} else if (lb_config == LB_MEMORY_CONFIG_2) {
lb_memory_size = 1290;
lb_memory_size_c = 1290;
lb_memory_size_a = 1290;
} else if (lb_config == LB_MEMORY_CONFIG_3) {
/* 420 mode: using 3rd mem from Y, Cr and Cb */
lb_memory_size = 970 + 1290 + 484 + 484 + 484;
lb_memory_size_c = 970 + 1290;
lb_memory_size_a = 970 + 1290 + 484;
} else {
lb_memory_size = 970 + 1290 + 484;
lb_memory_size_c = 970 + 1290 + 484;
lb_memory_size_a = 970 + 1290 + 484;
}
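/* One partition stores one line. Luma is further limited by the alpha memory
 * when per-pixel alpha is enabled, and both counts are capped at 64.
 */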
*num_part_y = lb_memory_size / memory_line_size_y;
*num_part_c = lb_memory_size_c / memory_line_size_c;
num_partitions_a = lb_memory_size_a / memory_line_size_a;
if (scl_data->lb_params.alpha_en
&& (num_partitions_a < *num_part_y))
*num_part_y = num_partitions_a;
if (*num_part_y > 64)
*num_part_y = 64;
if (*num_part_c > 64)
*num_part_c = 64;
}
void dpp2_cnv_set_alpha_keyer(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_EN, color_keyer->color_keyer_en);
REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, color_keyer->color_keyer_mode);
REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, color_keyer->color_keyer_alpha_low);
REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, color_keyer->color_keyer_alpha_high);
REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, color_keyer->color_keyer_red_low);
REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, color_keyer->color_keyer_red_high);
REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, color_keyer->color_keyer_green_low);
REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, color_keyer->color_keyer_green_high);
REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, color_keyer->color_keyer_blue_low);
REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, color_keyer->color_keyer_blue_high);
}
void dpp2_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
{
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
cur_rom_en = 1;
}
}
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
CUR0_EXPANSION_MODE, 0,
CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
REG_UPDATE(CURSOR0_COLOR0,
CUR0_COLOR0, 0x00000000);
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
}
void oppn20_dummy_program_regamma_pwl(
struct dpp *dpp,
const struct pwl_params *params,
enum opp_regamma mode)
{}
static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
.dpp_set_degamma = dpp2_set_degamma,
.dpp_program_input_lut = dpp2_dummy_program_input_lut,
.dpp_full_bypass = dpp1_full_bypass,
.dpp_setup = dpp2_cnv_setup,
.dpp_program_degamma_pwl = dpp2_set_degamma_pwl,
.dpp_program_blnd_lut = dpp20_program_blnd_lut,
.dpp_program_shaper_lut = dpp20_program_shaper,
.dpp_program_3dlut = dpp20_program_3dlut,
.dpp_program_bias_and_scale = NULL,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp2_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
};
static struct dpp_caps dcn20_dpp_cap = {
.dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
.dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions,
};
bool dpp2_construct(
struct dcn20_dpp *dpp,
struct dc_context *ctx,
uint32_t inst,
const struct dcn2_dpp_registers *tf_regs,
const struct dcn2_dpp_shift *tf_shift,
const struct dcn2_dpp_mask *tf_mask)
{
dpp->base.ctx = ctx;
dpp->base.inst = inst;
dpp->base.funcs = &dcn20_dpp_funcs;
dpp->base.caps = &dcn20_dpp_cap;
dpp->tf_regs = tf_regs;
dpp->tf_shift = tf_shift;
dpp->tf_mask = tf_mask;
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
LB_PIXEL_DEPTH_30BPP |
LB_PIXEL_DEPTH_36BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
return true;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "dcn20_vmid.h"
#include "reg_helper.h"
#define REG(reg)\
vmid->regs->reg
#define CTX \
vmid->ctx
#undef FN
#define FN(reg_name, field_name) \
vmid->shifts->field_name, vmid->masks->field_name
static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
{
/* According to the hardware spec, we need to poll for the lowest
 * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM
 * context is updated. We can't use REG_WAIT here since we
 * don't have a separate field to wait on.
*
* TODO: Confirm timeout / poll interval with hardware team
*/
int max_times = 10000;
int delay_us = 5;
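/* Worst case wait is 10000 polls * 5 us = 50 ms. */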
int i;
for (i = 0; i < max_times; ++i) {
uint32_t entry_lo32;
REG_GET(PAGE_TABLE_BASE_ADDR_LO32,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32,
&entry_lo32);
if (entry_lo32 & 0x1)
return;
udelay(delay_us);
}
/* VM setup timed out */
DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n");
ASSERT(0);
}
void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
{
REG_SET(PAGE_TABLE_START_ADDR_HI32, 0,
VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4, (config->page_table_start_addr >> 32) & 0xF);
REG_SET(PAGE_TABLE_START_ADDR_LO32, 0,
VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32, config->page_table_start_addr & 0xFFFFFFFF);
REG_SET(PAGE_TABLE_END_ADDR_HI32, 0,
VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4, (config->page_table_end_addr >> 32) & 0xF);
REG_SET(PAGE_TABLE_END_ADDR_LO32, 0,
VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32, config->page_table_end_addr & 0xFFFFFFFF);
REG_SET_2(CNTL, 0,
VM_CONTEXT0_PAGE_TABLE_DEPTH, config->depth,
VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE, config->block_size);
REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF);
/* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */
REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF);
dcn20_wait_for_vmid_ready(vmid);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn20_mpc.h"
#include "reg_helper.h"
#include "dc.h"
#include "mem_input.h"
#include "dcn10/dcn10_cm_common.h"
#define REG(reg)\
mpc20->mpc_regs->reg
#define IND_REG(index) \
(index)
#define CTX \
mpc20->base.ctx
#undef FN
#define FN(reg_name, field_name) \
mpc20->mpc_shift->field_name, mpc20->mpc_mask->field_name
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
void mpc2_update_blending(
struct mpc *mpc,
struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id);
REG_UPDATE_7(MPCC_CONTROL[mpcc_id],
MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha,
MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only,
MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha,
MPCC_GLOBAL_GAIN, blnd_cfg->global_gain,
MPCC_BG_BPC, blnd_cfg->background_color_bpc,
MPCC_BOT_GAIN_MODE, blnd_cfg->bottom_gain_mode);
REG_SET(MPCC_TOP_GAIN[mpcc_id], 0, MPCC_TOP_GAIN, blnd_cfg->top_gain);
REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain);
REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain);
mpcc->blnd_cfg = *blnd_cfg;
}
void mpc2_set_denorm(
struct mpc *mpc,
int opp_id,
enum dc_color_depth output_depth)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
int denorm_mode = 0;
switch (output_depth) {
case COLOR_DEPTH_666:
denorm_mode = 1;
break;
case COLOR_DEPTH_888:
denorm_mode = 2;
break;
case COLOR_DEPTH_999:
denorm_mode = 3;
break;
case COLOR_DEPTH_101010:
denorm_mode = 4;
break;
case COLOR_DEPTH_111111:
denorm_mode = 5;
break;
case COLOR_DEPTH_121212:
denorm_mode = 6;
break;
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
/* not a valid use case! */
break;
}
REG_UPDATE(DENORM_CONTROL[opp_id],
MPC_OUT_DENORM_MODE, denorm_mode);
}
void mpc2_set_denorm_clamp(
struct mpc *mpc,
int opp_id,
struct mpc_denorm_clamp denorm_clamp)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
REG_UPDATE_2(DENORM_CONTROL[opp_id],
MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr,
MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr);
REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id],
MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y,
MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y);
REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id],
MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb,
MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb);
}
void mpc2_set_output_csc(
struct mpc *mpc,
int opp_id,
const uint16_t *regval,
enum mpc_output_csc_mode ocsc_mode)
{
uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct color_matrices_reg ocsc_regs;
if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
}
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
/* determine which CSC coefficients (A or B) we are using
* currently. select the alternate set to double buffer
* the CSC update so CSC is updated on frame boundary
*/
IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
else
ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A;
if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
} else {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
cm_helper_program_color_matrices(
mpc20->base.ctx,
regval,
&ocsc_regs);
REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
void mpc2_set_ocsc_default(
struct mpc *mpc,
int opp_id,
enum dc_color_space color_space,
enum mpc_output_csc_mode ocsc_mode)
{
uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
uint32_t arr_size;
struct color_matrices_reg ocsc_regs;
const uint16_t *regval = NULL;
if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
}
regval = find_color_matrix(color_space, &arr_size);
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
/* determine which CSC coefficients (A or B) we are using
* currently. select the alternate set to double buffer
* the CSC update so CSC is updated on frame boundary
*/
IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
else
ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A;
if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
} else {
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
cm_helper_program_color_matrices(
mpc20->base.ctx,
regval,
&ocsc_regs);
REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
static void mpc2_ogam_get_reg_field(
struct mpc *mpc,
struct xfer_func_reg *reg)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
reg->shifts.exp_region0_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->masks.exp_region0_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->shifts.exp_region1_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->masks.exp_region1_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->shifts.exp_region1_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->masks.exp_region1_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->shifts.field_region_end = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B;
reg->masks.field_region_end = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B;
reg->shifts.field_region_end_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
reg->masks.field_region_end_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
reg->shifts.field_region_end_base = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->masks.field_region_end_base = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;
reg->masks.field_region_linear_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;
reg->shifts.exp_region_start = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B;
reg->masks.exp_region_start = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
reg->masks.exp_resion_start_segment = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
}
void mpc20_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
}
static void mpc20_configure_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool is_ram_a)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
REG_UPDATE_2(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id],
MPCC_OGAM_LUT_WRITE_EN_MASK, 7,
MPCC_OGAM_LUT_RAM_SEL, is_ram_a == true ? 0:1);
REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
}
static enum dc_lut_mode mpc20_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
enum dc_lut_mode mode;
uint32_t state_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id], MPCC_OGAM_CONFIG_STATUS, &state_mode);
switch (state_mode) {
case 0:
mode = LUT_BYPASS;
break;
case 1:
mode = LUT_RAM_A;
break;
case 2:
mode = LUT_RAM_B;
break;
default:
mode = LUT_BYPASS;
break;
}
return mode;
}
static void mpc2_program_lutb(struct mpc *mpc, int mpcc_id,
const struct pwl_params *params)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct xfer_func_reg gam_regs;
mpc2_ogam_get_reg_field(mpc, &gam_regs);
gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]);
gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]);
gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]);
gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_B[mpcc_id]);
gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_G[mpcc_id]);
gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_R[mpcc_id]);
gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]);
gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]);
gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]);
gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]);
gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]);
gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]);
gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]);
gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]);
cm_helper_program_xfer_func(mpc20->base.ctx, params, &gam_regs);
}
static void mpc2_program_luta(struct mpc *mpc, int mpcc_id,
const struct pwl_params *params)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct xfer_func_reg gam_regs;
mpc2_ogam_get_reg_field(mpc, &gam_regs);
gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]);
gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]);
gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]);
gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_B[mpcc_id]);
gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_G[mpcc_id]);
gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_R[mpcc_id]);
gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]);
gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]);
gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]);
gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]);
gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]);
gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]);
gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]);
gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]);
cm_helper_program_xfer_func(mpc20->base.ctx, params, &gam_regs);
}
static void mpc20_program_ogam_pwl(
struct mpc *mpc, int mpcc_id,
const struct pwl_result_data *rgb,
uint32_t num)
{
uint32_t i;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
PERF_TRACE();
REG_SEQ_START();
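/* Each PWL point is written as six consecutive LUT_DATA writes:
 * red, green and blue base values followed by their three deltas.
 */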
for (i = 0 ; i < num; i++) {
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].blue_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
MPCC_OGAM_LUT_DATA, rgb[i].delta_red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg);
}
}
static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
enum dc_lut_mode current_mode,
enum dc_lut_mode next_mode)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
if (mpc->ctx->dc->debug.cm_in_bypass) {
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
return;
}
if (mpc->ctx->dc->work_arounds.dedcn20_305_wa == false) {
/*hw fixed in new review*/
return;
}
if (current_mode == LUT_BYPASS)
/*this will only work if OTG is locked.
*if we were to support OTG unlock case,
*the workaround will be more complex
*/
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE,
next_mode == LUT_RAM_A ? 1:2);
}
void mpc2_set_output_gamma(
struct mpc *mpc,
int mpcc_id,
const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
if (mpc->ctx->dc->debug.cm_in_bypass) {
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
return;
}
if (params == NULL) {
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
return;
}
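/* Double-buffer the OGAM LUT: program the RAM bank that is not currently in
 * use, then switch MPCC_OGAM_MODE over to it.
 */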
current_mode = mpc20_get_ogam_current(mpc, mpcc_id);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
else
next_mode = LUT_RAM_A;
mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
mpc2_program_luta(mpc, mpcc_id, params);
else
mpc2_program_lutb(mpc, mpcc_id, params);
apply_DEDCN20_305_wa(mpc, mpcc_id, current_mode, next_mode);
mpc20_program_ogam_pwl(
mpc, mpcc_id, params->rgb_resulted, params->hw_points_num);
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE,
next_mode == LUT_RAM_A ? 1:2);
}
void mpc2_assert_idle_mpcc(struct mpc *mpc, int id)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
unsigned int mpc_disabled;
ASSERT(!(mpc20->mpcc_in_use_mask & 1 << id));
REG_GET(MPCC_STATUS[id], MPCC_DISABLED, &mpc_disabled);
if (mpc_disabled)
return;
REG_WAIT(MPCC_STATUS[id],
MPCC_IDLE, 1,
1, 100000);
}
void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
unsigned int top_sel, mpc_busy, mpc_idle, mpc_disabled;
REG_GET(MPCC_TOP_SEL[mpcc_id],
MPCC_TOP_SEL, &top_sel);
REG_GET_3(MPCC_STATUS[mpcc_id],
MPCC_BUSY, &mpc_busy,
MPCC_IDLE, &mpc_idle,
MPCC_DISABLED, &mpc_disabled);
if (top_sel == 0xf) {
ASSERT(!mpc_busy);
ASSERT(mpc_idle);
ASSERT(mpc_disabled);
} else {
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
REG_SEQ_SUBMIT();
PERF_TRACE();
REG_SEQ_WAIT_DONE();
PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
{
mpcc->mpcc_id = mpcc_inst;
mpcc->dpp_id = 0xf;
mpcc->mpcc_bot = NULL;
mpcc->blnd_cfg.overlap_only = false;
mpcc->blnd_cfg.global_alpha = 0xff;
mpcc->blnd_cfg.global_gain = 0xff;
mpcc->blnd_cfg.background_color_bpc = 4;
mpcc->blnd_cfg.bottom_gain_mode = 0;
mpcc->blnd_cfg.top_gain = 0x1f000;
mpcc->blnd_cfg.bottom_inside_gain = 0x1f000;
mpcc->blnd_cfg.bottom_outside_gain = 0x1f000;
mpcc->sm_cfg.enable = false;
}
static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
{
struct mpcc *tmp_mpcc = tree->opp_list;
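/* dpp_id == 0xf marks an MPCC that is not attached to any DPP (see
 * mpc2_init_mpcc()); such an MPCC may also be returned here.
 */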
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
/* avoid circular linked list */
ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
if (tmp_mpcc == tmp_mpcc->mpcc_bot)
break;
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
}
static const struct mpc_funcs dcn20_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
.wait_for_idle = mpc2_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.set_denorm = mpc2_set_denorm,
.set_denorm_clamp = mpc2_set_denorm_clamp,
.set_output_csc = mpc2_set_output_csc,
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
};
void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
struct dc_context *ctx,
const struct dcn20_mpc_registers *mpc_regs,
const struct dcn20_mpc_shift *mpc_shift,
const struct dcn20_mpc_mask *mpc_mask,
int num_mpcc)
{
int i;
mpc20->base.ctx = ctx;
mpc20->base.funcs = &dcn20_mpc_funcs;
mpc20->mpc_regs = mpc_regs;
mpc20->mpc_shift = mpc_shift;
mpc20->mpc_mask = mpc_mask;
mpc20->mpcc_in_use_mask = 0;
mpc20->num_mpcc = num_mpcc;
for (i = 0; i < MAX_MPCC; i++)
mpc2_init_mpcc(&mpc20->base.mpcc_array[i], i);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc.h"
#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "dcn10_cm_common.h"
#include "custom_float.h"
#define REG(reg) reg
#define CTX \
ctx
#undef FN
#define FN(reg_name, field_name) \
reg->shifts.field_name, reg->masks.field_name
void cm_helper_program_color_matrices(
struct dc_context *ctx,
const uint16_t *regval,
const struct color_matrices_reg *reg)
{
uint32_t cur_csc_reg;
unsigned int i = 0;
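/* Each CSC register packs two consecutive 16-bit coefficients
 * (c11/c12, c13/c14, ..., c33/c34).
 */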
for (cur_csc_reg = reg->csc_c11_c12;
cur_csc_reg <= reg->csc_c33_c34;
cur_csc_reg++) {
const uint16_t *regval0 = &(regval[2 * i]);
const uint16_t *regval1 = &(regval[(2 * i) + 1]);
REG_SET_2(cur_csc_reg, 0,
csc_c11, *regval0,
csc_c12, *regval1);
i++;
}
}
void cm_helper_program_xfer_func(
struct dc_context *ctx,
const struct pwl_params *params,
const struct xfer_func_reg *reg)
{
uint32_t reg_region_cur;
unsigned int i = 0;
REG_SET_2(reg->start_cntl_b, 0,
exp_region_start, params->corner_points[0].blue.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_g, 0,
exp_region_start, params->corner_points[0].green.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_r, 0,
exp_region_start, params->corner_points[0].red.custom_float_x,
exp_resion_start_segment, 0);
REG_SET(reg->start_slope_cntl_b, 0,
field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
REG_SET(reg->start_slope_cntl_g, 0,
field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
REG_SET(reg->start_slope_cntl_r, 0,
field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
REG_SET(reg->start_end_cntl1_b, 0,
field_region_end, params->corner_points[1].blue.custom_float_x);
REG_SET_2(reg->start_end_cntl2_b, 0,
field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
field_region_end_base, params->corner_points[1].blue.custom_float_y);
REG_SET(reg->start_end_cntl1_g, 0,
field_region_end, params->corner_points[1].green.custom_float_x);
REG_SET_2(reg->start_end_cntl2_g, 0,
field_region_end_slope, params->corner_points[1].green.custom_float_slope,
field_region_end_base, params->corner_points[1].green.custom_float_y);
REG_SET(reg->start_end_cntl1_r, 0,
field_region_end, params->corner_points[1].red.custom_float_x);
REG_SET_2(reg->start_end_cntl2_r, 0,
field_region_end_slope, params->corner_points[1].red.custom_float_slope,
field_region_end_base, params->corner_points[1].red.custom_float_y);
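/* Each region register packs the LUT offset and segment count for two
 * consecutive curve regions.
 */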
for (reg_region_cur = reg->region_start;
reg_region_cur <= reg->region_end;
reg_region_cur++) {
const struct gamma_curve *curve0 = &(params->arr_curve_points[2 * i]);
const struct gamma_curve *curve1 = &(params->arr_curve_points[(2 * i) + 1]);
REG_SET_4(reg_region_cur, 0,
exp_region0_lut_offset, curve0->offset,
exp_region0_num_segments, curve0->segments_num,
exp_region1_lut_offset, curve1->offset,
exp_region1_num_segments, curve1->segments_num);
i++;
}
}
bool cm_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
struct curve_points3 *corner_points,
uint32_t hw_points_num,
bool fixpoint)
{
struct custom_float_format fmt;
struct pwl_result_data *rgb = rgb_resulted;
uint32_t i = 0;
fmt.exponenta_bits = 6;
fmt.mantissa_bits = 12;
fmt.sign = false;
/* corner_points[0] - beginning base, slope offset for R,G,B
* corner_points[1] - end base, slope offset for R,G,B
*/
if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
&corner_points[0].red.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
&corner_points[0].green.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
&corner_points[0].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
&corner_points[0].red.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
&corner_points[0].green.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
&corner_points[0].blue.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
&corner_points[0].red.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
&corner_points[0].green.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
&corner_points[0].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
fmt.mantissa_bits = 10;
fmt.sign = false;
if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
&corner_points[1].red.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
&corner_points[1].green.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
&corner_points[1].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
if (fixpoint == true) {
corner_points[1].red.custom_float_y =
dc_fixpt_clamp_u0d14(corner_points[1].red.y);
corner_points[1].green.custom_float_y =
dc_fixpt_clamp_u0d14(corner_points[1].green.y);
corner_points[1].blue.custom_float_y =
dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
} else {
if (!convert_to_custom_float_format(corner_points[1].red.y,
&fmt, &corner_points[1].red.custom_float_y)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].green.y,
&fmt, &corner_points[1].green.custom_float_y)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].blue.y,
&fmt, &corner_points[1].blue.custom_float_y)) {
BREAK_TO_DEBUGGER();
return false;
}
}
if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
&corner_points[1].red.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
&corner_points[1].green.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
&corner_points[1].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
if (hw_points_num == 0 || rgb_resulted == NULL || fixpoint == true)
return true;
fmt.mantissa_bits = 12;
fmt.sign = true;
while (i != hw_points_num) {
if (!convert_to_custom_float_format(rgb->red, &fmt,
&rgb->red_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->green, &fmt,
&rgb->green_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->blue, &fmt,
&rgb->blue_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
&rgb->delta_red_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
&rgb->delta_green_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
&rgb->delta_blue_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
++rgb;
++i;
}
return true;
}
/* driver uses 32 regions or less, but DCN HW has 34, extra 2 are set to 0 */
#define MAX_REGIONS_NUMBER 34
#define MAX_LOW_POINT 25
#define NUMBER_REGIONS 32
#define NUMBER_SW_SEGMENTS 16
#define DC_LOGGER \
ctx->logger
bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint)
{
struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
struct pwl_result_data *rgb_minus_1;
int32_t region_start, region_end;
int32_t i;
uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
memset(lut_params, 0, sizeof(struct pwl_params));
memset(seg_distr, 0, sizeof(seg_distr));
if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) {
/* 32 segments
* segments are from 2^-25 to 2^7
*/
for (i = 0; i < NUMBER_REGIONS ; i++)
seg_distr[i] = 3;
region_start = -MAX_LOW_POINT;
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
} else {
/* 11 segments
* segment is from 2^-10 to 2^1
* There are fewer than 256 points, for optimization
*/
seg_distr[0] = 3;
seg_distr[1] = 4;
seg_distr[2] = 4;
seg_distr[3] = 4;
seg_distr[4] = 4;
seg_distr[5] = 4;
seg_distr[6] = 4;
seg_distr[7] = 4;
seg_distr[8] = 4;
seg_distr[9] = 4;
seg_distr[10] = 1;
region_start = -10;
region_end = 1;
}
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
seg_distr[i] = -1;
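/* seg_distr[k] is log2 of the number of HW points in region k; -1 marks an
 * unused region.
 */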
for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
if (seg_distr[k] != -1)
hw_points += (1 << seg_distr[k]);
}
j = 0;
for (k = 0; k < (region_end - region_start); k++) {
increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
start_index = (region_start + k + MAX_LOW_POINT) *
NUMBER_SW_SEGMENTS;
for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
i += increment) {
if (j == hw_points - 1)
break;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
j++;
}
}
/* last point */
start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
// All 3 color channels have same x
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
corner_points[0].blue.x = corner_points[0].red.x;
corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
corner_points[1].green.x = corner_points[1].red.x;
corner_points[1].blue.x = corner_points[1].red.x;
corner_points[0].red.y = rgb_resulted[0].red;
corner_points[0].green.y = rgb_resulted[0].green;
corner_points[0].blue.y = rgb_resulted[0].blue;
corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
corner_points[0].red.x);
corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
corner_points[0].green.x);
corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
corner_points[0].blue.x);
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
corner_points[1].red.slope = dc_fixpt_zero;
corner_points[1].green.slope = dc_fixpt_zero;
corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
const struct fixed31_32 end_value =
dc_fixpt_from_int(125);
corner_points[1].red.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
dc_fixpt_sub(end_value, corner_points[1].red.x));
corner_points[1].green.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
dc_fixpt_sub(end_value, corner_points[1].green.x));
corner_points[1].blue.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
k = 0;
for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
if (seg_distr[k] != -1) {
lut_params->arr_curve_points[k].segments_num =
seg_distr[k];
lut_params->arr_curve_points[i].offset =
lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
k++;
}
if (seg_distr[k] != -1)
lut_params->arr_curve_points[k].segments_num = seg_distr[k];
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
rgb_minus_1 = rgb;
i = 1;
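/* Compute per-point deltas. Near the end of the curve, raise any decreasing
 * point using the previous delta so the ramp stays non-decreasing.
 */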
while (i != hw_points + 1) {
if (i >= hw_points - 1) {
if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
}
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
if (fixpoint == true) {
uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red);
uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green);
uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue);
if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10)
DC_LOG_WARNING("Losing delta precision while programming shaper LUT.");
rgb->delta_red_reg = red_clamp & 0x3ff;
rgb->delta_green_reg = green_clamp & 0x3ff;
rgb->delta_blue_reg = blue_clamp & 0x3ff;
rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
}
++rgb_plus_1;
rgb_minus_1 = rgb;
++rgb;
++i;
}
cm_helper_convert_to_custom_float(rgb_resulted,
lut_params->corner_points,
hw_points, fixpoint);
return true;
}
#define NUM_DEGAMMA_REGIONS 12
bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params)
{
struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
int32_t region_start, region_end;
int32_t i;
uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
memset(lut_params, 0, sizeof(struct pwl_params));
memset(seg_distr, 0, sizeof(seg_distr));
region_start = -NUM_DEGAMMA_REGIONS;
region_end = 0;
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
seg_distr[i] = -1;
/* 12 segments
* segments are from 2^-12 to 0
*/
for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
seg_distr[i] = 4;
for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
if (seg_distr[k] != -1)
hw_points += (1 << seg_distr[k]);
}
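/* With NUM_DEGAMMA_REGIONS = 12 and seg_distr[i] = 4 as set above, this
* yields 12 * (1 << 4) = 192 hardware points for the degamma LUT.
*/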
j = 0;
for (k = 0; k < (region_end - region_start); k++) {
increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
start_index = (region_start + k + MAX_LOW_POINT) *
NUMBER_SW_SEGMENTS;
for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
i += increment) {
if (j == hw_points - 1)
break;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
j++;
}
}
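/* Illustration (assuming NUMBER_SW_SEGMENTS is 16, as defined elsewhere
* in DC): with seg_distr[k] = 4, increment = 16 / (1 << 4) = 1, so every
* SW point in the region is copied into rgb_resulted.
*/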
/* last point */
start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
corner_points[0].blue.x = corner_points[0].red.x;
corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
corner_points[1].green.x = corner_points[1].red.x;
corner_points[1].blue.x = corner_points[1].red.x;
corner_points[0].red.y = rgb_resulted[0].red;
corner_points[0].green.y = rgb_resulted[0].green;
corner_points[0].blue.y = rgb_resulted[0].blue;
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
*/
corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
corner_points[1].red.slope = dc_fixpt_zero;
corner_points[1].green.slope = dc_fixpt_zero;
corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
const struct fixed31_32 end_value =
dc_fixpt_from_int(125);
corner_points[1].red.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
dc_fixpt_sub(end_value, corner_points[1].red.x));
corner_points[1].green.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
dc_fixpt_sub(end_value, corner_points[1].green.x));
corner_points[1].blue.slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
k = 0;
for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
if (seg_distr[k] != -1) {
lut_params->arr_curve_points[k].segments_num =
seg_distr[k];
lut_params->arr_curve_points[i].offset =
lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
k++;
}
if (seg_distr[k] != -1)
lut_params->arr_curve_points[k].segments_num = seg_distr[k];
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
i = 1;
while (i != hw_points + 1) {
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
++rgb_plus_1;
++rgb;
++i;
}
cm_helper_convert_to_custom_float(rgb_resulted,
lut_params->corner_points,
hw_points, false);
return true;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn10_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10_resource.h"
#include "dcn10_ipp.h"
#include "dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10_dpp.h"
#include "dcn10_optc.h"
#include "dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10_opp.h"
#include "dcn10_link_encoder.h"
#include "dcn10_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dce/dce_panel_cntl.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "nbio/nbio_7_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#endif
enum dcn10_clk_src_array_id {
DCN10_CLK_SRC_PLL0,
DCN10_CLK_SRC_PLL1,
DCN10_CLK_SRC_PLL2,
DCN10_CLK_SRC_PLL3,
DCN10_CLK_SRC_TOTAL,
DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3
};
/* begin *********************
* macros to expand register list macros defined in HW object header file */
/* DCN */
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
#define BASE(seg) \
BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
mm ## reg_name ## 0 ## _ ## block ## id
/* set field/register/bitfield name */
#define SFRB(field_name, reg_name, bitfield, post_fix)\
.field_name = reg_name ## __ ## bitfield ## post_fix
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIF_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
MMHUB_BASE__INST0_SEG ## seg
#define MMHUB_BASE(seg) \
MMHUB_BASE_INNER(seg)
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
/* macros to expand register list macros defined in HW object header file
* end *********************/
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCN10_REG_LIST()
};
static const struct dce_dmcu_shift dmcu_shift = {
DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dce_dmcu_mask dmcu_mask = {
DMCU_MASK_SH_LIST_DCN10(_MASK)
};
static const struct dce_abm_registers abm_regs = {
ABM_DCN10_REG_LIST(0)
};
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN10(_MASK)
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN10(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
audio_regs(2),
audio_regs(3),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
#define aux_regs(id)\
[id] = {\
AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3)
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3)
};
#define link_regs(id)\
[id] = {\
LE_DCN10_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0),
link_regs(1),
link_regs(2),
link_regs(3)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
{ DCN_PANEL_CNTL_REG_LIST() }
};
static const struct dce_panel_cntl_shift panel_cntl_shift = {
DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
};
static const struct dce_panel_cntl_mask panel_cntl_mask = {
DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
};
static const struct dce110_aux_registers_shift aux_shift = {
DCN10_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN10_AUX_MASK_SH_LIST(_MASK)
};
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN10(id),\
}
static const struct dcn10_ipp_registers ipp_regs[] = {
ipp_regs(0),
ipp_regs(1),
ipp_regs(2),
ipp_regs(3),
};
static const struct dcn10_ipp_shift ipp_shift = {
IPP_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dcn10_ipp_mask ipp_mask = {
IPP_MASK_SH_LIST_DCN10(_MASK),
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN10(id),\
}
static const struct dcn10_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
opp_regs(2),
opp_regs(3),
};
static const struct dcn10_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dcn10_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN10(_MASK),
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN10(id),\
}
static const struct dcn_dpp_registers tf_regs[] = {
tf_regs(0),
tf_regs(1),
tf_regs(2),
tf_regs(3),
};
static const struct dcn_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN10(__SHIFT),
TF_DEBUG_REG_LIST_SH_DCN10
};
static const struct dcn_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN10(_MASK),
TF_DEBUG_REG_LIST_MASK_DCN10
};
static const struct dcn_mpc_registers mpc_regs = {
MPC_COMMON_REG_LIST_DCN1_0(0),
MPC_COMMON_REG_LIST_DCN1_0(1),
MPC_COMMON_REG_LIST_DCN1_0(2),
MPC_COMMON_REG_LIST_DCN1_0(3),
MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
};
static const struct dcn_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
};
static const struct dcn_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
};
#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
static const struct dcn_optc_registers tg_regs[] = {
tg_regs(0),
tg_regs(1),
tg_regs(2),
tg_regs(3),
};
static const struct dcn_optc_shift tg_shift = {
TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};
static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
#define hubp_regs(id)\
[id] = {\
HUBP_REG_LIST_DCN10(id)\
}
static const struct dcn_mi_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
hubp_regs(2),
hubp_regs(3),
};
static const struct dcn_mi_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dcn_mi_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN10(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN10(0)
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};
static int map_transmitter_id_to_phy_instance(
enum transmitter transmitter)
{
switch (transmitter) {
case TRANSMITTER_UNIPHY_A:
return 0;
case TRANSMITTER_UNIPHY_B:
return 1;
case TRANSMITTER_UNIPHY_C:
return 2;
case TRANSMITTER_UNIPHY_D:
return 3;
default:
ASSERT(0);
return 0;
}
}
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, C),
clk_src_regs(3, D)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
.num_audio = 4,
.num_stream_encoder = 4,
.num_pll = 4,
.num_ddc = 4,
};
static const struct resource_caps rv2_res_cap = {
.num_timing_generator = 3,
.num_opp = 3,
.num_video_plane = 3,
.num_audio = 3,
.num_stream_encoder = 3,
.num_pll = 3,
.num_ddc = 4,
};
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
.p010 = true
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 1
}
};
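/* The scale factors above appear to be in thousandths of the scaling
* ratio: 16000 -> up to 16x upscale, 250 -> downscale to 1/4 size, and a
* value of 1 is treated by the DM layer as "no scaling allowed" (fp16).
* Interpretation added for clarity; the units are not defined in this file.
*/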
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
/* Raven SMU doesn't allow 0 disp clk;
* the SMU min disp clk limit is 50 MHz.
* Keep min disp clk at 100 MHz to avoid an SMU hang.
*/
.min_disp_clk_khz = 100000,
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = true,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
.disable_stereo_support = true,
.vsr_support = true,
.performance_trace = false,
.az_endpoint_mute_only = true,
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_legacy_fast_update = true,
};
static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
.disable_stutter = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
};
static void dcn10_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN10_DPP(*dpp));
*dpp = NULL;
}
static struct dpp *dcn10_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn10_dpp *dpp =
kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
dpp1_construct(dpp, ctx, inst,
&tf_regs[inst], &tf_shift, &tf_mask);
return &dpp->base;
}
static struct input_pixel_processor *dcn10_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn10_ipp_construct(ipp, ctx, inst,
&ipp_regs[inst], &ipp_shift, &ipp_mask);
return &ipp->base;
}
static struct output_pixel_processor *dcn10_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_opp *opp =
kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn10_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
i2c_inst_regs(3),
i2c_inst_regs(4),
i2c_inst_regs(5),
i2c_inst_regs(6),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
GFP_KERNEL);
if (!mpc10)
return NULL;
dcn10_mpc_construct(mpc10, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
4);
return &mpc10->base;
}
static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx)
{
struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub),
GFP_KERNEL);
if (!dcn10_hubbub)
return NULL;
hubbub1_construct(&dcn10_hubbub->base, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask);
return &dcn10_hubbub->base;
}
static struct timing_generator *dcn10_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &tg_regs[instance];
tgn10->tg_shift = &tg_shift;
tgn10->tg_mask = &tg_mask;
dcn10_timing_generator_init(tgn10);
return &tgn10->base;
}
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
static struct link_encoder *dcn10_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn10_link_encoder *enc10 =
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
int link_regs_id;
if (!enc10)
return NULL;
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
dcn10_link_encoder_construct(enc10,
enc_init_data,
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc10->base;
}
static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dce_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
if (!panel_cntl)
return NULL;
dce_panel_cntl_construct(panel_cntl,
init_data,
&panel_cntl_regs[init_data->inst],
&panel_cntl_shift,
&panel_cntl_mask);
return &panel_cntl->base;
}
static struct clock_source *dcn10_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dce112_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct audio *create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
static struct stream_encoder *dcn10_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
if (!enc1)
return NULL;
dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN1_REG_LIST()
};
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN1_MASK_SH_LIST(_MASK)
};
static struct dce_hwseq *dcn10_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN10_253 = true;
hws->wa.false_optc_underflow = true;
hws->wa.DEGVIDCN10_254 = true;
if ((ctx->asic_id.chip_family == FAMILY_RV) &&
ASICREV_IS_RAVEN2(ctx->asic_id.hw_internal_rev))
switch (ctx->asic_id.pci_revision_id) {
case PRID_POLLOCK_94:
case PRID_POLLOCK_95:
case PRID_POLLOCK_E9:
case PRID_POLLOCK_EA:
case PRID_POLLOCK_EB:
hws->wa.wait_hubpret_read_start_during_mpo_transition = true;
break;
default:
hws->wa.wait_hubpret_read_start_during_mpo_transition = false;
break;
}
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = create_audio,
.create_stream_encoder = dcn10_stream_encoder_create,
.create_hwseq = dcn10_hwseq_create,
};
static void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
}
static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
dm_pp_get_funcs(ctx, pp_smu);
return pp_smu;
}
static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN10_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
if (pool->base.dpps[i] != NULL)
dcn10_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
kfree(pool->base.sw_i2cs[i]);
pool->base.sw_i2cs[i] = NULL;
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn10_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
kfree(pool->base.pp_smu);
}
static struct hubp *dcn10_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn10_hubp *hubp1 =
kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);
if (!hubp1)
return NULL;
dcn10_hubp_construct(hubp1, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask);
return &hubp1->base;
}
static void get_pixel_clock_parameters(
const struct pipe_ctx *pipe_ctx,
struct pixel_clk_params *pixel_clk_params)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
pixel_clk_params->signal_type = pipe_ctx->stream->signal;
pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
/* TODO: un-hardcode*/
pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
LINK_RATE_REF_FREQ_IN_KHZ;
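/* Illustration: if LINK_RATE_LOW is 0x06 (RBR) and
* LINK_RATE_REF_FREQ_IN_KHZ is 270000, as defined elsewhere in DC, this
* hardcodes 6 * 270000 = 1620000, i.e. the 1.62 Gbps RBR rate expressed
* in kHz - hence the un-hardcode TODO above.
*/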
pixel_clk_params->flags.ENABLE_SS = 0;
pixel_clk_params->color_depth =
stream->timing.display_color_depth;
pixel_clk_params->flags.DISPLAY_BLANKED = 1;
pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
pixel_clk_params->color_depth = COLOR_DEPTH_888;
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
pixel_clk_params->requested_pix_clk_100hz /= 2;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
pixel_clk_params->requested_pix_clk_100hz *= 2;
}
static void build_clamping_params(struct dc_stream_state *stream)
{
stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
stream->clamping.c_depth = stream->timing.display_color_depth;
stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}
static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
&pipe_ctx->pll_settings);
pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
resource_build_bit_depth_reduction_params(pipe_ctx->stream,
&pipe_ctx->stream->bit_depth_params);
build_clamping_params(pipe_ctx->stream);
}
static enum dc_status build_mapped_resource(
const struct dc *dc,
struct dc_state *context,
struct dc_stream_state *stream)
{
struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
build_pipe_hw_param(pipe_ctx);
return DC_OK;
}
static enum dc_status dcn10_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
result = resource_map_pool_resources(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = build_mapped_resource(dc, new_ctx, dc_stream);
return result;
}
static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
struct resource_context *res_ctx = &new_ctx->res_ctx;
struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
if (!head_pipe) {
ASSERT(0);
return NULL;
}
if (!idle_pipe)
return NULL;
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
return idle_pipe;
}
static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
dc->res_pool->hubbub,
input,
output);
}
static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
dcn10_resource_destruct(dcn10_pool);
kfree(dcn10_pool);
*pool = NULL;
}
static bool dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
DC_FP_START();
voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
DC_FP_END();
return voltage_supported;
}
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
{
if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
&& caps->max_video_width != 0
&& plane_state->src_rect.width > caps->max_video_width)
return DC_FAIL_SURFACE_VALIDATE;
return DC_OK;
}
static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context)
{
int i, j;
bool video_down_scaled = false;
bool video_large = false;
bool desktop_large = false;
bool dcc_disabled = false;
bool mpo_enabled = false;
for (i = 0; i < context->stream_count; i++) {
if (context->stream_status[i].plane_count == 0)
continue;
if (context->stream_status[i].plane_count > 2)
return DC_FAIL_UNSUPPORTED_1;
if (context->stream_status[i].plane_count > 1)
mpo_enabled = true;
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
context->stream_status[i].plane_states[j];
if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
if (plane->src_rect.width > plane->dst_rect.width ||
plane->src_rect.height > plane->dst_rect.height)
video_down_scaled = true;
if (plane->src_rect.width >= 3840)
video_large = true;
} else {
if (plane->src_rect.width >= 3840)
desktop_large = true;
if (!plane->dcc.enable)
dcc_disabled = true;
}
}
}
/* Disable MPO in multi-display configurations. */
if (context->stream_count > 1 && mpo_enabled)
return DC_FAIL_UNSUPPORTED_1;
/*
* Workaround: on DCN10 there is a UMC issue that causes underflow when
* playing 4K video on a 4K desktop with the video downscaled and
* single-channel memory.
*/
if (video_large && desktop_large && video_down_scaled && dcc_disabled &&
dc->dcn_soc->number_of_channels == 1)
return DC_FAIL_SURFACE_VALIDATE;
return DC_OK;
}
static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
enum swizzle_mode_values swizzle = DC_SW_LINEAR;
if (bpp == 64)
swizzle = DC_SW_64KB_D;
else
swizzle = DC_SW_64KB_S;
plane_state->tiling_info.gfx9.swizzle = swizzle;
return DC_OK;
}
struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream)
{
int i;
int j = -1;
struct dc_link *link = stream->link;
for (i = 0; i < pool->stream_enc_count; i++) {
if (!res_ctx->is_stream_enc_acquired[i] &&
pool->stream_enc[i]) {
/* Store the first available encoder for the second MST display
* in a daisy-chain use case.
*/
j = i;
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
}
}
/*
* For CZ and later, we can allow DIG FE and BE to differ for all display types
*/
if (j >= 0)
return pool->stream_enc[j];
return NULL;
}
static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
.panel_cntl_create = dcn10_panel_cntl_create,
.validate_bandwidth = dcn10_validate_bandwidth,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn10_acquire_free_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
/* RV1 supports a max of 4 pipes */
value = value & 0xf;
return value;
}
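/* Each set bit in the fuse value marks a disabled pipe; the construct
* path below skips those instances via (pipe_fuses & (1 << i)). For
* example, a value of 0x8 would fuse off pipe 3, leaving 3 usable pipes.
*/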
static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
{
int i;
if (clks->num_levels == 0)
return false;
for (i = 0; i < clks->num_levels; i++)
/* Ensure that the result is sane */
if (clks->data[i].clocks_in_khz == 0)
return false;
return true;
}
static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn10_resource_pool *pool)
{
int i;
int j;
struct dc_context *ctx = dc->ctx;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
bool res;
ctx->dc_bios->regs = &bios_regs;
if (ctx->dce_version == DCN_VERSION_1_01)
pool->base.res_cap = &rv2_res_cap;
else
pool->base.res_cap = &res_cap;
pool->base.funcs = &dcn10_res_pool_funcs;
/*
* TODO: fill in from the actual Raven resource caps once we create
* more than the virtual encoder
*/
/*************************************************
* Resource + asic cap hardcoding *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
/* max pipe num for the ASIC before checking pipe fuses */
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
if (dc->ctx->dce_version == DCN_VERSION_1_01)
pool->base.pipe_count = 3;
dc->caps.max_video_width = 3840;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 0;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
dc->caps.extended_aux_timeout_support = false;
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 1;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 1;
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.dgam_rom_caps.pq = 0;
dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
dc->caps.color.dpp.post_csc = 0;
dc->caps.color.dpp.gamma_corr = 0;
dc->caps.color.dpp.dgam_rom_for_yuv = 1;
dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1
dc->caps.color.dpp.ogam_rom_caps.srgb = 1;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 1;
/* no post-blend color operations */
dc->caps.color.mpc.gamut_remap = 0;
dc->caps.color.mpc.num_3dluts = 0;
dc->caps.color.mpc.shared_3d_lut = 0;
dc->caps.color.mpc.ogam_ram = 0;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 0;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
dc->debug = debug_defaults_diags;
/*************************************************
* Create resources *
*************************************************/
pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
dcn10_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
dcn10_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
dcn10_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
if (dc->ctx->dce_version == DCN_VERSION_1_0) {
pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
dcn10_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
}
pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
if (dc->ctx->dce_version == DCN_VERSION_1_01)
pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL;
pool->base.dp_clock_source =
dcn10_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
/* TODO: do not reuse phy_pll registers */
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto fail;
}
}
pool->base.dmcu = dcn10_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto fail;
}
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
goto fail;
}
dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
DC_FP_START();
dcn10_resource_construct_fp(dc);
DC_FP_END();
if (!dc->config.is_vmin_only_asic)
if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev))
switch (dc->ctx->asic_id.pci_revision_id) {
case PRID_DALI_DE:
case PRID_DALI_DF:
case PRID_DALI_E3:
case PRID_DALI_E4:
case PRID_POLLOCK_94:
case PRID_POLLOCK_95:
case PRID_POLLOCK_E9:
case PRID_POLLOCK_EA:
case PRID_POLLOCK_EB:
dc->config.is_vmin_only_asic = true;
break;
default:
break;
}
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
/*
* Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification
* implemented, so AZ D3 should work. For issue 197007.
*/
if (pool->base.pp_smu != NULL
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
if (!dc->debug.disable_pplib_clock_request) {
/*
* TODO: This is not the proper way to obtain
* fabric_and_dram_bandwidth, should be min(fclk, memclk).
*/
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
DC_FP_START();
if (res)
res = verify_clock_values(&fclks);
if (res)
dcn_bw_update_from_pplib_fclks(dc, &fclks);
else
BREAK_TO_DEBUGGER();
DC_FP_END();
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
DC_FP_START();
if (res)
res = verify_clock_values(&dcfclks);
if (res)
dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
else
BREAK_TO_DEBUGGER();
DC_FP_END();
}
dcn_bw_sync_calcs_and_dml(dc);
if (!dc->debug.disable_pplib_wm_range) {
dc->res_pool = &pool->base;
DC_FP_START();
dcn_get_soc_clks(
dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
dcn_bw_notify_pplib_of_wm_ranges(
dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
if (!pool->base.irqs)
goto fail;
}
/* index to valid pipe resource */
j = 0;
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
/* if the pipe is disabled, skip this instance of the HW pipe,
* i.e., skip the ASIC register instance
*/
if ((pipe_fuses & (1 << i)) != 0)
continue;
pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
if (pool->base.hubps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create memory input!\n");
goto fail;
}
pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
if (pool->base.ipps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create input pixel processor!\n");
goto fail;
}
pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
if (pool->base.dpps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpp!\n");
goto fail;
}
pool->base.opps[j] = dcn10_opp_create(ctx, i);
if (pool->base.opps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto fail;
}
pool->base.timing_generators[j] = dcn10_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto fail;
}
/* advance to the next valid pipe index */
j++;
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto fail;
}
pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create hw i2c!!\n");
goto fail;
}
pool->base.sw_i2cs[i] = NULL;
}
/* valid pipe num */
pool->base.pipe_count = j;
pool->base.timing_generator_count = j;
/* Within the DML lib this is hard-coded to 4. If ASIC pipes are fused,
* the value may change.
*/
dc->dml.ip.max_num_dpp = pool->base.pipe_count;
dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
pool->base.mpc = dcn10_mpc_create(ctx);
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
goto fail;
}
pool->base.hubbub = dcn10_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create hubbub!\n");
goto fail;
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto fail;
dcn10_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
return true;
fail:
dcn10_resource_destruct(pool);
return false;
}
struct resource_pool *dcn10_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn10_resource_pool *pool =
kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn10_optc.h"
#include "dc.h"
#include "dc_trace.h"
#define REG(reg)\
optc1->tg_regs->reg
#define CTX \
optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
/**
 * apply_front_porch_workaround() - Workaround for a bug that has existed
 * since R5xx and has not been fixed: keep the front porch at a minimum of
 * 2 for interlaced modes or 1 for progressive.
 *
 * @timing: Timing parameters used to configure DCN blocks.
 */
static void apply_front_porch_workaround(struct dc_crtc_timing *timing)
{
if (timing->flags.INTERLACE == 1) {
if (timing->v_front_porch < 2)
timing->v_front_porch = 2;
} else {
if (timing->v_front_porch < 1)
timing->v_front_porch = 1;
}
}
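/* Example: a progressive timing reported with v_front_porch == 0 is bumped
* to 1 here, so the blank and VTG calculations below never see a zero
* front porch.
*/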
void optc1_program_global_sync(
struct timing_generator *optc,
int vready_offset,
int vstartup_start,
int vupdate_offset,
int vupdate_width)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
optc1->vready_offset = vready_offset;
optc1->vstartup_start = vstartup_start;
optc1->vupdate_offset = vupdate_offset;
optc1->vupdate_width = vupdate_width;
if (optc1->vstartup_start == 0) {
BREAK_TO_DEBUGGER();
return;
}
REG_SET(OTG_VSTARTUP_PARAM, 0,
VSTARTUP_START, optc1->vstartup_start);
REG_SET_2(OTG_VUPDATE_PARAM, 0,
VUPDATE_OFFSET, optc1->vupdate_offset,
VUPDATE_WIDTH, optc1->vupdate_width);
REG_SET(OTG_VREADY_PARAM, 0,
VREADY_OFFSET, optc1->vready_offset);
}
static void optc1_disable_stereo(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_STEREO_CONTROL, 0,
OTG_STEREO_EN, 0);
REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0,
OTG_3D_STRUCTURE_EN, 0,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}
void optc1_setup_vertical_interrupt0(
struct timing_generator *optc,
uint32_t start_line,
uint32_t end_line)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
}
void optc1_setup_vertical_interrupt1(
struct timing_generator *optc,
uint32_t start_line)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
OTG_VERTICAL_INTERRUPT1_LINE_START, start_line);
}
void optc1_setup_vertical_interrupt2(
struct timing_generator *optc,
uint32_t start_line)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
OTG_VERTICAL_INTERRUPT2_LINE_START, start_line);
}
/**
 * optc1_program_timing() - Used during mode timing set to program the CRTC
 * timing registers (OTG_H_*, OTG_V_*, pixel repetition), including SYNC,
 * and to call the BIOS command table to program timings.
*
* @optc: timing_generator instance.
* @dc_crtc_timing: Timing parameters used to configure DCN blocks.
* @vready_offset: Vready's starting position.
* @vstartup_start: Vstartup period.
* @vupdate_offset: Vupdate starting position.
* @vupdate_width: Vupdate duration.
* @signal: DC signal types.
* @use_vbios: to program timings from BIOS command table.
*
*/
void optc1_program_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
int vready_offset,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
const enum signal_type signal,
bool use_vbios)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end;
uint32_t asic_blank_start;
uint32_t v_total;
uint32_t v_sync_end;
uint32_t h_sync_polarity, v_sync_polarity;
uint32_t start_point = 0;
uint32_t field_num = 0;
enum h_timing_div_mode h_div = H_TIMING_NO_DIV;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
optc1->signal = signal;
optc1->vready_offset = vready_offset;
optc1->vstartup_start = vstartup_start;
optc1->vupdate_offset = vupdate_offset;
optc1->vupdate_width = vupdate_width;
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
optc1->orginal_patched_timing = patched_crtc_timing;
/* Load horizontal timing */
/* CRTC_H_TOTAL = vesa.h_total - 1 */
REG_SET(OTG_H_TOTAL, 0,
OTG_H_TOTAL, patched_crtc_timing.h_total - 1);
/* h_sync_start = 0, h_sync_end = vesa.h_sync_width */
REG_UPDATE_2(OTG_H_SYNC_A,
OTG_H_SYNC_A_START, 0,
OTG_H_SYNC_A_END, patched_crtc_timing.h_sync_width);
/* blank_start = line end - front porch */
asic_blank_start = patched_crtc_timing.h_total -
patched_crtc_timing.h_front_porch;
/* blank_end = blank_start - active */
asic_blank_end = asic_blank_start -
patched_crtc_timing.h_border_right -
patched_crtc_timing.h_addressable -
patched_crtc_timing.h_border_left;
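/* Worked example (hypothetical CEA 1080p timing, no borders):
* h_total = 2200, h_front_porch = 88, h_addressable = 1920
* -> blank_start = 2200 - 88 = 2112, blank_end = 2112 - 1920 = 192.
*/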
REG_UPDATE_2(OTG_H_BLANK_START_END,
OTG_H_BLANK_START, asic_blank_start,
OTG_H_BLANK_END, asic_blank_end);
/* h_sync polarity */
h_sync_polarity = patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ?
0 : 1;
REG_UPDATE(OTG_H_SYNC_A_CNTL,
OTG_H_SYNC_A_POL, h_sync_polarity);
v_total = patched_crtc_timing.v_total - 1;
REG_SET(OTG_V_TOTAL, 0,
OTG_V_TOTAL, v_total);
/* In case V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
* OTG_V_TOTAL_MIN are equal to V_TOTAL.
*/
optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
/* v_sync_start = 0, v_sync_end = v_sync_width */
v_sync_end = patched_crtc_timing.v_sync_width;
REG_UPDATE_2(OTG_V_SYNC_A,
OTG_V_SYNC_A_START, 0,
OTG_V_SYNC_A_END, v_sync_end);
/* blank_start = frame end - front porch */
asic_blank_start = patched_crtc_timing.v_total -
patched_crtc_timing.v_front_porch;
/* blank_end = blank_start - active */
asic_blank_end = asic_blank_start -
patched_crtc_timing.v_border_bottom -
patched_crtc_timing.v_addressable -
patched_crtc_timing.v_border_top;
REG_UPDATE_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, asic_blank_start,
OTG_V_BLANK_END, asic_blank_end);
/* v_sync polarity */
v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ?
0 : 1;
REG_UPDATE(OTG_V_SYNC_A_CNTL,
OTG_V_SYNC_A_POL, v_sync_polarity);
if (optc1->signal == SIGNAL_TYPE_DISPLAY_PORT ||
optc1->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
optc1->signal == SIGNAL_TYPE_EDP) {
start_point = 1;
if (patched_crtc_timing.flags.INTERLACE == 1)
field_num = 1;
}
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
if (patched_crtc_timing.flags.INTERLACE == 1)
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 1);
else
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 0);
}
/* VTG enable set to 0 first; VInit is programmed later via set_vtg_params */
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
/* the original code used the VTG offset to address the OTG register, which seems wrong */
REG_UPDATE_2(OTG_CONTROL,
OTG_START_POINT_CNTL, start_point,
OTG_FIELD_NUMBER_CNTL, field_num);
optc->funcs->program_global_sync(optc,
vready_offset,
vstartup_start,
vupdate_offset,
vupdate_width);
optc->funcs->set_vtg_params(optc, dc_crtc_timing, true);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
* program_horz_count_by_2
* for DVI 30bpp mode, 0 otherwise
* program_horz_count_by_2(optc, &patched_crtc_timing);
*/
/* Enable stereo only when we need to pack a 3D frame. Other types
* of stereo are handled in an explicit call.
*/
if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
if (REG(OPTC_DATA_FORMAT_CONTROL) && optc1->tg_mask->OPTC_DATA_FORMAT != 0) {
uint32_t data_fmt = 0;
if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
data_fmt = 1;
else if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
data_fmt = 2;
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
}
if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) {
if (optc1->opp_count == 4)
h_div = H_TIMING_DIV_BY4;
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_MODE, h_div);
} else {
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div);
}
}
/**
* optc1_set_vtg_params - Set Vertical Timing Generator (VTG) parameters
*
* @optc: timing_generator struct used to extract the optc parameters
* @dc_crtc_timing: Timing parameters configured
* @program_fp2: Boolean value indicating if FP2 will be programmed or not
*
* OTG is responsible for generating the global sync signals, including
* vertical timing information for each HUBP in the dcfclk domain. Each VTG is
* associated with one OTG that provides HUBP with vertical timing information
* (i.e., there is 1:1 correspondence between OTG and VTG). This function is
* responsible for setting the OTG parameters to the VTG during the pipe
* programming.
*/
void optc1_set_vtg_params(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
/* VCOUNT_INIT is the start of blank */
v_init = patched_crtc_timing.v_total - patched_crtc_timing.v_front_porch;
/* end of blank = v_init - active */
asic_blank_end = v_init -
patched_crtc_timing.v_border_bottom -
patched_crtc_timing.v_addressable -
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
if (vertical_line_start < 0)
v_fp2 = -vertical_line_start;
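/* Worked example (hypothetical 1080p timing): v_total = 1125,
* v_front_porch = 4 -> v_init = 1121; with v_addressable = 1080 and no
* borders, asic_blank_end = 41. If vstartup_start were 50,
* vertical_line_start = 41 - 50 + 1 = -8, so v_fp2 = 8.
*/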
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
if (patched_crtc_timing.flags.INTERLACE == 1) {
v_init = v_init / 2;
if ((optc1->vstartup_start/2)*2 > asic_blank_end)
v_fp2 = v_fp2 / 2;
}
}
if (program_fp2)
REG_UPDATE_2(CONTROL,
VTG0_FP2, v_fp2,
VTG0_VCOUNT_INIT, v_init);
else
REG_UPDATE(CONTROL, VTG0_VCOUNT_INIT, v_init);
}
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
OTG_BLANK_DATA_DOUBLE_BUFFER_EN, blank_data_double_buffer_enable);
}
/**
* optc1_set_timing_double_buffer() - DRR double buffering control
*
* Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
* VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
*
* @optc: timing_generator instance.
* @enable: Enable DRR double buffering control if true, disable otherwise.
*
* Options: any time, start of frame, dp start of frame (range timing)
*/
void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t mode = enable ? 2 : 0;
REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode);
}
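/* Per the options listed above, mode 0 presumably selects "any time" and
* mode 2 the DP start-of-frame (range timing) update point; this mapping
* is inferred from the field name, not confirmed here.
*/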
/**
* optc1_unblank_crtc() - Call ASIC Control Object to UnBlank CRTC.
*
* @optc: timing_generator instance.
*/
static void optc1_unblank_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 0,
OTG_BLANK_DE_MODE, 0);
/* W/A for automated testing
* Automated testing will fail the underflow test as there are
* sporadic underflows which occur during the optc blank
* sequence. As a w/a, clear underflow on unblank.
* This prevents the failure, but will not mask actual
* underflows that affect real use cases.
*/
optc1_clear_optc_underflow(optc);
}
/**
* optc1_blank_crtc() - Call ASIC Control Object to Blank CRTC.
*
* @optc: timing_generator instance.
*/
static void optc1_blank_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 1,
OTG_BLANK_DE_MODE, 0);
optc1_set_blank_data_double_buffer(optc, false);
}
void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking)
{
if (enable_blanking)
optc1_blank_crtc(optc);
else
optc1_unblank_crtc(optc);
}
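/* The OTG is reported as blanked only when blanking is both requested and
 * currently in effect, i.e. OTG_BLANK_DATA_EN and OTG_CURRENT_BLANK_STATE
 * are both set.
 */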
bool optc1_is_blanked(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t blank_en;
uint32_t blank_state;
REG_GET_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, &blank_en,
OTG_CURRENT_BLANK_STATE, &blank_state);
return blank_en && blank_state;
}
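/* Gate or ungate the OPTC input clock and the OTG clock. On enable, wait for
 * each clock to report ON before continuing; on disable, clear any pending
 * underflow first, since it can no longer be cleared once the clock is off.
 */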
void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (enable) {
REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
OPTC_INPUT_CLK_EN, 1,
OPTC_INPUT_CLK_GATE_DIS, 1);
REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
OPTC_INPUT_CLK_ON, 1,
1, 1000);
/* Enable clock */
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_EN, 1,
OTG_CLOCK_GATE_DIS, 1);
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_CLOCK_ON, 1,
1, 1000);
} else {
// last chance to clear underflow; otherwise it will always be there because the clock is off
if (optc->funcs->is_optc_underflow_occurred(optc) == true)
optc->funcs->clear_optc_underflow(optc);
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
OPTC_INPUT_CLK_GATE_DIS, 0,
OPTC_INPUT_CLK_EN, 0);
}
}
/**
* optc1_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator.
*
* @optc: timing_generator instance.
*/
static bool optc1_enable_crtc(struct timing_generator *optc)
{
/* TODO FPGA wait for answer
* OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
* OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
*/
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* opp instance for OTG. For DCN1.0, ODM is removed.
 * OPP and OPTC should be 1:1 mapped
 */
REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
OPTC_SRC_SEL, optc->inst);
/* VTG enable first is for HW workaround */
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
REG_SEQ_START();
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
REG_SEQ_SUBMIT();
REG_SEQ_WAIT_DONE();
return true;
}
/* disable_crtc - call ASIC Control Object to disable Timing generator. */
bool optc1_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* disable otg request until end of the first line
* in the vertical blank region
*/
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 0);
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
return true;
}
void optc1_program_blank_color(
struct timing_generator *optc,
const struct tg_color *black_color)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET_3(OTG_BLACK_COLOR, 0,
OTG_BLACK_COLOR_B_CB, black_color->color_b_cb,
OTG_BLACK_COLOR_G_Y, black_color->color_g_y,
OTG_BLACK_COLOR_R_CR, black_color->color_r_cr);
}
bool optc1_validate_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *timing)
{
uint32_t v_blank;
uint32_t h_blank;
uint32_t min_v_blank;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
ASSERT(timing != NULL);
v_blank = (timing->v_total - timing->v_addressable -
timing->v_border_top - timing->v_border_bottom);
h_blank = (timing->h_total - timing->h_addressable -
timing->h_border_right -
timing->h_border_left);
if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING &&
timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM &&
timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE &&
timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE &&
timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
return false;
/* Temporarily blocking interlacing mode until it's supported */
if (timing->flags.INTERLACE == 1)
return false;
/* Check maximum number of pixels supported by the Timing Generator
 * (currently this will never fail; to fail it would need a display
 * requiring more than 8192 horizontal and
 * more than 8192 vertical total pixels)
 */
if (timing->h_total > optc1->max_h_total ||
timing->v_total > optc1->max_v_total)
return false;
if (h_blank < optc1->min_h_blank)
return false;
if (timing->h_sync_width < optc1->min_h_sync_width ||
timing->v_sync_width < optc1->min_v_sync_width)
return false;
min_v_blank = timing->flags.INTERLACE ? optc1->min_v_blank_interlace : optc1->min_v_blank;
if (v_blank < min_v_blank)
return false;
return true;
}
/*
* get_vblank_counter
*
* @brief
* Get counter for vertical blanks. Use register OTG_STATUS_FRAME_COUNT, which
* holds the counter of frames.
*
* @param
* struct timing_generator *optc - [in] timing generator which controls the
* desired CRTC
*
* @return
* Counter of frames, which should equal the number of vblanks.
*/
uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t frame_count;
REG_GET(OTG_STATUS_FRAME_COUNT,
OTG_FRAME_COUNT, &frame_count);
return frame_count;
}
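/* Take the OTG master update lock for this instance so pending register
 * changes are held back and can later be applied atomically; the wait
 * ensures the lock has actually taken effect before programming continues.
 */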
void optc1_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);
}
void optc1_unlock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, false);
}
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET_2(OTG_STATUS_POSITION,
OTG_HORZ_COUNT, &position->horizontal_count,
OTG_VERT_COUNT, &position->vertical_count);
REG_GET(OTG_NOM_VERT_POSITION,
OTG_VERT_COUNT_NOM, &position->nominal_vcount);
}
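/* Sample the CRTC position twice; if neither the horizontal nor the vertical
 * count changed between the two reads, the timing generator counter is
 * considered stopped.
 */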
bool optc1_is_counter_moving(struct timing_generator *optc)
{
struct crtc_position position1, position2;
optc->funcs->get_position(optc, &position1);
optc->funcs->get_position(optc, &position2);
if (position1.horizontal_count == position2.horizontal_count &&
position1.vertical_count == position2.vertical_count)
return false;
else
return true;
}
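/* A triggered reset is reported if either the force-count-now event or the
 * forced-vsync-next-line event has occurred since the flags were last cleared.
 */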
bool optc1_did_triggered_reset_occur(
struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t occurred_force, occurred_vsync;
REG_GET(OTG_FORCE_COUNT_NOW_CNTL,
OTG_FORCE_COUNT_NOW_OCCURRED, &occurred_force);
REG_GET(OTG_VERT_SYNC_CONTROL,
OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, &occurred_vsync);
return occurred_vsync != 0 || occurred_force != 0;
}
void optc1_disable_reset_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_WRITE(OTG_TRIGA_CNTL, 0);
REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
OTG_FORCE_COUNT_NOW_CLEAR, 1);
REG_SET(OTG_VERT_SYNC_CONTROL, 0,
OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, 1);
}
void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t falling_edge;
REG_GET(OTG_V_SYNC_A_CNTL,
OTG_V_SYNC_A_POL, &falling_edge);
if (falling_edge)
REG_SET_3(OTG_TRIGA_CNTL, 0,
/* vsync signal from selected OTG pipe based
* on OTG_TRIG_SOURCE_PIPE_SELECT setting
*/
OTG_TRIGA_SOURCE_SELECT, 20,
OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
/* always detect falling edge */
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 1);
else
REG_SET_3(OTG_TRIGA_CNTL, 0,
/* vsync signal from selected OTG pipe based
* on OTG_TRIG_SOURCE_PIPE_SELECT setting
*/
OTG_TRIGA_SOURCE_SELECT, 20,
OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
/* always detect rising edge */
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1);
REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
/* force H count to H_TOTAL and V count to V_TOTAL in
* progressive mode and V_TOTAL-1 in interlaced mode
*/
OTG_FORCE_COUNT_NOW_MODE, 2);
}
void optc1_enable_crtc_reset(
struct timing_generator *optc,
int source_tg_inst,
struct crtc_trigger_info *crtc_tp)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t falling_edge = 0;
uint32_t rising_edge = 0;
switch (crtc_tp->event) {
case CRTC_EVENT_VSYNC_RISING:
rising_edge = 1;
break;
case CRTC_EVENT_VSYNC_FALLING:
falling_edge = 1;
break;
}
REG_SET_4(OTG_TRIGA_CNTL, 0,
/* vsync signal from selected OTG pipe based
* on OTG_TRIG_SOURCE_PIPE_SELECT setting
*/
OTG_TRIGA_SOURCE_SELECT, 20,
OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
/* always detect falling edge */
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, rising_edge,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, falling_edge);
switch (crtc_tp->delay) {
case TRIGGER_DELAY_NEXT_LINE:
REG_SET(OTG_VERT_SYNC_CONTROL, 0,
OTG_AUTO_FORCE_VSYNC_MODE, 1);
break;
case TRIGGER_DELAY_NEXT_PIXEL:
REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
/* force H count to H_TOTAL and V count to V_TOTAL in
* progressive mode and V_TOTAL-1 in interlaced mode
*/
OTG_FORCE_COUNT_NOW_MODE, 2);
break;
}
}
void optc1_wait_for_state(struct timing_generator *optc,
enum crtc_state state)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
switch (state) {
case CRTC_STATE_VBLANK:
REG_WAIT(OTG_STATUS,
OTG_V_BLANK, 1,
1, 100000); /* 1 vupdate at 10hz */
break;
case CRTC_STATE_VACTIVE:
REG_WAIT(OTG_STATUS,
OTG_V_ACTIVE_DISP, 1,
1, 100000); /* 1 vupdate at 10hz */
break;
default:
break;
}
}
void optc1_set_early_control(
struct timing_generator *optc,
uint32_t early_cntl)
{
/* ASIC design change: this control is no longer needed.
 * Left empty to share the caller logic.
 */
}
void optc1_set_static_screen_control(
struct timing_generator *optc,
uint32_t event_triggers,
uint32_t num_frames)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
// By register spec, it only takes an 8-bit value
if (num_frames > 0xFF)
num_frames = 0xFF;
/* Bit 8 is no longer applicable in RV for PSR case,
* set bit 8 to 0 if given
*/
if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
!= 0)
event_triggers = event_triggers &
~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
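/* Route this OTG instance's manual flow control signal to TRIG_A (trigger
 * source 22, pipe select = this instance) and arm rising-edge detection, so a
 * software-generated pulse can trigger frame events; used by set_drr for EOF
 * control.
 */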
static void optc1_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL2, 0,
MANUAL_FLOW_CONTROL_SEL, optc->inst);
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 22,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
OTG_TRIGA_POLARITY_SELECT, 0,
OTG_TRIGA_FREQUENCY_SELECT, 0,
OTG_TRIGA_DELAY, 0,
OTG_TRIGA_CLEAR, 1);
}
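/* Generate a single manual flow control pulse by setting the bit and then
 * clearing it.
 */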
static void optc1_program_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
MANUAL_FLOW_CONTROL, 1);
REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
MANUAL_FLOW_CONTROL, 0);
}
/**
* optc1_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
*
* @optc: timing_generator instance.
* @params: parameters used for Dynamic Refresh Rate.
*/
void optc1_set_drr(
struct timing_generator *optc,
const struct drr_params *params)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (params != NULL &&
params->vertical_total_max > 0 &&
params->vertical_total_min > 0) {
if (params->vertical_total_mid != 0) {
REG_SET(OTG_V_TOTAL_MID, 0,
OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
OTG_VTOTAL_MID_FRAME_NUM,
(uint8_t)params->vertical_total_mid_frame_num);
}
optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
}
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
}
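/* Program the OTG_V_TOTAL_MIN/MAX range that bounds the vertical total when
 * dynamic refresh rate (DRR) stretches or shrinks the frame.
 */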
void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_V_TOTAL_MAX, 0,
OTG_V_TOTAL_MAX, vtotal_max);
REG_SET(OTG_V_TOTAL_MIN, 0,
OTG_V_TOTAL_MIN, vtotal_min);
}
static void optc1_set_test_pattern(
struct timing_generator *optc,
/* TODO: replace 'controller_dp_test_pattern' with 'test_pattern_mode'
 * because this is not DP-specific (the DP-specific handling probably
 * belongs in the DP encoder) */
enum controller_dp_test_pattern test_pattern,
enum dc_color_depth color_depth)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
enum test_pattern_color_format bit_depth;
enum test_pattern_dyn_range dyn_range;
enum test_pattern_mode mode;
uint32_t pattern_mask;
uint32_t pattern_data;
/* color ramp generator mixes 16-bits color */
uint32_t src_bpc = 16;
/* requested bpc */
uint32_t dst_bpc;
uint32_t index;
/* RGB values of the color bars.
* Produce two RGB colors: RGB0 - white (all Fs)
* and RGB1 - black (all 0s)
* (three RGB components for two colors)
*/
uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
0x0000, 0x0000};
/* dest color (converted to the specified color format) */
uint16_t dst_color[6];
uint32_t inc_base;
/* translate to bit depth */
switch (color_depth) {
case COLOR_DEPTH_666:
bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
break;
case COLOR_DEPTH_888:
bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
break;
case COLOR_DEPTH_101010:
bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
break;
case COLOR_DEPTH_121212:
bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
break;
default:
bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
break;
}
switch (test_pattern) {
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
{
dyn_range = (test_pattern ==
CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
TEST_PATTERN_DYN_RANGE_CEA :
TEST_PATTERN_DYN_RANGE_VESA);
mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
REG_UPDATE_2(OTG_TEST_PATTERN_PARAMETERS,
OTG_TEST_PATTERN_VRES, 6,
OTG_TEST_PATTERN_HRES, 6);
REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
OTG_TEST_PATTERN_EN, 1,
OTG_TEST_PATTERN_MODE, mode,
OTG_TEST_PATTERN_DYNAMIC_RANGE, dyn_range,
OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
}
break;
case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
{
mode = (test_pattern ==
CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
TEST_PATTERN_MODE_VERTICALBARS :
TEST_PATTERN_MODE_HORIZONTALBARS);
switch (bit_depth) {
case TEST_PATTERN_COLOR_FORMAT_BPC_6:
dst_bpc = 6;
break;
case TEST_PATTERN_COLOR_FORMAT_BPC_8:
dst_bpc = 8;
break;
case TEST_PATTERN_COLOR_FORMAT_BPC_10:
dst_bpc = 10;
break;
default:
dst_bpc = 8;
break;
}
/* adjust color to the required colorFormat */
for (index = 0; index < 6; index++) {
/* dst = 2^dstBpc * src / 2^srcBpc = src >>
* (srcBpc - dstBpc);
*/
dst_color[index] =
src_color[index] >> (src_bpc - dst_bpc);
/* CRTC_TEST_PATTERN_DATA has 16 bits,
* lowest 6 are hardwired to ZERO
* color bits should be left aligned to MSB
* XXXXXXXXXX000000 for 10 bit,
* XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit
*/
dst_color[index] <<= (16 - dst_bpc);
}
REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
/* We have to write the mask before data, similar to pipeline.
* For example, for 8 bpc, if we want RGB0 to be magenta,
* and RGB1 to be cyan,
* we need to make 7 writes:
* MASK DATA
* 000001 00000000 00000000 set mask to R0
* 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
* 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
* 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
* 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
* 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
* 100000 11111111 00000000 B1 255, 0xFF00
*
* we will make a loop of 6 in which we prepare the mask,
* then write, then prepare the color for next write.
* first iteration will write mask only,
* but each next iteration color prepared in
* previous iteration will be written within new mask,
* the last component will be written separately,
* mask is not changing between 6th and 7th write
* and color will be prepared by last iteration
*/
/* write color, color values mask in CRTC_TEST_PATTERN_MASK
* is B1, G1, R1, B0, G0, R0
*/
pattern_data = 0;
for (index = 0; index < 6; index++) {
/* prepare color mask, first write PATTERN_DATA
* will have all zeros
*/
pattern_mask = (1 << index);
/* write color component */
REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
OTG_TEST_PATTERN_MASK, pattern_mask,
OTG_TEST_PATTERN_DATA, pattern_data);
/* prepare next color component,
* will be written in the next iteration
*/
pattern_data = dst_color[index];
}
/* write last color component,
* it's been already prepared in the loop
*/
REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
OTG_TEST_PATTERN_MASK, pattern_mask,
OTG_TEST_PATTERN_DATA, pattern_data);
/* enable test pattern */
REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
OTG_TEST_PATTERN_EN, 1,
OTG_TEST_PATTERN_MODE, mode,
OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
}
break;
case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
{
mode = (bit_depth ==
TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
TEST_PATTERN_MODE_DUALRAMP_RGB :
TEST_PATTERN_MODE_SINGLERAMP_RGB);
switch (bit_depth) {
case TEST_PATTERN_COLOR_FORMAT_BPC_6:
dst_bpc = 6;
break;
case TEST_PATTERN_COLOR_FORMAT_BPC_8:
dst_bpc = 8;
break;
case TEST_PATTERN_COLOR_FORMAT_BPC_10:
dst_bpc = 10;
break;
default:
dst_bpc = 8;
break;
}
/* increment for the first ramp for one color gradation
* 1 gradation for 6-bit color is 2^10
* gradations in 16-bit color
*/
inc_base = (src_bpc - dst_bpc);
switch (bit_depth) {
case TEST_PATTERN_COLOR_FORMAT_BPC_6:
{
REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
OTG_TEST_PATTERN_INC0, inc_base,
OTG_TEST_PATTERN_INC1, 0,
OTG_TEST_PATTERN_HRES, 6,
OTG_TEST_PATTERN_VRES, 6,
OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
}
break;
case TEST_PATTERN_COLOR_FORMAT_BPC_8:
{
REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
OTG_TEST_PATTERN_INC0, inc_base,
OTG_TEST_PATTERN_INC1, 0,
OTG_TEST_PATTERN_HRES, 8,
OTG_TEST_PATTERN_VRES, 6,
OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
}
break;
case TEST_PATTERN_COLOR_FORMAT_BPC_10:
{
REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
OTG_TEST_PATTERN_INC0, inc_base,
OTG_TEST_PATTERN_INC1, inc_base + 2,
OTG_TEST_PATTERN_HRES, 8,
OTG_TEST_PATTERN_VRES, 5,
OTG_TEST_PATTERN_RAMP0_OFFSET, 384 << 6);
}
break;
default:
break;
}
REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
/* enable test pattern */
REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
REG_SET_4(OTG_TEST_PATTERN_CONTROL, 0,
OTG_TEST_PATTERN_EN, 1,
OTG_TEST_PATTERN_MODE, mode,
OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
}
break;
case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
{
REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
}
break;
default:
break;
}
}
void optc1_get_crtc_scanoutpos(
struct timing_generator *optc,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
uint32_t *h_position,
uint32_t *v_position)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
struct crtc_position position;
REG_GET_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, v_blank_start,
OTG_V_BLANK_END, v_blank_end);
optc1_get_position(optc, &position);
*h_position = position.horizontal_count;
*v_position = position.vertical_count;
}
static void optc1_enable_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (flags) {
uint32_t stereo_en;
stereo_en = flags->FRAME_PACKED == 0 ? 1 : 0;
if (flags->PROGRAM_STEREO)
REG_UPDATE_3(OTG_STEREO_CONTROL,
OTG_STEREO_EN, stereo_en,
OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
if (flags->PROGRAM_POLARITY)
REG_UPDATE(OTG_STEREO_CONTROL,
OTG_STEREO_EYE_FLAG_POLARITY,
flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
if (flags->DISABLE_STEREO_DP_SYNC)
REG_UPDATE(OTG_STEREO_CONTROL,
OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
if (flags->PROGRAM_STEREO)
REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL,
OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
}
}
void optc1_program_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
if (flags->PROGRAM_STEREO)
optc1_enable_stereo(optc, timing, flags);
else
optc1_disable_stereo(optc);
}
bool optc1_is_stereo_left_eye(struct timing_generator *optc)
{
bool ret = false;
uint32_t left_eye = 0;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OTG_STEREO_STATUS,
OTG_STEREO_CURRENT_EYE, &left_eye);
if (left_eye == 1)
ret = true;
else
ret = false;
return ret;
}
bool optc1_get_hw_timing(struct timing_generator *tg,
struct dc_crtc_timing *hw_crtc_timing)
{
struct dcn_otg_state s = {0};
if (tg == NULL || hw_crtc_timing == NULL)
return false;
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
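/* OTG registers hold h/v totals as (value - 1); add 1 to recover the actual
 * totals before deriving the remaining timing fields.
 */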
hw_crtc_timing->h_total = s.h_total + 1;
hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start;
hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
hw_crtc_timing->v_total = s.v_total + 1;
hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start;
hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
return true;
}
void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s)
{
REG_GET(OTG_CONTROL,
OTG_MASTER_EN, &s->otg_enabled);
REG_GET_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, &s->v_blank_start,
OTG_V_BLANK_END, &s->v_blank_end);
REG_GET(OTG_V_SYNC_A_CNTL,
OTG_V_SYNC_A_POL, &s->v_sync_a_pol);
REG_GET(OTG_V_TOTAL,
OTG_V_TOTAL, &s->v_total);
REG_GET(OTG_V_TOTAL_MAX,
OTG_V_TOTAL_MAX, &s->v_total_max);
REG_GET(OTG_V_TOTAL_MIN,
OTG_V_TOTAL_MIN, &s->v_total_min);
REG_GET(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel);
REG_GET(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel);
REG_GET_2(OTG_V_SYNC_A,
OTG_V_SYNC_A_START, &s->v_sync_a_start,
OTG_V_SYNC_A_END, &s->v_sync_a_end);
REG_GET_2(OTG_H_BLANK_START_END,
OTG_H_BLANK_START, &s->h_blank_start,
OTG_H_BLANK_END, &s->h_blank_end);
REG_GET_2(OTG_H_SYNC_A,
OTG_H_SYNC_A_START, &s->h_sync_a_start,
OTG_H_SYNC_A_END, &s->h_sync_a_end);
REG_GET(OTG_H_SYNC_A_CNTL,
OTG_H_SYNC_A_POL, &s->h_sync_a_pol);
REG_GET(OTG_H_TOTAL,
OTG_H_TOTAL, &s->h_total);
REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL,
OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en);
REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION,
OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line);
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION,
OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);
}
bool optc1_get_otg_active_size(struct timing_generator *optc,
uint32_t *otg_active_width,
uint32_t *otg_active_height)
{
uint32_t otg_enabled;
uint32_t v_blank_start;
uint32_t v_blank_end;
uint32_t h_blank_start;
uint32_t h_blank_end;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OTG_CONTROL,
OTG_MASTER_EN, &otg_enabled);
if (otg_enabled == 0)
return false;
REG_GET_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, &v_blank_start,
OTG_V_BLANK_END, &v_blank_end);
REG_GET_2(OTG_H_BLANK_START_END,
OTG_H_BLANK_START, &h_blank_start,
OTG_H_BLANK_END, &h_blank_end);
*otg_active_width = h_blank_start - h_blank_end;
*otg_active_height = v_blank_start - v_blank_end;
return true;
}
void optc1_clear_optc_underflow(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
}
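/* Default OTG init: enable blank-data and range-timing double buffering and
 * clear any stale underflow status.
 */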
void optc1_tg_init(struct timing_generator *optc)
{
optc1_set_blank_data_double_buffer(optc, true);
optc1_set_timing_double_buffer(optc, true);
optc1_clear_optc_underflow(optc);
}
bool optc1_is_tg_enabled(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t otg_enabled = 0;
REG_GET(OTG_CONTROL, OTG_MASTER_EN, &otg_enabled);
return (otg_enabled != 0);
}
bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t underflow_occurred = 0;
REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
OPTC_UNDERFLOW_OCCURRED_STATUS,
&underflow_occurred);
return (underflow_occurred == 1);
}
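/* Configure CRC capture for this OTG. The CRTC must already be enabled;
 * windows A and B bound the region whose pixels contribute to the CRC, and
 * only CRC engine 0 is used.
 */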
bool optc1_configure_crc(struct timing_generator *optc,
const struct crc_params *params)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* Cannot configure crc on a CRTC that is disabled */
if (!optc1_is_tg_enabled(optc))
return false;
REG_WRITE(OTG_CRC_CNTL, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
/* Window A x axis start and end. */
REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
/* Window A y axis start and end. */
REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
/* Window B x axis start and end. */
REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
/* Window B y axis start and end. */
REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
/* Set crc mode and selection, and enable. Only using CRC0*/
REG_UPDATE_3(OTG_CRC_CNTL,
OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
OTG_CRC0_SELECT, params->selection,
OTG_CRC_EN, 1);
return true;
}
/**
* optc1_get_crc - Capture CRC result per component
*
* @optc: timing_generator instance.
* @r_cr: 16-bit primary CRC signature for red data.
* @g_y: 16-bit primary CRC signature for green data.
* @b_cb: 16-bit primary CRC signature for blue data.
*
* This function reads the CRC signature from the OPTC registers. Notice that
* we have three registers to keep the CRC result per color component (RGB).
*
* Returns:
* If CRC is disabled, return false; otherwise, return true, and the CRC
* results in the parameters.
*/
bool optc1_get_crc(struct timing_generator *optc,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field);
/* Early return if CRC is not enabled for this CRTC */
if (!field)
return false;
/* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
REG_GET_2(OTG_CRC0_DATA_RG,
CRC0_R_CR, r_cr,
CRC0_G_Y, g_y);
/* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
REG_GET(OTG_CRC0_DATA_B,
CRC0_B_CB, b_cb);
return true;
}
static const struct timing_generator_funcs dcn10_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc1_enable_crtc,
.disable_crtc = optc1_disable_crtc,
/* used by enable_timing_synchronization. Not needed for FPGA */
.is_counter_moving = optc1_is_counter_moving,
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not needed for FPGA */
.wait_for_state = optc1_wait_for_state,
.set_blank = optc1_set_blank,
.is_blanked = optc1_is_blanked,
.set_blank_color = optc1_program_blank_color,
.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
.enable_reset_trigger = optc1_enable_reset_trigger,
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = NULL,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.set_test_pattern = optc1_set_test_pattern,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
.tg_init = optc1_tg_init,
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
.get_crc = optc1_get_crc,
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
.setup_manual_trigger = optc1_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
};
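/* Hook up the DCN1.0 timing generator function table and derive the maximum
 * h/v totals from the register field masks; the remaining minimum blank and
 * sync widths are fixed constants.
 */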
void dcn10_timing_generator_init(struct optc *optc1)
{
optc1->base.funcs = &dcn10_tg_funcs;
optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
/* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this:
*
* - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as
* containter rate.
*
* - In 4:2:0 (DSC or uncompressed) there are two pixels per container, hence the target container rate has to be
* halved to maintain the correct pixel rate.
*
* - Unlike 4:2:2 uncompressed, DSC 4:2:2 Native also has two pixels per container (this happens when DSC is applied
* to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well.
*
*/
bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
return two_pix;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "link_encoder.h"
#include "dcn10_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
enc10->base.ctx->logger
#define REG(reg)\
(enc10->link_regs->reg)
#undef FN
#define FN(reg_name, field_name) \
enc10->link_shift->field_name, enc10->link_mask->field_name
/*
* @brief
* DIG front end (FE) source select
* ASIC-dependent, actual values for register programming
*/
#define DCN10_DIG_FE_SOURCE_SELECT_INVALID 0x0
#define DCN10_DIG_FE_SOURCE_SELECT_DIGA 0x1
#define DCN10_DIG_FE_SOURCE_SELECT_DIGB 0x2
#define DCN10_DIG_FE_SOURCE_SELECT_DIGC 0x4
#define DCN10_DIG_FE_SOURCE_SELECT_DIGD 0x08
#define DCN10_DIG_FE_SOURCE_SELECT_DIGE 0x10
#define DCN10_DIG_FE_SOURCE_SELECT_DIGF 0x20
#define DCN10_DIG_FE_SOURCE_SELECT_DIGG 0x40
enum {
DP_MST_UPDATE_MAX_RETRY = 50
};
static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = dcn10_link_encoder_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_dp_output = dcn10_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dcn10_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dcn10_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
.enable_hpd = dcn10_link_encoder_enable_hpd,
.disable_hpd = dcn10_link_encoder_disable_hpd,
.is_dig_enabled = dcn10_is_dig_enabled,
.get_dig_frontend = dcn10_get_dig_frontend,
.get_dig_mode = dcn10_get_dig_mode,
.destroy = dcn10_link_encoder_destroy,
.get_max_link_cap = dcn10_link_encoder_get_max_link_cap,
};
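/* Thin wrapper that forwards transmitter control requests to the VBIOS
 * command table through the dc_bios interface.
 */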
static enum bp_result link_transmitter_control(
struct dcn10_link_encoder *enc10,
struct bp_transmitter_control *cntl)
{
enum bp_result result;
struct dc_bios *bp = enc10->base.ctx->dc_bios;
result = bp->funcs->transmitter_control(bp, cntl);
return result;
}
static void enable_phy_bypass_mode(
struct dcn10_link_encoder *enc10,
bool enable)
{
/* This register resides in DP back end block;
* transmitter is used for the offset
*/
REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
}
static void disable_prbs_symbols(
struct dcn10_link_encoder *enc10,
bool disable)
{
/* This register resides in DP back end block;
* transmitter is used for the offset
*/
REG_UPDATE_4(DP_DPHY_CNTL,
DPHY_ATEST_SEL_LANE0, disable,
DPHY_ATEST_SEL_LANE1, disable,
DPHY_ATEST_SEL_LANE2, disable,
DPHY_ATEST_SEL_LANE3, disable);
}
static void disable_prbs_mode(
struct dcn10_link_encoder *enc10)
{
REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
}
static void program_pattern_symbols(
struct dcn10_link_encoder *enc10,
uint16_t pattern_symbols[8])
{
/* This register resides in DP back end block;
* transmitter is used for the offset
*/
REG_SET_3(DP_DPHY_SYM0, 0,
DPHY_SYM1, pattern_symbols[0],
DPHY_SYM2, pattern_symbols[1],
DPHY_SYM3, pattern_symbols[2]);
/* This register resides in DP back end block;
* transmitter is used for the offset
*/
REG_SET_3(DP_DPHY_SYM1, 0,
DPHY_SYM4, pattern_symbols[3],
DPHY_SYM5, pattern_symbols[4],
DPHY_SYM6, pattern_symbols[5]);
/* This register resides in DP back end block;
* transmitter is used for the offset
*/
REG_SET_2(DP_DPHY_SYM2, 0,
DPHY_SYM7, pattern_symbols[6],
DPHY_SYM8, pattern_symbols[7]);
}
static void set_dp_phy_pattern_d102(
struct dcn10_link_encoder *enc10)
{
/* Disable PHY Bypass mode to setup the test pattern */
enable_phy_bypass_mode(enc10, false);
/* For 10-bit PRBS or debug symbols
* please use the following sequence:
*
* Enable debug symbols on the lanes
*/
disable_prbs_symbols(enc10, true);
/* Disable PRBS mode */
disable_prbs_mode(enc10);
/* Program debug symbols to be output */
{
uint16_t pattern_symbols[8] = {
0x2AA, 0x2AA, 0x2AA, 0x2AA,
0x2AA, 0x2AA, 0x2AA, 0x2AA
};
program_pattern_symbols(enc10, pattern_symbols);
}
/* Enable phy bypass mode to enable the test pattern */
enable_phy_bypass_mode(enc10, true);
}
static void set_link_training_complete(
struct dcn10_link_encoder *enc10,
bool complete)
{
/* This register resides in DP back end block;
* transmitter is used for the offset
*/
REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
}
void dcn10_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
/* Write Training Pattern */
REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
/* Set HW Register Training Complete to false */
set_link_training_complete(enc10, false);
/* Disable PHY Bypass mode to output Training Pattern */
enable_phy_bypass_mode(enc10, false);
/* Disable PRBS mode */
disable_prbs_mode(enc10);
}
static void setup_panel_mode(
struct dcn10_link_encoder *enc10,
enum dp_panel_mode panel_mode)
{
uint32_t value;
if (!REG(DP_DPHY_INTERNAL_CTRL))
return;
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
switch (panel_mode) {
case DP_PANEL_MODE_EDP:
value = 0x1;
break;
case DP_PANEL_MODE_SPECIAL:
value = 0x11;
break;
default:
value = 0x0;
break;
}
REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
}
static void set_dp_phy_pattern_symbol_error(
struct dcn10_link_encoder *enc10)
{
/* Disable PHY Bypass mode to setup the test pattern */
enable_phy_bypass_mode(enc10, false);
/* program correct panel mode*/
setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
/* A PRBS23 pattern is used for most DP electrical measurements. */
/* Enable PRBS symbols on the lanes */
disable_prbs_symbols(enc10, false);
/* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
DPHY_PRBS_SEL, 1,
DPHY_PRBS_EN, 1);
/* Enable phy bypass mode to enable the test pattern */
enable_phy_bypass_mode(enc10, true);
}
static void set_dp_phy_pattern_prbs7(
struct dcn10_link_encoder *enc10)
{
/* Disable PHY Bypass mode to setup the test pattern */
enable_phy_bypass_mode(enc10, false);
/* A PRBS7 pattern is used for most DP electrical measurements. */
/* Enable PRBS symbols on the lanes */
disable_prbs_symbols(enc10, false);
/* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
DPHY_PRBS_SEL, 0,
DPHY_PRBS_EN, 1);
/* Enable phy bypass mode to enable the test pattern */
enable_phy_bypass_mode(enc10, true);
}
static void set_dp_phy_pattern_80bit_custom(
struct dcn10_link_encoder *enc10,
const uint8_t *pattern)
{
/* Disable PHY Bypass mode to setup the test pattern */
enable_phy_bypass_mode(enc10, false);
/* Enable debug symbols on the lanes */
disable_prbs_symbols(enc10, true);
/* Enable PHY bypass mode to enable the test pattern */
/* TODO is it really needed ? */
enable_phy_bypass_mode(enc10, true);
/* Program 80 bit custom pattern */
{
uint16_t pattern_symbols[8];
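/* Pack the ten pattern bytes into eight 10-bit symbols, least significant
 * bits first: symbol 0 holds pattern bits 0-9, symbol 1 bits 10-19, and so on.
 */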
pattern_symbols[0] =
((pattern[1] & 0x03) << 8) | pattern[0];
pattern_symbols[1] =
((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
pattern_symbols[2] =
((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
pattern_symbols[3] =
(pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
pattern_symbols[4] =
((pattern[6] & 0x03) << 8) | pattern[5];
pattern_symbols[5] =
((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
pattern_symbols[6] =
((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
pattern_symbols[7] =
(pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
program_pattern_symbols(enc10, pattern_symbols);
}
/* Enable phy bypass mode to enable the test pattern */
enable_phy_bypass_mode(enc10, true);
}
static void set_dp_phy_pattern_hbr2_compliance_cp2520_2(
struct dcn10_link_encoder *enc10,
unsigned int cp2520_pattern)
{
/* previously there is a register DP_HBR2_EYE_PATTERN
* that is enabled to get the pattern.
* But it does not work with the latest spec change,
* so we are programming the following registers manually.
*
* The following settings have been confirmed
* by Nick Chorney and Sandra Liu
*/
/* Disable PHY Bypass mode to setup the test pattern */
enable_phy_bypass_mode(enc10, false);
/* Setup DIG encoder in DP SST mode */
enc10->base.funcs->setup(&enc10->base, SIGNAL_TYPE_DISPLAY_PORT);
/* ensure normal panel mode. */
setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
/* no vbid after BS (SR)
* DP_LINK_FRAMING_CNTL changed history Sandra Liu
* 11000260 / 11000104 / 110000FC
*/
REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
DP_IDLE_BS_INTERVAL, 0xFC,
DP_VBID_DISABLE, 1,
DP_VID_ENHANCED_FRAME_MODE, 1);
/* swap every BS with SR */
REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
/* select cp2520 patterns */
if (REG(DP_DPHY_HBR2_PATTERN_CONTROL))
REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL,
DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern);
else
/* pre-DCE11 can only generate CP2520 pattern 2 */
ASSERT(cp2520_pattern == 2);
/* set link training complete */
set_link_training_complete(enc10, true);
/* disable video stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
/* Disable PHY Bypass mode to setup the test pattern */
enable_phy_bypass_mode(enc10, false);
}
static void set_dp_phy_pattern_passthrough_mode(
struct dcn10_link_encoder *enc10,
enum dp_panel_mode panel_mode)
{
/* program correct panel mode */
setup_panel_mode(enc10, panel_mode);
/* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT
* in case we were doing HBR2 compliance pattern before
*/
REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
DP_IDLE_BS_INTERVAL, 0x2000,
DP_VBID_DISABLE, 0,
DP_VID_ENHANCED_FRAME_MODE, 1);
REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
/* set link training complete */
set_link_training_complete(enc10, true);
/* Disable PHY Bypass mode to setup the test pattern */
enable_phy_bypass_mode(enc10, false);
/* Disable PRBS mode */
disable_prbs_mode(enc10);
}
/* return value is bit-vector */
static uint8_t get_frontend_source(
enum engine_id engine)
{
switch (engine) {
case ENGINE_ID_DIGA:
return DCN10_DIG_FE_SOURCE_SELECT_DIGA;
case ENGINE_ID_DIGB:
return DCN10_DIG_FE_SOURCE_SELECT_DIGB;
case ENGINE_ID_DIGC:
return DCN10_DIG_FE_SOURCE_SELECT_DIGC;
case ENGINE_ID_DIGD:
return DCN10_DIG_FE_SOURCE_SELECT_DIGD;
case ENGINE_ID_DIGE:
return DCN10_DIG_FE_SOURCE_SELECT_DIGE;
case ENGINE_ID_DIGF:
return DCN10_DIG_FE_SOURCE_SELECT_DIGF;
case ENGINE_ID_DIGG:
return DCN10_DIG_FE_SOURCE_SELECT_DIGG;
default:
ASSERT_CRITICAL(false);
return DCN10_DIG_FE_SOURCE_SELECT_INVALID;
}
}
unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
int32_t value;
enum engine_id result;
REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
switch (value) {
case DCN10_DIG_FE_SOURCE_SELECT_DIGA:
result = ENGINE_ID_DIGA;
break;
case DCN10_DIG_FE_SOURCE_SELECT_DIGB:
result = ENGINE_ID_DIGB;
break;
case DCN10_DIG_FE_SOURCE_SELECT_DIGC:
result = ENGINE_ID_DIGC;
break;
case DCN10_DIG_FE_SOURCE_SELECT_DIGD:
result = ENGINE_ID_DIGD;
break;
case DCN10_DIG_FE_SOURCE_SELECT_DIGE:
result = ENGINE_ID_DIGE;
break;
case DCN10_DIG_FE_SOURCE_SELECT_DIGF:
result = ENGINE_ID_DIGF;
break;
case DCN10_DIG_FE_SOURCE_SELECT_DIGG:
result = ENGINE_ID_DIGG;
break;
default:
// invalid source select DIG
result = ENGINE_ID_UNKNOWN;
}
return result;
}
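/* Program the DP lane count and enable scrambler advance; called before the
 * PHY is enabled for DP (SST or MST) output.
 */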
void enc1_configure_encoder(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings)
{
/* set number of lanes */
REG_SET(DP_CONFIG, 0,
DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
/* setup scrambler */
REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
}
void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
bool exit_link_training_required)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
if (exit_link_training_required)
REG_UPDATE(DP_DPHY_FAST_TRAINING,
DPHY_RX_FAST_TRAINING_CAPABLE, 1);
else {
REG_UPDATE(DP_DPHY_FAST_TRAINING,
DPHY_RX_FAST_TRAINING_CAPABLE, 0);
/* In DCE 11 we can pre-program a Force SR register
 * so that an SR symbol is triggered after 5 idle patterns
 * have been transmitted. Upon PSR Exit, DMCU can trigger
* DPHY_LOAD_BS_COUNT_START = 1. Upon writing 1 to
* DPHY_LOAD_BS_COUNT_START and the internal counter
* reaches DPHY_LOAD_BS_COUNT, the next BS symbol will be
* replaced by SR symbol once.
*/
REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
}
}
void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
unsigned int sdp_transmit_line_num_deadline)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
REG_UPDATE_2(DP_SEC_CNTL1,
DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline,
DP_SEC_GSP0_PRIORITY, 1);
}
bool dcn10_is_dig_enabled(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t value;
REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
return value;
}
static void link_encoder_disable(struct dcn10_link_encoder *enc10)
{
/* reset training pattern */
REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
DPHY_TRAINING_PATTERN_SEL, 0);
/* reset training complete */
REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
/* reset panel mode */
setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
}
static void hpd_initialize(
struct dcn10_link_encoder *enc10)
{
/* Associate HPD with DIG_BE */
enum hpd_source_id hpd_source = enc10->base.hpd_source;
REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
}
bool dcn10_link_encoder_validate_dvi_output(
const struct dcn10_link_encoder *enc10,
enum signal_type connector_signal,
enum signal_type signal,
const struct dc_crtc_timing *crtc_timing)
{
uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
max_pixel_clock *= 2;
/* This handles the case of HDMI downgrade to DVI:
 * we don't want to cap the pixel clock if the DDI is not DVI.
 */
if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
max_pixel_clock = enc10->base.features.max_hdmi_pixel_clock;
/* DVI only support RGB pixel encoding */
if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
return false;
/* connect DVI via adapter's HDMI connector */
if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
signal != SIGNAL_TYPE_HDMI_TYPE_A &&
crtc_timing->pix_clk_100hz > (TMDS_MAX_PIXEL_CLOCK * 10))
return false;
if (crtc_timing->pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10))
return false;
if (crtc_timing->pix_clk_100hz > (max_pixel_clock * 10))
return false;
/* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
break;
case COLOR_DEPTH_101010:
case COLOR_DEPTH_161616:
if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
return false;
break;
default:
return false;
}
return true;
}
static bool dcn10_link_encoder_validate_hdmi_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing,
const struct dc_edid_caps *edid_caps,
int adjusted_pix_clk_100hz)
{
enum dc_color_depth max_deep_color =
enc10->base.features.max_hdmi_deep_color;
// check pixel clock against edid specified max TMDS clk
if (edid_caps->max_tmds_clk_mhz != 0 &&
adjusted_pix_clk_100hz > edid_caps->max_tmds_clk_mhz * 10000)
return false;
if (max_deep_color < crtc_timing->display_color_depth)
return false;
if (crtc_timing->display_color_depth < COLOR_DEPTH_888)
return false;
if (adjusted_pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10))
return false;
if ((adjusted_pix_clk_100hz == 0) ||
(adjusted_pix_clk_100hz > (enc10->base.features.max_hdmi_pixel_clock * 10)))
return false;
/* DCE11 HW does not support 420 */
if (!enc10->base.features.hdmi_ycbcr420_supported &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
if ((!enc10->base.features.flags.bits.HDMI_6GB_EN ||
enc10->base.ctx->dc->debug.hdmi20_disable) &&
adjusted_pix_clk_100hz >= 3000000)
return false;
if (enc10->base.ctx->dc->debug.hdmi20_disable &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
return true;
}
bool dcn10_link_encoder_validate_dp_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing)
{
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
if (!enc10->base.features.dp_ycbcr420_supported)
return false;
}
return true;
}
void dcn10_link_encoder_construct(
struct dcn10_link_encoder *enc10,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask)
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
enc10->base.funcs = &dcn10_lnk_enc_funcs;
enc10->base.ctx = init_data->ctx;
enc10->base.id = init_data->encoder;
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
/* set the flag to indicate whether driver poll the I2C data pin
* while doing the DP sink detect
*/
/* if (dal_adapter_service_is_feature_supported(as,
FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
enc10->base.features.flags.bits.
DP_SINK_DETECT_POLL_DATA_PIN = true;*/
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
SIGNAL_TYPE_LVDS |
SIGNAL_TYPE_DISPLAY_PORT |
SIGNAL_TYPE_DISPLAY_PORT_MST |
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
 * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the preferred
 * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
 * The preferred DIG assignment is decided by board design.
 * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
 * By this, adding DIGG should not hurt DCE 8.0.
 * This will let DCE 8.1 share DCE 8.0 as much as possible.
 */
enc10->link_regs = link_regs;
enc10->aux_regs = aux_regs;
enc10->hpd_regs = hpd_regs;
enc10->link_shift = link_shift;
enc10->link_mask = link_mask;
switch (enc10->base.transmitter) {
case TRANSMITTER_UNIPHY_A:
enc10->base.preferred_engine = ENGINE_ID_DIGA;
break;
case TRANSMITTER_UNIPHY_B:
enc10->base.preferred_engine = ENGINE_ID_DIGB;
break;
case TRANSMITTER_UNIPHY_C:
enc10->base.preferred_engine = ENGINE_ID_DIGC;
break;
case TRANSMITTER_UNIPHY_D:
enc10->base.preferred_engine = ENGINE_ID_DIGD;
break;
case TRANSMITTER_UNIPHY_E:
enc10->base.preferred_engine = ENGINE_ID_DIGE;
break;
case TRANSMITTER_UNIPHY_F:
enc10->base.preferred_engine = ENGINE_ID_DIGF;
break;
case TRANSMITTER_UNIPHY_G:
enc10->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
ASSERT_CRITICAL(false);
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
/* default to one to mirror Windows behavior */
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
enc10->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
if (result == BP_RESULT_OK) {
enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
if (enc10->base.ctx->dc->debug.hdmi20_disable) {
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}
bool dcn10_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
const struct dc_stream_state *stream)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
bool is_valid;
//if SCDC (340-600MHz) is disabled, set to HDMI 1.4 timing limit
if (stream->sink->edid_caps.panel_patch.skip_scdc_overwrite &&
enc10->base.features.max_hdmi_pixel_clock > 300000)
enc10->base.features.max_hdmi_pixel_clock = 300000;
switch (stream->signal) {
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_DVI_DUAL_LINK:
is_valid = dcn10_link_encoder_validate_dvi_output(
enc10,
stream->link->connector_signal,
stream->signal,
&stream->timing);
break;
case SIGNAL_TYPE_HDMI_TYPE_A:
is_valid = dcn10_link_encoder_validate_hdmi_output(
enc10,
&stream->timing,
&stream->sink->edid_caps,
stream->phy_pix_clk * 10);
break;
case SIGNAL_TYPE_DISPLAY_PORT:
case SIGNAL_TYPE_DISPLAY_PORT_MST:
is_valid = dcn10_link_encoder_validate_dp_output(
enc10, &stream->timing);
break;
case SIGNAL_TYPE_EDP:
is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
break;
default:
is_valid = false;
break;
}
return is_valid;
}
void dcn10_link_encoder_hw_init(
struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
cntl.action = TRANSMITTER_CONTROL_INIT;
cntl.engine_id = ENGINE_ID_UNKNOWN;
cntl.transmitter = enc10->base.transmitter;
cntl.connector_obj_id = enc10->base.connector;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.coherent = false;
cntl.hpd_sel = enc10->base.hpd_source;
if (enc10->base.connector.id == CONNECTOR_ID_EDP)
cntl.signal = SIGNAL_TYPE_EDP;
result = link_transmitter_control(enc10, &cntl);
if (result != BP_RESULT_OK) {
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
return;
}
if (enc10->base.connector.id == CONNECTOR_ID_LVDS) {
cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
result = link_transmitter_control(enc10, &cntl);
ASSERT(result == BP_RESULT_OK);
}
dcn10_aux_initialize(enc10);
/* reinitialize HPD.
* hpd_initialize() will pass DIG_FE id to HW context.
* All other routine within HW context will use fe_engine_offset
* as DIG_FE id even caller pass DIG_FE id.
* So this routine must be called first.
*/
hpd_initialize(enc10);
}
void dcn10_link_encoder_destroy(struct link_encoder **enc)
{
kfree(TO_DCN10_LINK_ENC(*enc));
*enc = NULL;
}
void dcn10_link_encoder_setup(
struct link_encoder *enc,
enum signal_type signal)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
switch (signal) {
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
/* DP SST */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
break;
case SIGNAL_TYPE_LVDS:
/* LVDS */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
break;
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_DVI_DUAL_LINK:
/* TMDS-DVI */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
break;
case SIGNAL_TYPE_HDMI_TYPE_A:
/* TMDS-HDMI */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
/* DP MST */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
break;
default:
ASSERT_CRITICAL(false);
/* invalid mode ! */
break;
}
}
/* TODO: still need depth or just pass in adjusted pixel clock? */
void dcn10_link_encoder_enable_tmds_output(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
enum signal_type signal,
uint32_t pixel_clock)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
/* Enable the PHY */
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc10->base.transmitter;
cntl.pll_id = clock_source;
cntl.signal = signal;
if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
cntl.lanes_number = 8;
else
cntl.lanes_number = 4;
cntl.hpd_sel = enc10->base.hpd_source;
cntl.pixel_clock = pixel_clock;
cntl.color_depth = color_depth;
result = link_transmitter_control(enc10, &cntl);
if (result != BP_RESULT_OK) {
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
}
void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
enum signal_type signal,
uint32_t pixel_clock)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
dcn10_link_encoder_enable_tmds_output(
enc, clock_source, color_depth, signal, pixel_clock);
REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
}
/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
/* Enable the PHY */
/* number_of_lanes is used for pixel clock adjust,
* but it's not passed to asic_control.
* We need to set number of lanes manually.
*/
enc1_configure_encoder(enc10, link_settings);
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc10->base.transmitter;
cntl.pll_id = clock_source;
cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
cntl.lanes_number = link_settings->lane_count;
cntl.hpd_sel = enc10->base.hpd_source;
cntl.pixel_clock = link_settings->link_rate
* LINK_RATE_REF_FREQ_IN_KHZ;
/* TODO: check if undefined works */
cntl.color_depth = COLOR_DEPTH_UNDEFINED;
result = link_transmitter_control(enc10, &cntl);
if (result != BP_RESULT_OK) {
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
}
/* enables DP PHY output in MST mode */
void dcn10_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
/* Enable the PHY */
/* number_of_lanes is used for pixel clock adjust,
* but it's not passed to asic_control.
* We need to set number of lanes manually.
*/
enc1_configure_encoder(enc10, link_settings);
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = ENGINE_ID_UNKNOWN;
cntl.transmitter = enc10->base.transmitter;
cntl.pll_id = clock_source;
cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
cntl.lanes_number = link_settings->lane_count;
cntl.hpd_sel = enc10->base.hpd_source;
cntl.pixel_clock = link_settings->link_rate
* LINK_RATE_REF_FREQ_IN_KHZ;
/* TODO: check if undefined works */
cntl.color_depth = COLOR_DEPTH_UNDEFINED;
result = link_transmitter_control(enc10, &cntl);
if (result != BP_RESULT_OK) {
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
}
/*
* @brief
* Disable transmitter and its encoder
*/
void dcn10_link_encoder_disable_output(
struct link_encoder *enc,
enum signal_type signal)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
/* In the DP_Alt_No_Connect case the DIG is already turned off
* after executing the PHY workaround sequence, so the PHY must
* not be touched any more.
*/
return;
}
/* Powering down RX and disabling the GPU PHY should be paired.
* Disabling the PHY without powering down RX may cause
* symbol lock loss, for which we will get a DP sink interrupt.
*/
/* There is a case for DP active dongles
* where we want to disable the PHY but keep RX powered;
* for those we need to ignore the DP sink interrupt
* by checking the lane count that was set
* on the last do_enable_output().
*/
/* disable transmitter */
cntl.action = TRANSMITTER_CONTROL_DISABLE;
cntl.transmitter = enc10->base.transmitter;
cntl.hpd_sel = enc10->base.hpd_source;
cntl.signal = signal;
cntl.connector_obj_id = enc10->base.connector;
result = link_transmitter_control(enc10, &cntl);
if (result != BP_RESULT_OK) {
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
return;
}
/* disable encoder */
if (dc_is_dp_signal(signal))
link_encoder_disable(enc10);
}
void dcn10_link_encoder_dp_set_lane_settings(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
union dpcd_training_lane_set training_lane_set = { { 0 } };
int32_t lane = 0;
struct bp_transmitter_control cntl = { 0 };
if (!link_settings) {
BREAK_TO_DEBUGGER();
return;
}
cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
cntl.transmitter = enc10->base.transmitter;
cntl.connector_obj_id = enc10->base.connector;
cntl.lanes_number = link_settings->lane_count;
cntl.hpd_sel = enc10->base.hpd_source;
cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
for (lane = 0; lane < link_settings->lane_count; lane++) {
/* translate lane settings */
training_lane_set.bits.VOLTAGE_SWING_SET =
lane_settings[lane].VOLTAGE_SWING;
training_lane_set.bits.PRE_EMPHASIS_SET =
lane_settings[lane].PRE_EMPHASIS;
/* post cursor 2 setting only applies to HBR2 link rate */
if (link_settings->link_rate == LINK_RATE_HIGH2) {
/* this is passed to VBIOS
* to program post cursor 2 level
*/
training_lane_set.bits.POST_CURSOR2_SET =
lane_settings[lane].POST_CURSOR2;
}
cntl.lane_select = lane;
cntl.lane_settings = training_lane_set.raw;
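/* For illustration, assuming the union mirrors the standard DPCD
* TRAINING_LANEx_SET layout (voltage swing in bits 1:0, pre-emphasis
* in bits 4:3): swing level 2 with pre-emphasis level 1 gives
* training_lane_set.raw == 0x0A for this lane.
*/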
/* call VBIOS table to set voltage swing and pre-emphasis */
link_transmitter_control(enc10, &cntl);
}
}
/* set DP PHY test and training patterns */
void dcn10_link_encoder_dp_set_phy_pattern(
struct link_encoder *enc,
const struct encoder_set_dp_phy_pattern_param *param)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
switch (param->dp_phy_pattern) {
case DP_TEST_PATTERN_TRAINING_PATTERN1:
dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
break;
case DP_TEST_PATTERN_TRAINING_PATTERN2:
dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
break;
case DP_TEST_PATTERN_TRAINING_PATTERN3:
dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
break;
case DP_TEST_PATTERN_TRAINING_PATTERN4:
dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3);
break;
case DP_TEST_PATTERN_D102:
set_dp_phy_pattern_d102(enc10);
break;
case DP_TEST_PATTERN_SYMBOL_ERROR:
set_dp_phy_pattern_symbol_error(enc10);
break;
case DP_TEST_PATTERN_PRBS7:
set_dp_phy_pattern_prbs7(enc10);
break;
case DP_TEST_PATTERN_80BIT_CUSTOM:
set_dp_phy_pattern_80bit_custom(
enc10, param->custom_pattern);
break;
case DP_TEST_PATTERN_CP2520_1:
set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 1);
break;
case DP_TEST_PATTERN_CP2520_2:
set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 2);
break;
case DP_TEST_PATTERN_CP2520_3:
set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 3);
break;
case DP_TEST_PATTERN_VIDEO_MODE: {
set_dp_phy_pattern_passthrough_mode(
enc10, param->dp_panel_mode);
break;
}
default:
/* invalid phy pattern */
ASSERT_CRITICAL(false);
break;
}
}
static void fill_stream_allocation_row_info(
const struct link_mst_stream_allocation *stream_allocation,
uint32_t *src,
uint32_t *slots)
{
const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
if (stream_enc) {
*src = stream_enc->id;
*slots = stream_allocation->slot_count;
} else {
*src = 0;
*slots = 0;
}
}
/* programs DP MST VC payload allocation */
void dcn10_link_encoder_update_mst_stream_allocation_table(
struct link_encoder *enc,
const struct link_mst_stream_allocation_table *table)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t value1 = 0;
uint32_t value2 = 0;
uint32_t slots = 0;
uint32_t src = 0;
uint32_t retries = 0;
/* For CZ, there are only 3 pipes, so the virtual channel count is up to 3. */
/* --- Set MSE Stream Attribute -
* Setup VC Payload Table on Tx Side,
* Issue allocation change trigger
* to commit payload on both tx and rx side
*/
/* we should clean-up table each time */
if (table->stream_count >= 1) {
fill_stream_allocation_row_info(
&table->stream_allocations[0],
&src,
&slots);
} else {
src = 0;
slots = 0;
}
REG_UPDATE_2(DP_MSE_SAT0,
DP_MSE_SAT_SRC0, src,
DP_MSE_SAT_SLOT_COUNT0, slots);
if (table->stream_count >= 2) {
fill_stream_allocation_row_info(
&table->stream_allocations[1],
&src,
&slots);
} else {
src = 0;
slots = 0;
}
REG_UPDATE_2(DP_MSE_SAT0,
DP_MSE_SAT_SRC1, src,
DP_MSE_SAT_SLOT_COUNT1, slots);
if (table->stream_count >= 3) {
fill_stream_allocation_row_info(
&table->stream_allocations[2],
&src,
&slots);
} else {
src = 0;
slots = 0;
}
REG_UPDATE_2(DP_MSE_SAT1,
DP_MSE_SAT_SRC2, src,
DP_MSE_SAT_SLOT_COUNT2, slots);
if (table->stream_count >= 4) {
fill_stream_allocation_row_info(
&table->stream_allocations[3],
&src,
&slots);
} else {
src = 0;
slots = 0;
}
REG_UPDATE_2(DP_MSE_SAT1,
DP_MSE_SAT_SRC3, src,
DP_MSE_SAT_SLOT_COUNT3, slots);
/* --- wait for the transaction to finish */
/* Send the allocation change trigger (ACT):
* this step first sends the ACT,
* then double buffers the SAT into the hardware,
* making the new allocation active on the DP MST link.
*/
/* DP_MSE_SAT_UPDATE:
* 0 - No Action
* 1 - Update SAT with trigger
* 2 - Update SAT without trigger
*/
REG_UPDATE(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, 1);
/* Wait for the update to complete
* (i.e. the DP_MSE_SAT_UPDATE field is reset to 0),
* then wait for the transmission of at least 16 MTP headers
* on the immediate local link, i.e. until the read-only
* DP_MSE_16_MTP_KEEPOUT field is reset to 0.
* A value of 1 indicates that the DP MST link is in the
* 16 MTP keepout region after a VC has been added.
* MST stream bandwidth (VC rate) can be configured
* after this bit is cleared.
*/
do {
udelay(10);
REG_READ(DP_MSE_SAT_UPDATE);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, &value1);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_16_MTP_KEEPOUT, &value2);
/* bit field DP_MSE_SAT_UPDATE is set to 1 already */
if (!value1 && !value2)
break;
++retries;
} while (retries < DP_MST_UPDATE_MAX_RETRY);
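/* The loop above polls in 10 us steps, so the total wait is bounded by
* DP_MST_UPDATE_MAX_RETRY * 10 us before the function gives up.
*/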
}
void dcn10_link_encoder_connect_dig_be_to_fe(
struct link_encoder *enc,
enum engine_id engine,
bool connect)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t field;
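/* DIG_FE_SOURCE_SELECT is treated as a bitmask below, so
* get_frontend_source() is assumed to return a one-hot bit for the
* given DIG front end.
*/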
if (engine != ENGINE_ID_UNKNOWN) {
REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
if (connect)
field |= get_frontend_source(engine);
else
field &= ~get_frontend_source(engine);
REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
}
}
#define HPD_REG(reg)\
(enc10->hpd_regs->reg)
#define HPD_REG_READ(reg_name) \
dm_read_reg(CTX, HPD_REG(reg_name))
#define HPD_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
HPD_REG(reg_name), \
n, __VA_ARGS__)
#define HPD_REG_UPDATE(reg_name, field, val) \
HPD_REG_UPDATE_N(reg_name, 1, \
FN(reg_name, field), val)
void dcn10_link_encoder_enable_hpd(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
HPD_REG_UPDATE(DC_HPD_CONTROL,
DC_HPD_EN, 1);
}
void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
HPD_REG_UPDATE(DC_HPD_CONTROL,
DC_HPD_EN, 0);
}
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
#define AUX_REG_READ(reg_name) \
dm_read_reg(CTX, AUX_REG(reg_name))
#define AUX_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
AUX_REG(reg_name), \
n, __VA_ARGS__)
#define AUX_REG_UPDATE(reg_name, field, val) \
AUX_REG_UPDATE_N(reg_name, 1, \
FN(reg_name, field), val)
#define AUX_REG_UPDATE_2(reg, f1, v1, f2, v2) \
AUX_REG_UPDATE_N(reg, 2,\
FN(reg, f1), v1,\
FN(reg, f2), v2)
void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
{
enum hpd_source_id hpd_source = enc10->base.hpd_source;
AUX_REG_UPDATE_2(AUX_CONTROL,
AUX_HPD_SEL, hpd_source,
AUX_LS_READ_EN, 0);
/* 1/4 window (the maximum allowed) */
AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0,
AUX_RX_RECEIVE_WINDOW, 0);
}
enum signal_type dcn10_get_dig_mode(
struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t value;
REG_GET(DIG_BE_CNTL, DIG_MODE, &value);
switch (value) {
case 1:
return SIGNAL_TYPE_DISPLAY_PORT;
case 2:
return SIGNAL_TYPE_DVI_SINGLE_LINK;
case 3:
return SIGNAL_TYPE_HDMI_TYPE_A;
case 5:
return SIGNAL_TYPE_DISPLAY_PORT_MST;
default:
return SIGNAL_TYPE_NONE;
}
return SIGNAL_TYPE_NONE;
}
void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
/* Set Default link settings */
struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
/* Higher link settings based on feature supported */
if (enc->features.flags.bits.IS_HBR2_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH2;
if (enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
if (enc->features.flags.bits.IS_UHBR10_CAPABLE)
max_link_cap.link_rate = LINK_RATE_UHBR10;
if (enc->features.flags.bits.IS_UHBR13_5_CAPABLE)
max_link_cap.link_rate = LINK_RATE_UHBR13_5;
if (enc->features.flags.bits.IS_UHBR20_CAPABLE)
max_link_cap.link_rate = LINK_RATE_UHBR20;
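/* For reference, the rate caps checked above correspond to the standard
* DP link rates (assumed mapping): HBR2 = 5.4, HBR3 = 8.1, UHBR10 = 10,
* UHBR13.5 = 13.5 and UHBR20 = 20 Gbps per lane.
*/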
*link_settings = max_link_cap;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
enum dcn10_coef_filter_type_sel {
SCL_COEF_LUMA_VERT_FILTER = 0,
SCL_COEF_LUMA_HORZ_FILTER = 1,
SCL_COEF_CHROMA_VERT_FILTER = 2,
SCL_COEF_CHROMA_HORZ_FILTER = 3,
SCL_COEF_ALPHA_VERT_FILTER = 4,
SCL_COEF_ALPHA_HORZ_FILTER = 5
};
enum dscl_autocal_mode {
AUTOCAL_MODE_OFF = 0,
/* Autocal calculates the scaling ratio and initial phase;
* DSCL_MODE_SEL must be set to 1
*/
AUTOCAL_MODE_AUTOSCALE = 1,
/* Autocal performs auto centering without replication;
* DSCL_MODE_SEL must be set to 0
*/
AUTOCAL_MODE_AUTOCENTER = 2,
/* Autocal performs auto centering and auto replication;
* DSCL_MODE_SEL must be set to 0
*/
AUTOCAL_MODE_AUTOREPLICATE = 3
};
enum dscl_mode_sel {
DSCL_MODE_SCALING_444_BYPASS = 0,
DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
DSCL_MODE_DSCL_BYPASS = 6
};
static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
return 0; /* 10 bpc */
else if (depth == LB_PIXEL_DEPTH_24BPP)
return 1; /* 8 bpc */
else if (depth == LB_PIXEL_DEPTH_18BPP)
return 2; /* 6 bpc */
else if (depth == LB_PIXEL_DEPTH_36BPP)
return 3; /* 12 bpc */
else {
ASSERT(0);
return -1; /* Unsupported */
}
}
static bool dpp1_dscl_is_video_format(enum pixel_format format)
{
if (format >= PIXEL_FORMAT_VIDEO_BEGIN
&& format <= PIXEL_FORMAT_VIDEO_END)
return true;
else
return false;
}
static bool dpp1_dscl_is_420_format(enum pixel_format format)
{
if (format == PIXEL_FORMAT_420BPP8 ||
format == PIXEL_FORMAT_420BPP10)
return true;
else
return false;
}
static enum dscl_mode_sel dpp1_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
{
const long long one = dc_fixpt_one.value;
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
return DSCL_MODE_DSCL_BYPASS;
}
if (data->ratios.horz.value == one
&& data->ratios.vert.value == one
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
return DSCL_MODE_SCALING_444_BYPASS;
if (!dpp1_dscl_is_420_format(data->format)) {
if (dpp1_dscl_is_video_format(data->format))
return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
return DSCL_MODE_SCALING_444_RGB_ENABLE;
}
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
return DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
static void dpp1_power_on_dscl(
struct dpp *dpp_base,
bool power_on)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) {
if (power_on) {
REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0);
REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
} else {
if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) {
dpp->base.ctx->dc->optimized_required = true;
dpp->base.deferred_reg_writes.bits.disable_dscl = true;
} else {
REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
}
}
}
}
static void dpp1_dscl_set_lb(
struct dcn10_dpp *dpp,
const struct line_buffer_params *lb_params,
enum lb_memory_config mem_size_config)
{
uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */
/* LB */
if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL caps: pixel data processed in fixed format */
uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth);
uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth;
REG_SET_7(LB_DATA_FORMAT, 0,
PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */
PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */
PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */
DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */
DITHER_EN, 0, /* Dithering enable: Disabled */
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
} else {
/* DSCL caps: pixel data processed in float format */
REG_SET_2(LB_DATA_FORMAT, 0,
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
if (dpp->base.caps->max_lb_partitions == 31)
max_partitions = 31;
REG_SET_2(LB_MEMORY_CTRL, 0,
MEMORY_CONFIG, mem_size_config,
LB_MAX_PARTITIONS, max_partitions);
}
static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
{
if (taps == 8)
return get_filter_8tap_64p(ratio);
else if (taps == 7)
return get_filter_7tap_64p(ratio);
else if (taps == 6)
return get_filter_6tap_64p(ratio);
else if (taps == 5)
return get_filter_5tap_64p(ratio);
else if (taps == 4)
return get_filter_4tap_64p(ratio);
else if (taps == 3)
return get_filter_3tap_64p(ratio);
else if (taps == 2)
return get_filter_2tap_64p();
else if (taps == 1)
return NULL;
else {
/* should never happen, bug */
BREAK_TO_DEBUGGER();
return NULL;
}
}
static void dpp1_dscl_set_scaler_filter(
struct dcn10_dpp *dpp,
uint32_t taps,
enum dcn10_coef_filter_type_sel filter_type,
const uint16_t *filter)
{
const int tap_pairs = (taps + 1) / 2;
int phase;
int pair;
uint16_t odd_coef, even_coef;
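/* Worked example of the write pattern below: with taps == 4 the filter
* has tap_pairs == 2, and the loops program (NUM_PHASES / 2 + 1) == 33
* phases, i.e. 33 * 2 == 66 coefficient-pair writes per filter.
*/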
REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0,
SCL_COEF_RAM_TAP_PAIR_IDX, 0,
SCL_COEF_RAM_PHASE, 0,
SCL_COEF_RAM_FILTER_TYPE, filter_type);
for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
for (pair = 0; pair < tap_pairs; pair++) {
even_coef = filter[phase * taps + 2 * pair];
if ((pair * 2 + 1) < taps)
odd_coef = filter[phase * taps + 2 * pair + 1];
else
odd_coef = 0;
REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
/* Even tap coefficient (bits 1:0 fixed to 0) */
SCL_COEF_RAM_EVEN_TAP_COEF, even_coef,
/* Write/read control for even coefficient */
SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1,
/* Odd tap coefficient (bits 1:0 fixed to 0) */
SCL_COEF_RAM_ODD_TAP_COEF, odd_coef,
/* Write/read control for odd coefficient */
SCL_COEF_RAM_ODD_TAP_COEF_EN, 1);
}
}
}
static void dpp1_dscl_set_scl_filter(
struct dcn10_dpp *dpp,
const struct scaler_data *scl_data,
bool chroma_coef_mode)
{
bool h_2tap_hardcode_coef_en = false;
bool v_2tap_hardcode_coef_en = false;
bool h_2tap_sharp_en = false;
bool v_2tap_sharp_en = false;
uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz;
uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert;
bool coef_ram_current;
const uint16_t *filter_h = NULL;
const uint16_t *filter_v = NULL;
const uint16_t *filter_h_c = NULL;
const uint16_t *filter_v_c = NULL;
h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3
&& scl_data->taps.h_taps_c < 3
&& (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1);
v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3
&& scl_data->taps.v_taps_c < 3
&& (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1);
h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0;
v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0;
REG_UPDATE_6(DSCL_2TAP_CONTROL,
SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en,
SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en,
SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor,
SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en,
SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en,
SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor);
if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) {
bool filter_updated = false;
filter_h = dpp1_dscl_get_filter_coeffs_64p(
scl_data->taps.h_taps, scl_data->ratios.horz);
filter_v = dpp1_dscl_get_filter_coeffs_64p(
scl_data->taps.v_taps, scl_data->ratios.vert);
filter_updated = (filter_h && (filter_h != dpp->filter_h))
|| (filter_v && (filter_v != dpp->filter_v));
if (chroma_coef_mode) {
filter_h_c = dpp1_dscl_get_filter_coeffs_64p(
scl_data->taps.h_taps_c, scl_data->ratios.horz_c);
filter_v_c = dpp1_dscl_get_filter_coeffs_64p(
scl_data->taps.v_taps_c, scl_data->ratios.vert_c);
filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c))
|| (filter_v_c && (filter_v_c != dpp->filter_v_c));
}
if (filter_updated) {
uint32_t scl_mode = REG_READ(SCL_MODE);
if (!h_2tap_hardcode_coef_en && filter_h) {
dpp1_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps,
SCL_COEF_LUMA_HORZ_FILTER, filter_h);
}
dpp->filter_h = filter_h;
if (!v_2tap_hardcode_coef_en && filter_v) {
dpp1_dscl_set_scaler_filter(
dpp, scl_data->taps.v_taps,
SCL_COEF_LUMA_VERT_FILTER, filter_v);
}
dpp->filter_v = filter_v;
if (chroma_coef_mode) {
if (!h_2tap_hardcode_coef_en && filter_h_c) {
dpp1_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps_c,
SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c);
}
if (!v_2tap_hardcode_coef_en && filter_v_c) {
dpp1_dscl_set_scaler_filter(
dpp, scl_data->taps.v_taps_c,
SCL_COEF_CHROMA_VERT_FILTER, filter_v_c);
}
}
dpp->filter_h_c = filter_h_c;
dpp->filter_v_c = filter_v_c;
coef_ram_current = get_reg_field_value_ex(
scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT,
dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT);
/* Swap coefficient RAM and set chroma coefficient mode */
REG_SET_2(SCL_MODE, scl_mode,
SCL_COEF_RAM_SELECT, !coef_ram_current,
SCL_CHROMA_COEF_MODE, chroma_coef_mode);
}
}
}
static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
return 10;
else if (depth == LB_PIXEL_DEPTH_24BPP)
return 8;
else if (depth == LB_PIXEL_DEPTH_18BPP)
return 6;
else if (depth == LB_PIXEL_DEPTH_36BPP)
return 12;
else {
BREAK_TO_DEBUGGER();
return -1; /* Unsupported */
}
}
void dpp1_dscl_calc_lb_num_partitions(
const struct scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
int *num_part_c)
{
int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
int line_size = scl_data->viewport.width < scl_data->recout.width ?
scl_data->viewport.width : scl_data->recout.width;
int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
scl_data->viewport_c.width : scl_data->recout.width;
if (line_size == 0)
line_size = 1;
if (line_size_c == 0)
line_size_c = 1;
lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
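/* Worked example: for a 1920-wide line at 10 bpc,
* memory_line_size_y = (1920 * 10 + 71) / 72 = 267 LB entries and
* memory_line_size_a = (1920 + 5) / 6 = 320 entries, so
* LB_MEMORY_CONFIG_1 (816 entries) yields 816 / 267 = 3 luma partitions.
*/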
if (lb_config == LB_MEMORY_CONFIG_1) {
lb_memory_size = 816;
lb_memory_size_c = 816;
lb_memory_size_a = 984;
} else if (lb_config == LB_MEMORY_CONFIG_2) {
lb_memory_size = 1088;
lb_memory_size_c = 1088;
lb_memory_size_a = 1312;
} else if (lb_config == LB_MEMORY_CONFIG_3) {
/* 420 mode: using 3rd mem from Y, Cr and Cb */
lb_memory_size = 816 + 1088 + 848 + 848 + 848;
lb_memory_size_c = 816 + 1088;
lb_memory_size_a = 984 + 1312 + 456;
} else {
lb_memory_size = 816 + 1088 + 848;
lb_memory_size_c = 816 + 1088 + 848;
lb_memory_size_a = 984 + 1312 + 456;
}
*num_part_y = lb_memory_size / memory_line_size_y;
*num_part_c = lb_memory_size_c / memory_line_size_c;
num_partitions_a = lb_memory_size_a / memory_line_size_a;
if (scl_data->lb_params.alpha_en
&& (num_partitions_a < *num_part_y))
*num_part_y = num_partitions_a;
if (*num_part_y > 64)
*num_part_y = 64;
if (*num_part_c > 64)
*num_part_c = 64;
}
bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps)
{
if (ceil_vratio > 2)
return vtaps <= (num_partitions - ceil_vratio + 2);
else
return vtaps <= num_partitions;
}
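/* For example, dpp1_dscl_is_lb_conf_valid() with ceil_vratio == 3 and
* 5 partitions allows at most 5 - 3 + 2 == 4 vertical taps; for ratios
* of 2 or less the taps only need to fit within the partition count.
*/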
/* Find the first matching configuration that meets the minimum required LB size */
static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp,
const struct scaler_data *scl_data)
{
int num_part_y, num_part_c;
int vtaps = scl_data->taps.v_taps;
int vtaps_c = scl_data->taps.v_taps_c;
int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
if (dpp->base.ctx->dc->debug.use_max_lb) {
if (scl_data->format == PIXEL_FORMAT_420BPP8
|| scl_data->format == PIXEL_FORMAT_420BPP10)
return LB_MEMORY_CONFIG_3;
return LB_MEMORY_CONFIG_0;
}
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
return LB_MEMORY_CONFIG_1;
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c);
if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
return LB_MEMORY_CONFIG_2;
if (scl_data->format == PIXEL_FORMAT_420BPP8
|| scl_data->format == PIXEL_FORMAT_420BPP10) {
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c);
if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
return LB_MEMORY_CONFIG_3;
}
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c);
/*Ensure we can support the requested number of vtaps*/
ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
&& dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c));
return LB_MEMORY_CONFIG_0;
}
static void dpp1_dscl_set_manual_ratio_init(
struct dcn10_dpp *dpp, const struct scaler_data *data)
{
uint32_t init_frac = 0;
uint32_t init_int = 0;
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5);
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5);
/*
* The fraction is programmed in 0.24 format: the u0.19 fixed-point
* value is shifted left by 5, so the five LSBs are zero.
*/
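/* For example, inits.h == 1.25 gives init_int == 1 and
* init_frac == (0.25 * 2^19) << 5 == 0x400000, i.e. 0.25 in 0.24 format.
*/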
init_frac = dc_fixpt_u0d19(data->inits.h) << 5;
init_int = dc_fixpt_floor(data->inits.h);
REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
SCL_H_INIT_FRAC, init_frac,
SCL_H_INIT_INT, init_int);
init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5;
init_int = dc_fixpt_floor(data->inits.h_c);
REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
SCL_H_INIT_FRAC_C, init_frac,
SCL_H_INIT_INT_C, init_int);
init_frac = dc_fixpt_u0d19(data->inits.v) << 5;
init_int = dc_fixpt_floor(data->inits.v);
REG_SET_2(SCL_VERT_FILTER_INIT, 0,
SCL_V_INIT_FRAC, init_frac,
SCL_V_INIT_INT, init_int);
if (REG(SCL_VERT_FILTER_INIT_BOT)) {
struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
init_frac = dc_fixpt_u0d19(bot) << 5;
init_int = dc_fixpt_floor(bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
SCL_V_INIT_FRAC_BOT, init_frac,
SCL_V_INIT_INT_BOT, init_int);
}
init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5;
init_int = dc_fixpt_floor(data->inits.v_c);
REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
SCL_V_INIT_FRAC_C, init_frac,
SCL_V_INIT_INT_C, init_int);
if (REG(SCL_VERT_FILTER_INIT_BOT_C)) {
struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
init_frac = dc_fixpt_u0d19(bot) << 5;
init_int = dc_fixpt_floor(bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
SCL_V_INIT_FRAC_BOT_C, init_frac,
SCL_V_INIT_INT_BOT_C, init_int);
}
}
/**
* dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area
*
* @dpp: DPP data struct
* @recout: Rectangle information
*
* This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on
* the values specified in the recout parameter.
*
* Note: This function only has an effect if AutoCal is disabled.
*/
static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
/* First line of RECOUT in the active OTG area */
RECOUT_START_Y, recout->y);
REG_SET_2(RECOUT_SIZE, 0,
/* Number of RECOUT horizontal pixels */
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height);
}
/**
* dpp1_dscl_set_scaler_manual_scale - Manually program scaler and line buffer
*
* @dpp_base: High level DPP struct
* @scl_data: scaler_data info
*
* This is the primary function to program scaler and line buffer in manual
* scaling mode. To execute the required operations for manual scale, we need
* to disable AutoCal first.
*/
void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
const struct scaler_data *scl_data)
{
enum lb_memory_config lb_config;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
return;
PERF_TRACE();
dpp->scl_data = *scl_data;
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
dpp1_power_on_dscl(dpp_base, true);
}
/* Autocal off */
REG_SET_3(DSCL_AUTOCAL, 0,
AUTOCAL_MODE, AUTOCAL_MODE_OFF,
AUTOCAL_NUM_PIPE, 0,
AUTOCAL_PIPE_ID, 0);
/* Clear the scaler boundary mode when AutoCal is off */
REG_SET(DSCL_CONTROL, 0,
SCL_BOUNDARY_MODE, 0);
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
/* MPC Size */
REG_SET_2(MPC_SIZE, 0,
/* Number of horizontal pixels of MPC */
MPC_WIDTH, scl_data->h_active,
/* Number of vertical lines of MPC */
MPC_HEIGHT, scl_data->v_active);
/* SCL mode */
REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
dpp1_power_on_dscl(dpp_base, false);
return;
}
/* LB */
lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
return;
/* Black offsets */
if (REG(SCL_BLACK_OFFSET)) {
if (ycbcr)
REG_SET_2(SCL_BLACK_OFFSET, 0,
SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
else
REG_SET_2(SCL_BLACK_OFFSET, 0,
SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
}
/* Manually calculate scale ratio and init values */
dpp1_dscl_set_manual_ratio_init(dpp, scl_data);
/* HTaps/VTaps */
REG_SET_4(SCL_TAP_CONTROL, 0,
SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
PERF_TRACE();
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn10_mpc.h"
#define REG(reg)\
mpc10->mpc_regs->reg
#define CTX \
mpc10->base.ctx
#undef FN
#define FN(reg_name, field_name) \
mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name
void mpc1_set_bg_color(struct mpc *mpc,
struct tg_color *bg_color,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
uint32_t bg_r_cr, bg_g_y, bg_b_cb;
bottommost_mpcc->blnd_cfg.black_color = *bg_color;
/* find bottommost mpcc. */
while (bottommost_mpcc->mpcc_bot) {
/* avoid circular linked list */
ASSERT(bottommost_mpcc != bottommost_mpcc->mpcc_bot);
if (bottommost_mpcc == bottommost_mpcc->mpcc_bot)
break;
bottommost_mpcc = bottommost_mpcc->mpcc_bot;
}
/* mpc color is 12 bit. tg_color is 10 bit */
/* todo: might want to use 16 bit to represent color and have each
* hw block translate to correct color depth.
*/
bg_r_cr = bg_color->color_r_cr << 2;
bg_g_y = bg_color->color_g_y << 2;
bg_b_cb = bg_color->color_b_cb << 2;
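/* e.g. a full-scale 10-bit component of 0x3FF becomes 0xFFC in the
* 12-bit MPCC background color registers below.
*/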
REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_R_CR, bg_r_cr);
REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_G_Y, bg_g_y);
REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
}
static void mpc1_update_blending(
struct mpc *mpc,
struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id);
REG_UPDATE_5(MPCC_CONTROL[mpcc_id],
MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha,
MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only,
MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha,
MPCC_GLOBAL_GAIN, blnd_cfg->global_gain);
mpcc->blnd_cfg = *blnd_cfg;
}
void mpc1_update_stereo_mix(
struct mpc *mpc,
struct mpcc_sm_cfg *sm_cfg,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
REG_UPDATE_6(MPCC_SM_CONTROL[mpcc_id],
MPCC_SM_EN, sm_cfg->enable,
MPCC_SM_MODE, sm_cfg->sm_mode,
MPCC_SM_FRAME_ALT, sm_cfg->frame_alt,
MPCC_SM_FIELD_ALT, sm_cfg->field_alt,
MPCC_SM_FORCE_NEXT_FRAME_POL, sm_cfg->force_next_frame_porlarity,
MPCC_SM_FORCE_NEXT_TOP_POL, sm_cfg->force_next_field_polarity);
}
void mpc1_assert_idle_mpcc(struct mpc *mpc, int id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
ASSERT(!(mpc10->mpcc_in_use_mask & 1 << id));
REG_WAIT(MPCC_STATUS[id],
MPCC_IDLE, 1,
1, 100000);
}
struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
ASSERT(mpcc_id < mpc10->num_mpcc);
return &(mpc->mpcc_array[mpcc_id]);
}
struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
{
struct mpcc *tmp_mpcc = tree->opp_list;
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
/* avoid circular linked list */
ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
if (tmp_mpcc == tmp_mpcc->mpcc_bot)
break;
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
}
bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
unsigned int top_sel;
unsigned int opp_id;
unsigned int idle;
REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle);
if (top_sel == 0xf && opp_id == 0xf && idle)
return true;
else
return false;
}
void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
unsigned int top_sel, mpc_busy, mpc_idle;
REG_GET(MPCC_TOP_SEL[mpcc_id],
MPCC_TOP_SEL, &top_sel);
if (top_sel == 0xf) {
REG_GET_2(MPCC_STATUS[mpcc_id],
MPCC_BUSY, &mpc_busy,
MPCC_IDLE, &mpc_idle);
ASSERT(mpc_busy == 0);
ASSERT(mpc_idle == 1);
}
}
/*
* Insert DPP into MPC tree based on specified blending position.
* Only used for planes that are part of the blending chain for OPP output
*
* Parameters:
* [in/out] mpc - MPC context.
* [in/out] tree - MPC tree structure that plane will be added to.
* [in] blnd_cfg - MPCC blending configuration for the new blending layer.
* [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
* Stereo mix must be disabled for the very bottom layer of the tree config.
* [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
* [in] dpp_id - DPP instance for the plane to be added.
* [in] mpcc_id - The MPCC physical instance to use for blending.
*
* Return: struct mpcc* - MPCC that was added.
*/
struct mpcc *mpc1_insert_plane(
struct mpc *mpc,
struct mpc_tree *tree,
struct mpcc_blnd_cfg *blnd_cfg,
struct mpcc_sm_cfg *sm_cfg,
struct mpcc *insert_above_mpcc,
int dpp_id,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
struct mpcc *new_mpcc = NULL;
/* sanity check parameters */
ASSERT(mpcc_id < mpc10->num_mpcc);
ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
if (insert_above_mpcc) {
/* check insert_above_mpcc exist in tree->opp_list */
struct mpcc *temp_mpcc = tree->opp_list;
if (temp_mpcc != insert_above_mpcc)
while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc == NULL)
return NULL;
}
/* Get and update MPCC struct parameters */
new_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
new_mpcc->dpp_id = dpp_id;
/* program mux and MPCC_MODE */
if (insert_above_mpcc) {
new_mpcc->mpcc_bot = insert_above_mpcc;
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, insert_above_mpcc->mpcc_id);
REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
} else {
new_mpcc->mpcc_bot = NULL;
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_ONLY);
}
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
/* Configure VUPDATE lock set for this MPCC to map to the OPP */
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
/* update mpc tree mux setting */
if (tree->opp_list == insert_above_mpcc) {
/* insert as the topmost MPCC */
tree->opp_list = new_mpcc;
REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, mpcc_id);
} else {
/* find insert position */
struct mpcc *temp_mpcc = tree->opp_list;
while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc && temp_mpcc->mpcc_bot == insert_above_mpcc) {
REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, MPCC_BOT_SEL, mpcc_id);
temp_mpcc->mpcc_bot = new_mpcc;
if (!insert_above_mpcc)
REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
}
}
/* update the blending configuration */
mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
/* update the stereo mix settings, if provided */
if (sm_cfg != NULL) {
new_mpcc->sm_cfg = *sm_cfg;
mpc1_update_stereo_mix(mpc, sm_cfg, mpcc_id);
}
/* mark this mpcc as in use */
mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
return new_mpcc;
}
/*
* Remove a specified MPCC from the MPC tree.
*
* Parameters:
* [in/out] mpc - MPC context.
* [in/out] tree - MPC tree structure that plane will be removed from.
* [in/out] mpcc - MPCC to be removed from tree.
*
* Return: void
*/
void mpc1_remove_mpcc(
struct mpc *mpc,
struct mpc_tree *tree,
struct mpcc *mpcc_to_remove)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
bool found = false;
int mpcc_id = mpcc_to_remove->mpcc_id;
if (tree->opp_list == mpcc_to_remove) {
found = true;
/* remove MPCC from top of tree */
if (mpcc_to_remove->mpcc_bot) {
/* set the next MPCC in list to be the top MPCC */
tree->opp_list = mpcc_to_remove->mpcc_bot;
REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, tree->opp_list->mpcc_id);
} else {
/* there is no other MPCC in the list */
tree->opp_list = NULL;
REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, 0xf);
}
} else {
/* find the MPCC directly above mpcc_to_remove in the list */
struct mpcc *temp_mpcc = tree->opp_list;
while (temp_mpcc && temp_mpcc->mpcc_bot != mpcc_to_remove)
temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc && temp_mpcc->mpcc_bot == mpcc_to_remove) {
found = true;
temp_mpcc->mpcc_bot = mpcc_to_remove->mpcc_bot;
if (mpcc_to_remove->mpcc_bot) {
/* remove MPCC in middle of list */
REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
MPCC_BOT_SEL, mpcc_to_remove->mpcc_bot->mpcc_id);
} else {
/* remove MPCC from bottom of list */
REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
MPCC_BOT_SEL, 0xf);
REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
}
}
}
if (found) {
/* turn off MPCC mux registers */
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
/* mark this mpcc as not in use */
mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
mpcc_to_remove->dpp_id = 0xf;
mpcc_to_remove->mpcc_bot = NULL;
} else {
/* In case of resume from S3/S4, clear MPCC state left over by the BIOS */
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
}
}
static void mpc1_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
{
mpcc->mpcc_id = mpcc_inst;
mpcc->dpp_id = 0xf;
mpcc->mpcc_bot = NULL;
mpcc->blnd_cfg.overlap_only = false;
mpcc->blnd_cfg.global_alpha = 0xff;
mpcc->blnd_cfg.global_gain = 0xff;
mpcc->sm_cfg.enable = false;
}
/*
* Reset the MPCC HW status by disconnecting all muxes.
*
* Parameters:
* [in/out] mpc - MPC context.
*
* Return: void
*/
void mpc1_mpc_init(struct mpc *mpc)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
int mpcc_id;
int opp_id;
mpc10->mpcc_in_use_mask = 0;
for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
}
for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
if (REG(MUX[opp_id]))
REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf);
}
}
void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
int opp_id;
REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
if (opp_id < MAX_OPP && REG(MUX[opp_id]))
REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf);
}
void mpc1_init_mpcc_list_from_hw(
struct mpc *mpc,
struct mpc_tree *tree)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
unsigned int opp_id;
unsigned int top_sel;
unsigned int bot_sel;
unsigned int out_mux;
struct mpcc *mpcc;
int mpcc_id;
int bot_mpcc_id;
REG_GET(MUX[tree->opp_id], MPC_OUT_MUX, &out_mux);
if (out_mux != 0xf) {
for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
REG_GET(MPCC_BOT_SEL[mpcc_id], MPCC_BOT_SEL, &bot_sel);
if (bot_sel == mpcc_id)
bot_sel = 0xf;
if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
mpcc = mpc1_get_mpcc(mpc, mpcc_id);
mpcc->dpp_id = top_sel;
mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
if (out_mux == mpcc_id)
tree->opp_list = mpcc;
if (bot_sel != 0xf && bot_sel < mpc10->num_mpcc) {
bot_mpcc_id = bot_sel;
REG_GET(MPCC_OPP_ID[bot_mpcc_id], MPCC_OPP_ID, &opp_id);
REG_GET(MPCC_TOP_SEL[bot_mpcc_id], MPCC_TOP_SEL, &top_sel);
if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
struct mpcc *mpcc_bottom = mpc1_get_mpcc(mpc, bot_mpcc_id);
mpcc->mpcc_bot = mpcc_bottom;
}
}
}
}
}
}
void mpc1_read_mpcc_state(
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
MPCC_BUSY, &s->busy);
}
void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
}
unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
uint32_t val = 0xf;
if (opp_id < MAX_OPP && REG(MUX[opp_id]))
REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
return val;
}
static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
.wait_for_idle = mpc1_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.update_blending = mpc1_update_blending,
.cursor_lock = mpc1_cursor_lock,
.set_denorm = NULL,
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
.set_output_gamma = NULL,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
struct dc_context *ctx,
const struct dcn_mpc_registers *mpc_regs,
const struct dcn_mpc_shift *mpc_shift,
const struct dcn_mpc_mask *mpc_mask,
int num_mpcc)
{
int i;
mpc10->base.ctx = ctx;
mpc10->base.funcs = &dcn10_mpc_funcs;
mpc10->mpc_regs = mpc_regs;
mpc10->mpc_shift = mpc_shift;
mpc10->mpc_mask = mpc_mask;
mpc10->mpcc_in_use_mask = 0;
mpc10->num_mpcc = num_mpcc;
for (i = 0; i < MAX_MPCC; i++)
mpc1_init_mpcc(&mpc10->base.mpcc_array[i], i);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "dm_services.h"
#include "basics/dc_common.h"
#include "core_types.h"
#include "resource.h"
#include "custom_float.h"
#include "dcn10_hw_sequencer.h"
#include "dcn10_hw_sequencer_debug.h"
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
#include "dcn10_optc.h"
#include "dcn10_dpp.h"
#include "dcn10_mpc.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "reg_helper.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "dccg.h"
#include "clk_mgr.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dsc.h"
#include "dce/dmub_psr.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
#include "link.h"
#define DC_LOGGER_INIT(logger)
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
#undef FN
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
/*print is 17 wide, first two characters are spaces*/
#define DTN_INFO_MICRO_SEC(ref_cycle) \
print_microsec(dc_ctx, log_ctx, ref_cycle)
#define GAMMA_HW_POINTS_NUM 256
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
static void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
{
const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
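/* e.g. with a 100 MHz DCHUB reference clock, ref_cycle == 12345 is
* printed as 123.450 (microseconds with three fractional digits).
*/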
DTN_INFO(" %11d.%03d",
us_x10 / frac,
us_x10 % frac);
}
void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
{
struct pipe_ctx *pipe_ctx;
struct pipe_ctx *old_pipe_ctx;
struct timing_generator *tg;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
/*
* Only lock the top pipe's tg to prevent redundant
* (un)locking. Also skip if pipe is disabled.
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
!tg->funcs->is_tg_enabled(tg))
continue;
if (lock)
dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
else
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
}
static void log_mpc_crc(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct dce_hwseq *hws = dc->hwseq;
if (REG(MPC_CRC_RESULT_GB))
DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
static void dcn10_log_hubbub_state(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct dcn_hubbub_wm wm;
int i;
memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
" sr_enter sr_exit dram_clk_change\n");
for (i = 0; i < 4; i++) {
struct dcn_hubbub_wm_set *s;
s = &wm.sets[i];
DTN_INFO("WM_Set[%d]:", s->wm_set);
DTN_INFO_MICRO_SEC(s->data_urgent);
DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
DTN_INFO_MICRO_SEC(s->sr_enter);
DTN_INFO_MICRO_SEC(s->sr_exit);
DTN_INFO_MICRO_SEC(s->dram_clk_change);
DTN_INFO("\n");
}
DTN_INFO("\n");
}
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
int i;
DTN_INFO(
"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
for (i = 0; i < pool->pipe_count; i++) {
struct hubp *hubp = pool->hubps[i];
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
hubp->funcs->hubp_read_state(hubp);
if (!s->blank_en) {
DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
hubp->inst,
s->pixel_format,
s->inuse_addr_hi,
s->viewport_width,
s->viewport_height,
s->rotation_angle,
s->h_mirror_en,
s->sw_mode,
s->dcc_en,
s->blank_en,
s->clock_en,
s->ttu_disable,
s->underflow_status);
DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
DTN_INFO("\n");
}
}
DTN_INFO("\n=========RQ========\n");
DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
}
DTN_INFO("========DLG========\n");
DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
" x_rp_dlay x_rr_sfl\n");
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
dlg_regs->xfc_reg_remote_surface_flip_latency);
}
DTN_INFO("========TTU========\n");
DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
}
DTN_INFO("\n");
}
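/*
 * Dump the current DCN hardware state (HUBBUB, HUBP, DPP, MPCC, OTG, DSC,
 * stream/link encoders, HPO encoders and the calculated clocks) to the
 * DTN log buffer for debugging.
 */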
void dcn10_log_hw_state(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
int i;
DTN_INFO_BEGIN();
dcn10_log_hubbub_state(dc, log_ctx);
dcn10_log_hubp_states(dc, log_ctx);
DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
"C31 C32 C33 C34\n");
for (i = 0; i < pool->pipe_count; i++) {
struct dpp *dpp = pool->dpps[i];
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
if (!s.is_enabled)
continue;
DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
dpp->inst,
s.igam_input_format,
(s.igam_lut_mode == 0) ? "BypassFixed" :
((s.igam_lut_mode == 1) ? "BypassFloat" :
((s.igam_lut_mode == 2) ? "RAM" :
((s.igam_lut_mode == 3) ? "RAM" :
"Unknown"))),
(s.dgam_lut_mode == 0) ? "Bypass" :
((s.dgam_lut_mode == 1) ? "sRGB" :
((s.dgam_lut_mode == 2) ? "Ycc" :
((s.dgam_lut_mode == 3) ? "RAM" :
((s.dgam_lut_mode == 4) ? "RAM" :
"Unknown")))),
(s.rgam_lut_mode == 0) ? "Bypass" :
((s.rgam_lut_mode == 1) ? "sRGB" :
((s.rgam_lut_mode == 2) ? "Ycc" :
((s.rgam_lut_mode == 3) ? "RAM" :
((s.rgam_lut_mode == 4) ? "RAM" :
"Unknown")))),
s.gamut_remap_mode,
s.gamut_remap_c11_c12,
s.gamut_remap_c13_c14,
s.gamut_remap_c21_c22,
s.gamut_remap_c23_c24,
s.gamut_remap_c31_c32,
s.gamut_remap_c33_c34);
DTN_INFO("\n");
}
DTN_INFO("\n");
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
for (i = 0; i < pool->pipe_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
if (s.opp_id != 0xf)
DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
s.idle);
}
DTN_INFO("\n");
DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
for (i = 0; i < pool->timing_generator_count; i++) {
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
/* Read shared OTG state registers for all DCNx */
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
/*
 * For DCN2 and greater, a register on the OPP is used to
 * determine if the CRTC is blanked instead of the OTG. So use
 * dpg_is_blanked() if it exists, otherwise fall back to the OTG.
 *
 * TODO: Implement DCN-specific read_otg_state hooks.
 */
if (pool->opps[i]->funcs->dpg_is_blanked)
s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
else
s.blank_enabled = tg->funcs->is_blanked(tg);
// only print if the OTG master is enabled
if ((s.otg_enabled & 1) == 0)
continue;
DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
tg->inst,
s.v_blank_start,
s.v_blank_end,
s.v_sync_a_start,
s.v_sync_a_end,
s.v_sync_a_pol,
s.v_total_max,
s.v_total_min,
s.v_total_max_sel,
s.v_total_min_sel,
s.h_blank_start,
s.h_blank_end,
s.h_sync_a_start,
s.h_sync_a_end,
s.h_sync_a_pol,
s.h_total,
s.v_total,
s.underflow_occurred_status,
s.blank_enabled);
// Clear underflow for debug purposes.
// We want to keep the underflow sticky bit on for longevity tests outside of the test environment.
// This function is called only from a Windows or Diags test environment, so it is safe to clear
// it here without affecting the original intent.
tg->funcs->clear_optc_underflow(tg);
}
DTN_INFO("\n");
// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
// TODO: Update golden log header to reflect this name change
DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
for (i = 0; i < pool->res_cap->num_dsc; i++) {
struct display_stream_compressor *dsc = pool->dscs[i];
struct dcn_dsc_state s = {0};
dsc->funcs->dsc_read_state(dsc, &s);
DTN_INFO("[%d]: %-9d %-12d %-10d\n",
dsc->inst,
s.dsc_clock_en,
s.dsc_slice_width,
s.dsc_bits_per_pixel);
DTN_INFO("\n");
}
DTN_INFO("\n");
DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
for (i = 0; i < pool->stream_enc_count; i++) {
struct stream_encoder *enc = pool->stream_enc[i];
struct enc_state s = {0};
if (enc->funcs->enc_read_state) {
enc->funcs->enc_read_state(enc, &s);
DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
enc->id,
s.dsc_mode,
s.sec_gsp_pps_line_num,
s.vbid6_line_reference,
s.vbid6_line_num,
s.sec_gsp_pps_enable,
s.sec_stream_enable);
DTN_INFO("\n");
}
}
DTN_INFO("\n");
DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
for (i = 0; i < dc->link_count; i++) {
struct link_encoder *lenc = dc->links[i]->link_enc;
struct link_enc_state s = {0};
if (lenc && lenc->funcs->read_state) {
lenc->funcs->read_state(lenc, &s);
DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
i,
s.dphy_fec_en,
s.dphy_fec_ready_shadow,
s.dphy_fec_active_status,
s.dp_link_training_complete);
DTN_INFO("\n");
}
}
DTN_INFO("\n");
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
log_mpc_crc(dc, log_ctx);
{
if (pool->hpo_dp_stream_enc_count > 0) {
DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
hpo_dp_se_state.stream_enc_enabled,
hpo_dp_se_state.otg_inst,
(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
(hpo_dp_se_state.component_depth == 0) ? 6 :
((hpo_dp_se_state.component_depth == 1) ? 8 :
(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
hpo_dp_se_state.vid_stream_enabled,
hpo_dp_se_state.sdp_enabled,
hpo_dp_se_state.compressed_format,
hpo_dp_se_state.mapped_to_link_enc);
}
}
DTN_INFO("\n");
}
/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
if (pool->hpo_dp_link_enc_count) {
DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
if (hpo_dp_link_enc->funcs->read_state) {
hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
hpo_dp_link_enc->inst,
hpo_dp_le_state.link_enc_enabled,
(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
hpo_dp_le_state.lane_count,
hpo_dp_le_state.stream_src[0],
hpo_dp_le_state.slot_count[0],
hpo_dp_le_state.vc_rate_x[0],
hpo_dp_le_state.vc_rate_y[0]);
DTN_INFO("\n");
}
}
DTN_INFO("\n");
}
}
DTN_INFO_END();
}
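/*
 * Check whether an underflow occurred on this pipe's OTG or HUBP.
 * The sticky underflow status is cleared as a side effect.
 */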
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
if (tg->funcs->is_optc_underflow_occurred(tg)) {
tg->funcs->clear_optc_underflow(tg);
return true;
}
if (hubp->funcs->hubp_get_underflow_status(hubp)) {
hubp->funcs->hubp_clear_underflow(hubp);
return true;
}
return false;
}
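/*
 * Allow (enable == true) or forcibly disallow power gating of the
 * DCHUBP0-3 and DPP0-3 power domains by toggling their POWER_FORCEON bits.
 */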
void dcn10_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
bool force_on = true; /* disable power gating */
if (enable)
force_on = false;
/* DCHUBP0/1/2/3 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
/* DPP0/1/2/3 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
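/*
 * Take the display controllers out of legacy VGA mode. If any of the
 * D1-D4 VGA modes are enabled, disable them and kick off a VGA test
 * render so the DCHUBP timing is updated correctly (see the HW
 * engineer's notes below).
 */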
void dcn10_disable_vga(
struct dce_hwseq *hws)
{
unsigned int in_vga1_mode = 0;
unsigned int in_vga2_mode = 0;
unsigned int in_vga3_mode = 0;
unsigned int in_vga4_mode = 0;
REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
in_vga3_mode == 0 && in_vga4_mode == 0)
return;
REG_WRITE(D1VGA_CONTROL, 0);
REG_WRITE(D2VGA_CONTROL, 0);
REG_WRITE(D3VGA_CONTROL, 0);
REG_WRITE(D4VGA_CONTROL, 0);
/* HW Engineer's Notes:
* During switch from vga->extended, if we set the VGA_TEST_ENABLE and
* then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
*
* Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
* VGA_TEST_ENABLE, to leave it in the same state as before.
*/
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
/**
* dcn10_dpp_pg_control - DPP power gate control.
*
* @hws: dce_hwseq reference.
* @dpp_inst: DPP instance reference.
* @power_on: true to power the DPP up (un-gate it), false to power gate it.
*
* Enable or disable power gate in the specific DPP instance.
*/
void dcn10_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
{
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
if (hws->ctx->dc->debug.disable_dpp_power_gate)
return;
if (REG(DOMAIN1_PG_CONFIG) == 0)
return;
switch (dpp_inst) {
case 0: /* DPP0 */
REG_UPDATE(DOMAIN1_PG_CONFIG,
DOMAIN1_POWER_GATE, power_gate);
REG_WAIT(DOMAIN1_PG_STATUS,
DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 1: /* DPP1 */
REG_UPDATE(DOMAIN3_PG_CONFIG,
DOMAIN3_POWER_GATE, power_gate);
REG_WAIT(DOMAIN3_PG_STATUS,
DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 2: /* DPP2 */
REG_UPDATE(DOMAIN5_PG_CONFIG,
DOMAIN5_POWER_GATE, power_gate);
REG_WAIT(DOMAIN5_PG_STATUS,
DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 3: /* DPP3 */
REG_UPDATE(DOMAIN7_PG_CONFIG,
DOMAIN7_POWER_GATE, power_gate);
REG_WAIT(DOMAIN7_PG_STATUS,
DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
/**
* dcn10_hubp_pg_control - HUBP power gate control.
*
* @hws: dce_hwseq reference.
* @hubp_inst: HUBP instance reference.
* @power_on: true to power the HUBP up (un-gate it), false to power gate it.
*
* Enable or disable power gate in the specific HUBP instance.
*/
void dcn10_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
{
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
if (hws->ctx->dc->debug.disable_hubp_power_gate)
return;
if (REG(DOMAIN0_PG_CONFIG) == 0)
return;
switch (hubp_inst) {
case 0: /* DCHUBP0 */
REG_UPDATE(DOMAIN0_PG_CONFIG,
DOMAIN0_POWER_GATE, power_gate);
REG_WAIT(DOMAIN0_PG_STATUS,
DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 1: /* DCHUBP1 */
REG_UPDATE(DOMAIN2_PG_CONFIG,
DOMAIN2_POWER_GATE, power_gate);
REG_WAIT(DOMAIN2_PG_STATUS,
DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 2: /* DCHUBP2 */
REG_UPDATE(DOMAIN4_PG_CONFIG,
DOMAIN4_POWER_GATE, power_gate);
REG_WAIT(DOMAIN4_PG_STATUS,
DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 3: /* DCHUBP3 */
REG_UPDATE(DOMAIN6_PG_CONFIG,
DOMAIN6_POWER_GATE, power_gate);
REG_WAIT(DOMAIN6_PG_STATUS,
DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
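/*
 * Un-gate the DPP and HUBP power domains (and enable the DPP root clock,
 * if supported) for the given plane before its front end is programmed.
 */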
static void power_on_plane_resources(
struct dce_hwseq *hws,
int plane_id)
{
DC_LOGGER_INIT(hws->ctx->logger);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, plane_id, true);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, plane_id, true);
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, plane_id, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
"Un-gated front end for pipe %d\n", plane_id);
}
}
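/*
 * DEGVIDCN10-253 workaround: once every HUBP is power gated, keep HUBP0
 * powered up and unblanked so stutter can still be entered;
 * undo_DEGVIDCN10_253_wa() restores HUBP0 to the blanked, gated state.
 */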
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = dc->res_pool->hubps[0];
if (!hws->wa_state.DEGVIDCN10_253_applied)
return;
hubp->funcs->set_blank(hubp, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
hws->funcs.hubp_pg_control(hws, 0, false);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
hws->wa_state.DEGVIDCN10_253_applied = false;
}
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = dc->res_pool->hubps[0];
int i;
if (dc->debug.disable_stutter)
return;
if (!hws->wa.DEGVIDCN10_253)
return;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!dc->res_pool->hubps[i]->power_gated)
return;
}
/* All pipes are power gated; apply the workaround to enable stutter. */
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
hws->funcs.hubp_pg_control(hws, 0, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
hubp->funcs->set_hubp_blank_en(hubp, false);
hws->wa_state.DEGVIDCN10_253_applied = true;
}
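/*
 * Run the VBIOS golden init sequence: global and per-pipe display power
 * gating init through the command table, then restore the self-refresh
 * allow state if the command table force-enabled it.
 */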
void dcn10_bios_golden_init(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *bp = dc->ctx->dc_bios;
int i;
bool allow_self_refresh_force_enable = true;
if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
return;
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_refresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
/* WA to let the DF sleep when idle after resume from S0i3:
 * the command table sets DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE to 1.
 * If it was 0 before the command table was called and changed to 1
 * afterwards, it should be set back to 0.
 */
/* initialize dcn global */
bp->funcs->enable_disp_power_gating(bp,
CONTROLLER_ID_D0, ASIC_PIPE_INIT);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
/* initialize dcn per pipe */
bp->funcs->enable_disp_power_gating(bp,
CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
}
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
if (allow_self_refresh_force_enable == false &&
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
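/*
 * Workaround for a false OPTC underflow: wait for pending MPCC
 * disconnects on this stream, re-enable blank data double buffering and
 * clear the underflow only if it was newly raised during the wait.
 */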
static void false_optc_underflow_wa(
struct dc *dc,
const struct dc_stream_state *stream,
struct timing_generator *tg)
{
int i;
bool underflow;
if (!dc->hwseq->wa.false_optc_underflow)
return;
underflow = tg->funcs->is_optc_underflow_occurred(tg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (old_pipe_ctx->stream != stream)
continue;
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
}
if (tg->funcs->set_blank_data_double_buffer)
tg->funcs->set_blank_data_double_buffer(tg, true);
if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
tg->funcs->clear_optc_underflow(tg);
}
static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
struct pipe_ctx *other_pipe;
int vready_offset = pipe->pipe_dlg_param.vready_offset;
/* Always use the largest vready_offset of all connected pipes */
for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
vready_offset = other_pipe->pipe_dlg_param.vready_offset;
}
return vready_offset;
}
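/*
 * Enable the OTG for this stream: turn on the OPTC clock, program the
 * pixel clock and timing, set the blank color, blank the OTG if needed
 * and finally enable the CRTC. Only the top pipe programs the back end.
 */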
enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
{
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space;
struct tg_color black_color = {0};
/* By the upper caller's loop, pipe0 is the parent pipe and is called first.
 * The back end is set up for pipe0; other child pipes share the back end
 * with pipe0, so no programming is needed for them.
 */
if (pipe_ctx->top_pipe != NULL)
return DC_OK;
/* TODO check if timing_changed, disable stream if timing changed */
/* The HW programming guide assumes the display was already disabled by the
 * unplug sequence and that the OTG is stopped.
 */
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
}
if (dc_is_hdmi_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
else
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
pipe_ctx->stream->signal,
true);
#if 0 /* move to after enable_crtc */
/* TODO: OPP FMT, ABM. etc. should be done here. */
/* or FPGA now. instance 0 only. TODO: move to opp.c */
inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
#endif
/* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
/*
* The way 420 is packed, 2 channels carry Y component, 1 channel
* alternate between Cb and Cr, so both channels need the pixel
* value for Y
*/
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
black_color.color_r_cr = black_color.color_g_y;
if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
pipe_ctx->stream_res.tg->funcs->set_blank_color(
pipe_ctx->stream_res.tg,
&black_color);
if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
}
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
}
/* TODO program crtc source select for non-virtual signal*/
/* TODO program FMT */
/* TODO setup link_enc */
/* TODO set stream attributes */
/* TODO program audio */
/* TODO enable stream if timing changed */
/* TODO unblank stream if DP */
return DC_OK;
}
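/*
 * Tear down the back end for a pipe: turn the stream off (DPMS), release
 * the audio endpoint, and for the top pipe disable the CRTC, OPTC clock
 * and DRR before detaching the stream from the pipe context.
 */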
static void dcn10_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
int i;
struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
}
link = pipe_ctx->stream->link;
/* DPMS may already be disabled, or the dpms_off status may be incorrect
 * due to the fastboot feature: when the system resumes from S4 with only
 * the second screen active, dpms_off is true but the VBIOS has lit up
 * eDP, so check the link status too.
 */
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
if (pipe_ctx->stream_res.audio) {
/*disable az_endpoint*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
/*free audio*/
if (dc->caps.dynamic_audio == true) {
/* we have to dynamically arbitrate the audio endpoints */
/* free the resource; is_audio_acquired needs to be reset */
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
}
/* By the upper caller's loop, the parent pipe (pipe0) is reset last. The
 * back end is shared by all pipes and is disabled only when the parent
 * pipe is disabled.
 */
if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
break;
if (i == dc->res_pool->pipe_count)
return;
pipe_ctx->stream = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
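/*
 * Force-recover the display pipes after a HUBP underflow by blanking all
 * HUBPs, soft-resetting DCHUBBUB and re-enabling the HUBPs, following the
 * register sequence documented inside the function.
 */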
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
{
struct hubp *hubp;
unsigned int i;
bool need_recover = false;
if (!dc->debug.recovery_enabled)
return false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
/* one pipe underflowed; we will reset all the pipes */
need_recover = true;
}
}
}
}
if (!need_recover)
return false;
/*
 * DCHUBP_CNTL:HUBP_BLANK_EN=1
 * DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
 * DCHUBP_CNTL:HUBP_DISABLE=1
 * DCHUBP_CNTL:HUBP_DISABLE=0
 * DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
 * DCSURF_PRIMARY_SURFACE_ADDRESS
 * DCHUBP_CNTL:HUBP_BLANK_EN=0
 */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
}
/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
hubbub1_soft_reset(dc->res_pool->hubbub, true);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_DISABLE=1*/
if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, true);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/* DCHUBP_CNTL:HUBP_DISABLE=0 */
if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, false);
}
}
/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
hubbub1_soft_reset(dc->res_pool->hubbub, false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/* DCHUBP_CNTL:HUBP_BLANK_EN=0 */
if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, false);
}
}
return true;
}
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
static bool should_log_hw_state; /* prevent hw state log by default */
if (!hubbub->funcs->verify_allow_pstate_change_high)
return;
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
int i = 0;
if (should_log_hw_state)
dcn10_log_hw_state(dc, NULL);
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
BREAK_TO_DEBUGGER();
if (dcn10_hw_wa_force_recovery(dc)) {
/*check again*/
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
BREAK_TO_DEBUGGER();
}
}
}
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
int dpp_id = pipe_ctx->plane_res.dpp->inst;
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params;
struct mpcc *mpcc_to_remove = NULL;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
mpc_tree_params = &(opp->mpc_tree_params);
mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
/*Already reset*/
if (mpcc_to_remove == NULL)
return;
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
// Phantom pipes have the OTG disabled by default, so MPCC_STATUS will never assert idle;
// don't wait for MPCC_IDLE in the programming sequence.
if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
if (hubp->funcs->hubp_disconnect)
hubp->funcs->hubp_disconnect(hubp);
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
}
/**
* dcn10_plane_atomic_power_down - Power down plane components.
*
* @dc: dc struct reference. used for grab hwseq.
* @dpp: dpp struct reference.
* @hubp: hubp struct reference.
*
* Power gating the plane requires issuing a power gate request; because such
* requests are tightly controlled to avoid problems, the request interface is
* normally disabled. This function therefore enables the power gate request
* first, gates the DPP and HUBP, and then disables the request interface again.
*/
void dcn10_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp)
{
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
"Power gated front end %d\n", hubp->inst);
}
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
int opp_id = hubp->opp_id;
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
false);
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
pipe_ctx->top_pipe = NULL;
pipe_ctx->bottom_pipe = NULL;
pipe_ctx->plane_state = NULL;
}
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
hws->funcs.plane_atomic_disable(dc, pipe_ctx);
apply_DEGVIDCN10_253_wa(dc);
DC_LOG_DC("Power down front end %d\n",
pipe_ctx->pipe_idx);
}
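/*
 * Bring every pipe into a known state at init: blank and lock enabled
 * OTGs, reset DET sizes and the MPC tree, disconnect and disable the
 * front ends, and power gate any DSC that is not in use. Pipes carried
 * over for seamless boot are left untouched.
 */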
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
int i;
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
bool can_apply_seamless_boot = false;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization) {
can_apply_seamless_boot = true;
break;
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* It is assumed that pipe_ctx does not map irregularly to a non-preferred
 * front end. If pipe_ctx->stream is not NULL, the pipe will be used, so
 * do not disable it.
 */
if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
/* Blank controller using driver code instead of
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
if (hws->funcs.init_blank != NULL) {
hws->funcs.init_blank(dc, tg);
tg->funcs->lock(tg);
} else {
tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
}
}
/* Reset det size */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = dc->res_pool->hubps[i];
/* Do not need to reset for seamless boot */
if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
if (hubbub && hubp) {
if (hubbub->funcs->program_det_size)
hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
}
}
/* num_opp will be equal to number of mpcc */
for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Cannot reset the MPC mux if seamless boot */
if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
dc->res_pool->mpc->funcs->mpc_init_single_inst(
dc->res_pool->mpc, i);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct hubp *hubp = dc->res_pool->hubps[i];
struct dpp *dpp = dc->res_pool->dpps[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* It is assumed that pipe_ctx does not map irregularly to a non-preferred
 * front end. If pipe_ctx->stream is not NULL, the pipe will be used, so
 * do not disable it.
 */
if (can_apply_seamless_boot &&
pipe_ctx->stream != NULL &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
pipe_ctx->stream_res.tg)) {
// Enable double buffering for OTG_BLANK regardless of whether seamless
// boot is enabled, to suppress global sync signals when the OTG is
// blanked. This prevents the pipe from requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
continue;
}
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
pipe_ctx->pipe_idx = i;
pipe_ctx->plane_res.hubp = hubp;
pipe_ctx->plane_res.dpp = dpp;
pipe_ctx->plane_res.mpcc_inst = dpp->inst;
hubp->mpcc_id = dpp->inst;
hubp->opp_id = OPP_ID_INVALID;
hubp->power_gated = false;
dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
dc->hwss.disable_plane(dc, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
if (tg->funcs->is_tg_enabled(tg)) {
if (tg->funcs->init_odm)
tg->funcs->init_odm(tg);
}
tg->funcs->tg_init(tg);
}
/* Power gate DSCs */
if (hws->funcs.dsc_pg_control != NULL) {
uint32_t num_opps = 0;
uint32_t opp_id_src0 = OPP_ID_INVALID;
uint32_t opp_id_src1 = OPP_ID_INVALID;
// Step 1: Find out which OPTC is running and has DSC enabled.
// We can't use res_pool->res_cap->num_timing_generator to check, because
// it holds the default pipe count built into the driver, not the pipe
// count of the current chip. Some ASICs are fused to fewer display pipes
// than the default; dcnxx_resource_construct obtains the real count.
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
uint32_t optc_dsc_state = 0;
struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg)) {
if (tg->funcs->get_dsc_status)
tg->funcs->get_dsc_status(tg, &optc_dsc_state);
// Only one OPTC with DSC can be on, so exit the loop once one is found.
// A non-zero value means DSC is enabled.
if (optc_dsc_state != 0) {
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
break;
}
}
}
// Step 2: Power down every DSC except the one attached to the running OPTC.
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
struct dcn_dsc_state s = {0};
dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
s.dsc_clock_en && s.dsc_fw_en)
continue;
hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
}
}
}
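/*
 * One-time hardware init: initialize clocks, DCCG and reference clocks,
 * disable VGA, run the BIOS golden init, bring up the link encoders,
 * blank all DP displays, initialize pipes, audio, backlight/ABM/DMCU and
 * enable clock gating.
 */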
void dcn10_init_hw(struct dc *dc)
{
int i;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
bool is_optimized_init_done = false;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
/* Align bw context with hw config when system resume. */
if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
}
// Initialize the dccg
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
if (!dcb->funcs->is_accelerated_mode(dcb))
hws->funcs.disable_vga(dc->hwseq);
if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
hws->funcs.bios_golden_init(dc);
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
if (res_pool->dccg && res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else {
// Not all ASICs have DCCG sw component
res_pool->ref_clocks.dccg_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
for (i = 0; i < dc->link_count; i++) {
/* Power up AND update implementation according to the
* required signal (which may be different from the
* default signal on connector).
*/
struct dc_link *link = dc->links[i];
if (!is_optimized_init_done)
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
}
}
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
* Otherwise, if taking control is not possible, we need to power
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
if (!is_optimized_init_done) {
hws->funcs.init_pipes(dc, dc->current_state);
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
}
if (!is_optimized_init_done) {
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
if (link->panel_cntl)
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
if (abm != NULL)
abm->funcs->abm_init(abm, backlight);
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
}
if (abm != NULL && dmcu != NULL)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
if (!is_optimized_init_done)
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
/* In headless boot cases, a DIG may be turned on, which causes HW/SW
 * discrepancies. To avoid this, power down the hardware on boot if a DIG
 * is turned on.
 */
void dcn10_power_down_on_boot(struct dc *dc)
{
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
int edp_num;
int i = 0;
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwseq->funcs.edp_backlight_control &&
dc->hwss.power_down &&
dc->hwss.edp_power_control) {
dc->hwseq->funcs.edp_backlight_control(edp_link, false);
dc->hwss.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
dc->hwss.power_down) {
dc->hwss.power_down(dc);
break;
}
}
}
/*
* Call update_clocks with empty context
* to send DISPLAY_OFF
* Otherwise DISPLAY_OFF may not be asserted
*/
if (dc->clk_mgr->funcs->set_low_power_state)
dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
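/*
 * Reset the back end of every top pipe whose stream is gone or needs
 * reprogramming in the new context, then power down its clock source.
 */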
void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx_old->stream)
continue;
if (pipe_ctx_old->top_pipe)
continue;
if (!pipe_ctx->stream ||
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
}
}
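/*
 * For side-by-side / top-and-bottom stereo, the secondary split pipe must
 * scan out the right-eye address: temporarily swap it in and return true
 * so the caller can restore the original left address afterwards.
 */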
static bool patch_address_for_sbs_tb_stereo(
struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
(pipe_ctx->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE ||
pipe_ctx->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
*addr = plane_state->address.grph_stereo.left_addr;
plane_state->address.grph_stereo.left_addr =
plane_state->address.grph_stereo.right_addr;
return true;
} else {
if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
plane_state->address.grph_stereo.right_addr =
plane_state->address.grph_stereo.left_addr;
plane_state->address.grph_stereo.right_meta_addr =
plane_state->address.grph_stereo.left_meta_addr;
}
}
return false;
}
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
if (plane_state == NULL)
return;
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
pipe_ctx->plane_res.hubp,
&plane_state->address,
plane_state->flip_immediate);
plane_state->status.requested_address = plane_state->address;
if (plane_state->flip_immediate)
plane_state->status.current_address = plane_state->address;
if (addr_patched)
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
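/*
 * Program the DPP input (degamma) path from the plane's input transfer
 * function: predefined curves map to fixed HW modes, PQ and arbitrary
 * curves are translated to a user PWL, and NULL means bypass.
 */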
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
bool result = true;
if (dpp_base == NULL)
return false;
if (plane_state->in_transfer_func)
tf = plane_state->in_transfer_func;
if (plane_state->gamma_correction &&
!dpp_base->ctx->dc->debug.always_use_regamma
&& !plane_state->gamma_correction->is_identity
&& dce_use_lut(plane_state->format))
dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
if (tf == NULL)
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
else if (tf->type == TF_TYPE_PREDEFINED) {
switch (tf->tf) {
case TRANSFER_FUNCTION_SRGB:
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
break;
case TRANSFER_FUNCTION_BT709:
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
break;
case TRANSFER_FUNCTION_LINEAR:
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
result = true;
break;
default:
result = false;
break;
}
} else if (tf->type == TF_TYPE_BYPASS) {
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
} else {
cm_helper_translate_curve_to_degamma_hw_format(tf,
&dpp_base->degamma_params);
dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
&dpp_base->degamma_params);
result = true;
}
return result;
}
#define MAX_NUM_HW_POINTS 0x200
static void log_tf(struct dc_context *ctx,
struct dc_transfer_func *tf, uint32_t hw_points_num)
{
// DC_LOG_GAMMA logs the hw points by default.
// DC_LOG_ALL_GAMMA logs all points, not only the hw points.
// DC_LOG_ALL_TF_CHANNELS logs all channels of the tf.
int i = 0;
DC_LOGGER_INIT(ctx->logger);
DC_LOG_GAMMA("Gamma Correction TF");
DC_LOG_ALL_GAMMA("Logging all tf points...");
DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
for (i = 0; i < hw_points_num; i++) {
DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
}
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
if (dpp == NULL)
return false;
dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
if (stream->out_transfer_func &&
stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
/* Translating the regamma curve to hw format takes ~750us, so only do it
 * on a full update.
 */
else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
dpp,
&dpp->regamma_params, OPP_REGAMMA_USER);
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
if (stream != NULL && stream->ctx != NULL &&
stream->out_transfer_func != NULL) {
log_tf(stream->ctx,
stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
}
return true;
}
void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock)
{
struct dce_hwseq *hws = dc->hwseq;
/* Use the TG master update lock to lock everything on the TG; therefore
 * only the top pipe needs to lock.
 */
if (!pipe || pipe->top_pipe)
return;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
else
pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
}
/**
* delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
*
* Software keepout workaround to prevent cursor update locking from stalling
* out cursor updates indefinitely, or old values from being retained when
* the viewport changes in the same frame as the cursor.
*
* The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
* too close to VUPDATE, then stall out until VUPDATE finishes.
*
* TODO: Optimize cursor programming to be once per frame before VUPDATE
* to avoid the need for this workaround.
*
* @dc: Current DC state
* @pipe_ctx: Pipe_ctx pointer for delayed cursor update
*
* Return: void
*/
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct crtc_position position;
uint32_t vupdate_start, vupdate_end;
unsigned int lines_to_vupdate, us_to_vupdate, vpos;
unsigned int us_per_line, us_vupdate;
if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
return;
if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
return;
dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
&vupdate_end);
dc->hwss.get_position(&pipe_ctx, 1, &position);
vpos = position.vertical_count;
/* Avoid wraparound calculation issues */
vupdate_start += stream->timing.v_total;
vupdate_end += stream->timing.v_total;
vpos += stream->timing.v_total;
if (vpos <= vupdate_start) {
/* VPOS is in VACTIVE or back porch. */
lines_to_vupdate = vupdate_start - vpos;
} else if (vpos > vupdate_end) {
/* VPOS is in the front porch. */
return;
} else {
/* VPOS is in VUPDATE. */
lines_to_vupdate = 0;
}
/* Calculate time until VUPDATE in microseconds. */
us_per_line =
stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
us_to_vupdate = lines_to_vupdate * us_per_line;
/* 70 us is a conservative estimate of cursor update time*/
if (us_to_vupdate > 70)
return;
/* Stall out until the cursor update completes. */
if (vupdate_end < vupdate_start)
vupdate_end += stream->timing.v_total;
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
udelay(us_to_vupdate + us_vupdate);
}
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
if (!pipe || pipe->top_pipe)
return;
/* Prevent cursor lock from stalling out cursor updates. */
if (lock)
delay_cursor_until_vupdate(dc, pipe);
if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
hw_locks.bits.lock_cursor = 1;
inst_flags.opp_inst = pipe->stream_res.opp->inst;
dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
lock,
&hw_locks,
&inst_flags);
} else
dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
pipe->stream_res.opp->inst, lock);
}
static bool wait_for_reset_trigger_to_occur(
struct dc_context *dc_ctx,
struct timing_generator *tg)
{
bool rc = false;
/* To avoid an endless loop, wait at most
 * frames_to_wait_on_triggered_reset frames for the reset to occur.
 */
const uint32_t frames_to_wait_on_triggered_reset = 10;
int i;
for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
if (!tg->funcs->is_counter_moving(tg)) {
DC_ERROR("TG counter is not moving!\n");
break;
}
if (tg->funcs->did_triggered_reset_occur(tg)) {
rc = true;
/* usually occurs at i=1 */
DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
i);
break;
}
/* Wait for one frame. */
tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
}
if (false == rc)
DC_ERROR("GSL: Timeout on reset trigger!\n");
return rc;
}
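/*
 * Reduce numerator/denominator by their common prime factors (primes up
 * to 997). When checkUint32Boundary is set, stop as soon as both values
 * fit in 32 bits and return whether that was achieved.
 */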
static bool reduceSizeAndFraction(uint64_t *numerator,
uint64_t *denominator,
bool checkUint32Boundary)
{
int i;
bool ret = checkUint32Boundary == false;
uint64_t max_int32 = 0xffffffff;
uint64_t num, denom;
static const uint16_t prime_numbers[] = {
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
941, 947, 953, 967, 971, 977, 983, 991, 997};
int count = ARRAY_SIZE(prime_numbers);
num = *numerator;
denom = *denominator;
for (i = 0; i < count; i++) {
uint32_t num_remainder, denom_remainder;
uint64_t num_result, denom_result;
if (checkUint32Boundary &&
num <= max_int32 && denom <= max_int32) {
ret = true;
break;
}
do {
num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
if (num_remainder == 0 && denom_remainder == 0) {
num = num_result;
denom = denom_result;
}
} while (num_remainder == 0 && denom_remainder == 0);
}
*numerator = num;
*denominator = denom;
return ret;
}
static bool is_low_refresh_rate(struct pipe_ctx *pipe)
{
uint32_t master_pipe_refresh_rate =
pipe->stream->timing.pix_clk_100hz * 100 /
pipe->stream->timing.h_total /
pipe->stream->timing.v_total;
return master_pipe_refresh_rate <= 30;
}
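/*
 * Effective pixel clock divider for DTO programming: x2 for low refresh
 * rates (when requested), x2 for YCbCr 4:2:0, and xN for N ODM-combined
 * pipes.
 */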
static uint8_t get_clock_divider(struct pipe_ctx *pipe,
bool account_low_refresh_rate)
{
uint32_t clock_divider = 1;
uint32_t numpipes = 1;
if (account_low_refresh_rate && is_low_refresh_rate(pipe))
clock_divider *= 2;
if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
clock_divider *= 2;
while (pipe->next_odm_pipe) {
pipe = pipe->next_odm_pipe;
numpipes++;
}
clock_divider *= numpipes;
return clock_divider;
}
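/*
 * Override the DP DTOs so that the non-embedded displays in the group run
 * at pixel clocks derived from the embedded panel's clock, making their
 * vblanks alignable. Returns the index of the master pipe, or -1.
 */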
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
struct pipe_ctx *grouped_pipes[])
{
struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
uint64_t modulo[MAX_PIPES];
unsigned int pclk;
uint32_t embedded_pix_clk_100hz;
uint16_t embedded_h_total;
uint16_t embedded_v_total;
uint32_t dp_ref_clk_100hz =
dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
if (!hw_crtc_timing)
return master;
if (dc->config.vblank_alignment_dto_params &&
dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
embedded_h_total =
(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
embedded_v_total =
(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
embedded_pix_clk_100hz =
dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
for (i = 0; i < group_size; i++) {
grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
grouped_pipes[i]->stream_res.tg,
&hw_crtc_timing[i]);
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
grouped_pipes[i]->stream_res.tg->inst,
&pclk);
hw_crtc_timing[i].pix_clk_100hz = pclk;
if (dc_is_embedded_signal(
grouped_pipes[i]->stream->signal)) {
embedded = i;
master = i;
phase[i] = embedded_pix_clk_100hz*100;
modulo[i] = dp_ref_clk_100hz*100;
} else {
phase[i] = (uint64_t)embedded_pix_clk_100hz*
hw_crtc_timing[i].h_total*
hw_crtc_timing[i].v_total;
phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
modulo[i] = (uint64_t)dp_ref_clk_100hz*
embedded_h_total*
embedded_v_total;
if (reduceSizeAndFraction(&phase[i],
&modulo[i], true) == false) {
/*
 * Mark this timing as non-synchronizable so it is
 * no longer reported as synchronizable.
 */
DC_SYNC_INFO("Failed to reduce DTO parameters\n");
grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
}
}
}
for (i = 0; i < group_size; i++) {
if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
dc->res_pool->dp_clock_source,
grouped_pipes[i]->stream_res.tg->inst,
phase[i], modulo[i]);
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
grouped_pipes[i]->stream_res.tg->inst, &pclk);
grouped_pipes[i]->stream->timing.pix_clk_100hz =
pclk*get_clock_divider(grouped_pipes[i], false);
if (master == -1)
master = i;
}
}
}
kfree(hw_crtc_timing);
return master;
}
void dcn10_enable_vblanks_synchronization(
struct dc *dc,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
{
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width, height, master;
for (i = 1; i < group_size; i++) {
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
if (!tg->funcs->is_tg_enabled(tg)) {
DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
return;
}
if (opp->funcs->opp_program_dpg_dimensions)
opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
}
for (i = 0; i < group_size; i++) {
if (grouped_pipes[i]->stream == NULL)
continue;
grouped_pipes[i]->stream->vblank_synchronized = false;
grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
}
DC_SYNC_INFO("Aligning DP DTOs\n");
master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
DC_SYNC_INFO("Synchronizing VBlanks\n");
if (master >= 0) {
for (i = 0; i < group_size; i++) {
if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
grouped_pipes[master]->stream_res.tg,
grouped_pipes[i]->stream_res.tg,
grouped_pipes[master]->stream->timing.pix_clk_100hz,
grouped_pipes[i]->stream->timing.pix_clk_100hz,
get_clock_divider(grouped_pipes[master], false),
get_clock_divider(grouped_pipes[i], false));
grouped_pipes[i]->stream->vblank_synchronized = true;
}
grouped_pipes[master]->stream->vblank_synchronized = true;
DC_SYNC_INFO("Sync complete\n");
}
for (i = 1; i < group_size; i++) {
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
if (opp->funcs->opp_program_dpg_dimensions)
opp->funcs->opp_program_dpg_dimensions(opp, width, height);
}
}
void dcn10_enable_timing_synchronization(
struct dc *dc,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
{
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width, height;
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
if (!tg->funcs->is_tg_enabled(tg)) {
DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
return;
}
if (opp->funcs->opp_program_dpg_dimensions)
opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
}
for (i = 0; i < group_size; i++) {
if (grouped_pipes[i]->stream == NULL)
continue;
if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream->vblank_synchronized = false;
}
for (i = 1; i < group_size; i++) {
if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
grouped_pipes[i]->stream_res.tg,
grouped_pipes[0]->stream_res.tg->inst);
}
DC_SYNC_INFO("Waiting for trigger\n");
/* Only one pipe needs to be checked for the reset trigger, since all
* the others are synchronized to it. Look at the last pipe programmed
* to reset.
*/
if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
for (i = 1; i < group_size; i++) {
if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
grouped_pipes[i]->stream_res.tg);
}
for (i = 1; i < group_size; i++) {
if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
if (opp->funcs->opp_program_dpg_dimensions)
opp->funcs->opp_program_dpg_dimensions(opp, width, height);
}
DC_SYNC_INFO("Sync complete\n");
}
void dcn10_enable_per_frame_crtc_position_reset(
struct dc *dc,
int group_size,
struct pipe_ctx *grouped_pipes[])
{
struct dc_context *dc_ctx = dc->ctx;
int i;
DC_SYNC_INFO("Setting up\n");
for (i = 0; i < group_size; i++)
if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
grouped_pipes[i]->stream_res.tg,
0,
&grouped_pipes[i]->stream->triggered_crtc_reset);
DC_SYNC_INFO("Waiting for trigger\n");
for (i = 0; i < group_size; i++)
wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
DC_SYNC_INFO("Multi-display sync is complete\n");
}
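/* Mirror the MC system aperture (default physical page and logical low/high
* addresses) from the MMHUB registers into a vm_system_aperture_param so it
* can be programmed into the HUBP.
*/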
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
struct vm_system_aperture_param *apt,
struct dce_hwseq *hws)
{
PHYSICAL_ADDRESS_LOC physical_page_number;
uint32_t logical_addr_low;
uint32_t logical_addr_high;
REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
LOGICAL_ADDR, &logical_addr_low);
REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
LOGICAL_ADDR, &logical_addr_high);
apt->sys_default.quad_part = physical_page_number.quad_part << 12;
apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
/* Temporarily read settings from registers; in the future these values will come from the KMD directly */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
struct vm_context0_param *vm0,
struct dce_hwseq *hws)
{
PHYSICAL_ADDRESS_LOC fb_base;
PHYSICAL_ADDRESS_LOC fb_offset;
uint32_t fb_base_value;
uint32_t fb_offset_value;
REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
/*
* The value in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
* Therefore we need to compute
* DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
* - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
*/
fb_base.quad_part = (uint64_t)fb_base_value << 24;
fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
vm0->pte_base.quad_part += fb_base.quad_part;
vm0->pte_base.quad_part -= fb_offset.quad_part;
}
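/* Read the system aperture and VM context 0 page-table settings from MMHUB
* and program them into the HUBP so DCN can translate GPUVM surface addresses.
*/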
static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct vm_system_aperture_param apt = {0};
struct vm_context0_param vm0 = {0};
mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
static void dcn10_enable_plane(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
if (dc->debug.sanity_checks) {
hws->funcs.verify_allow_pstate_change_high(dc);
}
undo_DEGVIDCN10_253_wa(dc);
power_on_plane_resources(dc->hwseq,
pipe_ctx->plane_res.hubp->inst);
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
true);
if (dc->config.gpu_vm_support)
dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
if (dc->debug.sanity_checks) {
hws->funcs.verify_allow_pstate_change_high(dc);
}
if (!pipe_ctx->top_pipe
&& pipe_ctx->plane_state
&& pipe_ctx->plane_state->flip_int_enabled
&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
}
void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
int i = 0;
struct dpp_grph_csc_adjustment adjust;
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
adjust.temperature_matrix[i] =
pipe_ctx->stream->gamut_remap_matrix.matrix[i];
} else if (pipe_ctx->plane_state &&
pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
adjust.temperature_matrix[i] =
pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}
static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
if (pipe_ctx->top_pipe) {
struct pipe_ctx *top = pipe_ctx->top_pipe;
while (top->top_pipe)
top = top->top_pipe; // Traverse to top pipe_ctx
if (top->plane_state && top->plane_state->layer_index == 0)
return true; // Front MPO plane not hidden
}
}
return false;
}
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
// Override rear plane RGB bias to fix MPO brightness
uint16_t rgb_bias = matrix[3];
matrix[3] = 0;
matrix[7] = 0;
matrix[11] = 0;
pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
matrix[3] = rgb_bias;
matrix[7] = rgb_bias;
matrix[11] = rgb_bias;
}
void dcn10_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
int opp_id)
{
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
/* MPO is broken with RGB colorspaces when the OCSC matrix
* brightness offset is >= 0 on DCN1, because OCSC sits before the MPC
* and blending adds the offsets of front + rear to the rear plane.
*
* The fix is to set the RGB bias to 0 on the rear plane, so the top
* plane's black-value pixels add the offset instead of rear + front.
*/
int16_t rgb_bias = matrix[3];
// matrix[3/7/11] are all the same offset value
if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
} else {
pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
}
}
} else {
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
struct dc_bias_and_scale bns_params = {0};
// program the input csc
dpp->funcs->dpp_setup(dpp,
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
plane_state->color_space,
NULL);
//set scale and bias registers
build_prescale_params(&bns_params, plane_state);
if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
void dcn10_update_visual_confirm_color(struct dc *dc,
struct pipe_ctx *pipe_ctx,
int mpcc_id)
{
struct mpc *mpc = dc->res_pool->mpc;
if (mpc->funcs->set_bg_color) {
memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
}
}
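/* Build the MPCC blend configuration (per-pixel/global alpha, pre-multiply)
* for this plane and insert it into the OPP's MPC tree; a partial update only
* refreshes the blending and the visual confirm color.
*/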
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id;
struct mpcc *new_mpcc;
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
if (per_pixel_alpha) {
/* DCN1.0 has output CM before MPC which seems to screw with
* pre-multiplied alpha.
*/
blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
pipe_ctx->stream->output_color_space)
&& pipe_ctx->plane_state->pre_multiplied_alpha);
if (pipe_ctx->plane_state->global_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
} else {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
}
} else {
blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
blnd_cfg.global_alpha = 0xff;
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
* on resume from hibernate, BIOS sets up MPCC0, and
* we do mpcc_remove but the mpcc cannot go to idle
* after remove. This causes us to pick mpcc1 here,
* which causes a pstate hang for an as yet unknown reason.
*/
mpcc_id = hubp->inst;
/* If there is no full update, there is no need to touch the MPC tree */
if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
return;
}
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
if (new_mpcc != NULL)
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
else
if (dc->debug.sanity_checks)
mpc->funcs->assert_mpcc_idle_before_connect(
dc->res_pool->mpc, mpcc_id);
/* Call MPC to insert new plane */
new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
mpc_tree_params,
&blnd_cfg,
NULL,
NULL,
hubp->inst,
mpcc_id);
dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
ASSERT(new_mpcc != NULL);
hubp->opp_id = pipe_ctx->stream_res.opp->inst;
hubp->mpcc_id = mpcc_id;
}
static void update_scaler(struct pipe_ctx *pipe_ctx)
{
bool per_pixel_alpha =
pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
/* scaler configuration */
pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
static void dcn10_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct plane_size size = plane_state->plane_size;
unsigned int compat_level = 0;
bool should_divided_by_2 = false;
/* Depending on the DML calculation, the DPP clock value may change dynamically. */
/* If the requested max dpp clock is lower than the current dispclk, there is
* no need to divide by 2.
*/
if (plane_state->update_flags.bits.full_update) {
/* The newly calculated dispclk and dppclk are stored in
* context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The current
* dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
* dcn10_validate_bandwidth computes the new dispclk and dppclk.
* dispclk is put into use after optimize_bandwidth, when
* ramp_up_dispclk_with_dpp is called.
* There are two places where dppclk is put into use. One location
* is the same as for dispclk. The other is within update_dchubp_dpp,
* which happens between prepare_bandwidth and optimize_bandwidth.
* A dppclk update within update_dchubp_dpp means the new clock values
* of dispclk and dppclk are not in use at the same time. When clocks
* are decreased, this may leave dppclk lower than the previous
* configuration and cause the pipe to get stuck.
* For example, with eDP + external DP, change the DP resolution from
* 1920x1080x144hz to 1280x960x60hz.
* Before the change: dispclk = 337889, dppclk = 337889.
* On the mode change, dcn10_validate_bandwidth calculates
* dispclk = 143122, dppclk = 143122.
* update_dchubp_dpp is executed before dispclk is updated, so
* dispclk = 337889, but dppclk uses the new value dispclk / 2 =
* 168944. This causes a pipe pstate warning issue.
* Solution: between prepare_bandwidth and optimize_bandwidth, while
* dispclk is going to be decreased, keep dppclk = dispclk.
*/
if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->clk_mgr->clks.dispclk_khz)
should_divided_by_2 = false;
else
should_divided_by_2 =
context->bw_ctx.bw.dcn.clk.dppclk_khz <=
dc->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
should_divided_by_2,
true);
if (dc->res_pool->dccg)
dc->res_pool->dccg->funcs->update_dpp_dto(
dc->res_pool->dccg,
dpp->inst,
pipe_ctx->plane_res.bw.dppclk_khz);
else
dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
dc->clk_mgr->clks.dispclk_khz / 2 :
dc->clk_mgr->clks.dispclk_khz;
}
/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
* VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
* VTG has a 1:1 mapping with OTG. Each pipe HUBP selects which VTG to use.
*/
if (plane_state->update_flags.bits.full_update) {
hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
hubp->funcs->hubp_setup(
hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
hubp->funcs->hubp_setup_interdependent(
hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
}
size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.bpp_change)
dcn10_update_dpp(dpp, plane_state);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
plane_state->update_flags.bits.global_alpha_change)
hws->funcs.update_mpcc(dc, pipe_ctx);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
plane_state->update_flags.bits.global_alpha_change ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change) {
update_scaler(pipe_ctx);
}
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change) {
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
&pipe_ctx->plane_res.scl_data.viewport_c);
}
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (plane_state->update_flags.bits.full_update) {
/*gamut remap*/
dc->hwss.program_gamut_remap(pipe_ctx);
dc->hwss.program_output_csc(dc,
pipe_ctx,
pipe_ctx->stream->output_color_space,
pipe_ctx->stream->csc_color_matrix.matrix,
pipe_ctx->stream_res.opp->inst);
}
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.pixel_format_change ||
plane_state->update_flags.bits.horizontal_mirror_change ||
plane_state->update_flags.bits.rotation_change ||
plane_state->update_flags.bits.swizzle_change ||
plane_state->update_flags.bits.dcc_change ||
plane_state->update_flags.bits.bpp_change ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.plane_size_change) {
hubp->funcs->hubp_program_surface_config(
hubp,
plane_state->format,
&plane_state->tiling_info,
&size,
plane_state->rotation,
&plane_state->dcc,
plane_state->horizontal_mirror,
compat_level);
}
hubp->power_gated = false;
hws->funcs.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
hubp->funcs->set_blank(hubp, false);
}
void dcn10_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank)
{
enum dc_color_space color_space;
struct tg_color black_color = {0};
struct stream_resource *stream_res = &pipe_ctx->stream_res;
struct dc_stream_state *stream = pipe_ctx->stream;
/* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
/*
* The way 420 is packed, 2 channels carry the Y component and 1 channel
* alternates between Cb and Cr, so both channels need the pixel
* value for Y.
*/
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
black_color.color_r_cr = black_color.color_g_y;
if (stream_res->tg->funcs->set_blank_color)
stream_res->tg->funcs->set_blank_color(
stream_res->tg,
&black_color);
if (!blank) {
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
if (stream_res->abm) {
dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank) {
stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
}
}
}
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
struct custom_float_format fmt;
fmt.exponenta_bits = 6;
fmt.mantissa_bits = 12;
fmt.sign = true;
if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
pipe_ctx->plane_res.dpp, hw_mult);
}
void dcn10_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
if (pipe_ctx->top_pipe == NULL) {
bool blank = !is_pipe_tree_visible(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
}
if (pipe_ctx->plane_state->update_flags.bits.full_update)
dcn10_enable_plane(dc, pipe_ctx, context);
dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
* so only do gamma programming for a full update.
* TODO: This can be further optimized/cleaned up.
* Always call this for now since it does a memcmp inside before
* doing the heavy calculation and programming.
*/
if (pipe_ctx->plane_state->update_flags.bits.full_update)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
void dcn10_wait_for_pending_cleared(struct dc *dc,
struct dc_state *context)
{
struct pipe_ctx *pipe_ctx;
struct timing_generator *tg;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
/*
* Only wait for the top pipe's tg pending bit.
* Also skip if the pipe is disabled.
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream || !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
/*
* Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
* For some reason waiting for OTG_UPDATE_PENDING to clear
* seems to not trigger the update right away, and if we
* lock again before VUPDATE then we don't get a separate
* operation.
*/
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
}
}
void dcn10_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
int i;
DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx->top_pipe &&
!pipe_ctx->prev_odm_pipe &&
pipe_ctx->stream) {
struct timing_generator *tg = pipe_ctx->stream_res.tg;
if (context->stream_status[i].plane_count == 0)
false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
dc->hwss.optimize_bandwidth(dc, context);
break;
}
if (dc->hwseq->wa.DEGVIDCN10_254)
hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
{
uint8_t i;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->timing.timing_3d_format
== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
/*
* Disable stutter
*/
hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
break;
}
}
}
void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
if (context->stream_count == 0)
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
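/* update clocks with safe_to_lower = false: only raise clocks here, before
* the new state is programmed
*/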
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
false);
dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
dcn_get_soc_clks(
dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
dcn_bw_notify_pplib_of_wm_ranges(
dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
}
void dcn10_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
if (context->stream_count == 0)
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
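/* update clocks with safe_to_lower = true: clocks may now drop to match the
* new state
*/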
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
dcn_get_soc_clks(
dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
dcn_bw_notify_pplib_of_wm_ranges(
dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
}
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust)
{
int i = 0;
struct drr_params params = {0};
// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
unsigned int event_triggers = 0x800;
// Note: DRR trigger events are generated regardless of whether the number of frames is met.
unsigned int num_frames = 2;
params.vertical_total_max = adjust.v_total_max;
params.vertical_total_min = adjust.v_total_min;
params.vertical_total_mid = adjust.v_total_mid;
params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
/* TODO: If multiple pipes are to be supported, you need
* some GSL stuff. Static screen triggers may be programmed differently
* as well.
*/
for (i = 0; i < num_pipes; i++) {
if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
pipe_ctx[i]->stream_res.tg->funcs->set_drr(
pipe_ctx[i]->stream_res.tg, ¶ms);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
event_triggers, num_frames);
}
}
}
void dcn10_get_position(struct pipe_ctx **pipe_ctx,
int num_pipes,
struct crtc_position *position)
{
int i = 0;
/* TODO: handle pipes > 1
*/
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
}
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
unsigned int triggers = 0;
if (params->triggers.surface_update)
triggers |= 0x80;
if (params->triggers.cursor_update)
triggers |= 0x2;
if (params->triggers.force_trigger)
triggers |= 0x1;
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
set_static_screen_control(pipe_ctx[i]->stream_res.tg,
triggers, params->num_frames);
}
static void dcn10_config_stereo_parameters(
struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
{
enum view_3d_format view_format = stream->view_format;
enum dc_timing_3d_format timing_3d_format =\
stream->timing.timing_3d_format;
bool non_stereo_timing = false;
if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
non_stereo_timing = true;
if (non_stereo_timing == false &&
view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
flags->PROGRAM_STEREO = 1;
flags->PROGRAM_POLARITY = 1;
if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
if (stream->link && stream->link->ddc) {
enum display_dongle_type dongle = \
stream->link->ddc->dongle_type;
if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
flags->DISABLE_STEREO_DP_SYNC = 1;
}
}
flags->RIGHT_EYE_POLARITY =\
stream->timing.flags.RIGHT_EYE_3D_POLARITY;
if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
flags->FRAME_PACKED = 1;
}
return;
}
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
struct crtc_stereo_flags flags = { 0 };
struct dc_stream_state *stream = pipe_ctx->stream;
dcn10_config_stereo_parameters(stream, &flags);
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
} else {
dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
}
pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
flags.PROGRAM_STEREO == 1,
&stream->timing);
pipe_ctx->stream_res.tg->funcs->program_stereo(
pipe_ctx->stream_res.tg,
&stream->timing,
&flags);
return;
}
static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
{
int i;
for (i = 0; i < res_pool->pipe_count; i++) {
if (res_pool->hubps[i]->inst == mpcc_inst)
return res_pool->hubps[i];
}
ASSERT(false);
return NULL;
}
void dcn10_wait_for_mpcc_disconnect(
struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
int mpcc_inst;
if (dc->debug.sanity_checks) {
hws->funcs.verify_allow_pstate_change_high(dc);
}
if (!pipe_ctx->stream_res.opp)
return;
for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
if (pipe_ctx->stream_res.tg &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
}
}
if (dc->debug.sanity_checks) {
hws->funcs.verify_allow_pstate_change_high(dc);
}
}
bool dcn10_dummy_display_power_gating(
struct dc *dc,
uint8_t controller_id,
struct dc_bios *dcb,
enum pipe_gating_control power_gating)
{
return true;
}
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
bool flip_pending;
struct dc *dc = pipe_ctx->stream->ctx->dc;
if (plane_state == NULL)
return;
flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
if (!flip_pending)
plane_state->status.current_address = plane_state->status.requested_address;
if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
tg->funcs->is_stereo_left_eye) {
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
}
if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
struct dce_hwseq *hwseq = dc->hwseq;
struct timing_generator *tg = dc->res_pool->timing_generators[0];
unsigned int cur_frame = tg->funcs->get_frame_count(tg);
if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
struct hubbub *hubbub = dc->res_pool->hubbub;
hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
}
}
}
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
/* In DCN, this programming sequence is owned by the hubbub */
hubbub->funcs->update_dchub(hubbub, dh_data);
}
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *test_pipe, *split_pipe;
const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
struct rect r1 = scl_data->recout, r2, r2_half;
int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
int cur_layer = pipe_ctx->plane_state->layer_index;
/**
* Disable the cursor if there's another pipe above this with a
* plane that contains this pipe's viewport to prevent double cursor
* and incorrect scaling artifacts.
*/
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) {
// Skip invisible layer and pipe-split plane on same layer
if (!test_pipe->plane_state ||
!test_pipe->plane_state->visible ||
test_pipe->plane_state->layer_index == cur_layer)
continue;
r2 = test_pipe->plane_res.scl_data.recout;
r2_r = r2.x + r2.width;
r2_b = r2.y + r2.height;
split_pipe = test_pipe;
/**
* There may be another half of the plane on the same layer because of
* pipe-split; merge them together since they share the same height.
*/
for (split_pipe = pipe_ctx->top_pipe; split_pipe;
split_pipe = split_pipe->top_pipe)
if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
r2_half = split_pipe->plane_res.scl_data.recout;
r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
r2.width = r2.width + r2_half.width;
r2_r = r2.x + r2.width;
break;
}
if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
return true;
}
return false;
}
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
(pipe_ctx->prev_odm_pipe != NULL);
int x_plane = pipe_ctx->plane_state->dst_rect.x;
int y_plane = pipe_ctx->plane_state->dst_rect.y;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
pipe_split_on = true;
}
}
/**
* DC cursor is stream space, HW cursor is plane space and drawn
* as part of the framebuffer.
*
* Cursor position can't be negative, but hotspot can be used to
* shift cursor out of the plane bounds. Hotspot must be smaller
* than the cursor size.
*/
/**
* Translate cursor from stream space to plane space.
*
* If the cursor is scaled then we need to scale the position
* to be in the approximately correct place. We can't do anything
* about the actual size being incorrect, that's a limitation of
* the hardware.
*/
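/* For example (no rotation): with src_rect.width = 1920, dst_rect.width = 960
* and x_plane = 0, a stream-space x_pos of 480 maps to plane-space 960.
*/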
if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
pipe_ctx->plane_state->dst_rect.height;
} else {
x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
}
/**
* If the cursor's source viewport is clipped then we need to
* translate the cursor to appear in the correct position on
* the screen.
*
* This translation isn't affected by scaling so it needs to be
* done *after* we adjust the position for the scale factor.
*
* This is only done by opt-in for now since there are still
* some usecases like tiled display that might enable the
* cursor on both streams while expecting dc to clip it.
*/
if (pos_cpy.translate_by_source) {
x_pos += pipe_ctx->plane_state->src_rect.x;
y_pos += pipe_ctx->plane_state->src_rect.y;
}
/**
* If the position is negative then we need to add to the hotspot
* to shift the cursor outside the plane.
*/
if (x_pos < 0) {
pos_cpy.x_hotspot -= x_pos;
x_pos = 0;
}
if (y_pos < 0) {
pos_cpy.y_hotspot -= y_pos;
y_pos = 0;
}
pos_cpy.x = (uint32_t)x_pos;
pos_cpy.y = (uint32_t)y_pos;
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
if (param.rotation == ROTATION_ANGLE_0) {
int viewport_width =
pipe_ctx->plane_res.scl_data.viewport.width;
int viewport_x =
pipe_ctx->plane_res.scl_data.viewport.x;
if (param.mirror) {
if (pipe_split_on || odm_combine_on) {
if (pos_cpy.x >= viewport_width + viewport_x) {
pos_cpy.x = 2 * viewport_width
- pos_cpy.x + 2 * viewport_x;
} else {
uint32_t temp_x = pos_cpy.x;
pos_cpy.x = 2 * viewport_x - pos_cpy.x;
if (temp_x >= viewport_x +
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
pos_cpy.x = temp_x + viewport_width;
}
}
} else {
pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
}
}
// Swap axis and mirror horizontally
else if (param.rotation == ROTATION_ANGLE_90) {
uint32_t temp_x = pos_cpy.x;
pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
pos_cpy.y = temp_x;
}
// Swap axis and mirror vertically
else if (param.rotation == ROTATION_ANGLE_270) {
uint32_t temp_y = pos_cpy.y;
int viewport_height =
pipe_ctx->plane_res.scl_data.viewport.height;
int viewport_y =
pipe_ctx->plane_res.scl_data.viewport.y;
/**
* Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height
* For pipe split cases:
* - apply offset of viewport.y to normalize pos_cpy.x
* - calculate the pos_cpy.y as before
* - shift pos_cpy.y back by same offset to get final value
* - since we iterate through both pipes, use the lower
* viewport.y for offset
* For non pipe split cases, use the same calculation for
* pos_cpy.y as the 180 degree rotation case below,
* but use pos_cpy.x as our input because we are rotating
* 270 degrees
*/
if (pipe_split_on || odm_combine_on) {
int pos_cpy_x_offset;
int other_pipe_viewport_y;
if (pipe_split_on) {
if (pipe_ctx->bottom_pipe) {
other_pipe_viewport_y =
pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
} else {
other_pipe_viewport_y =
pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
}
} else {
if (pipe_ctx->next_odm_pipe) {
other_pipe_viewport_y =
pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
} else {
other_pipe_viewport_y =
pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
}
}
pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
other_pipe_viewport_y : viewport_y;
pos_cpy.x -= pos_cpy_x_offset;
if (pos_cpy.x > viewport_height) {
pos_cpy.x = pos_cpy.x - viewport_height;
pos_cpy.y = viewport_height - pos_cpy.x;
} else {
pos_cpy.y = 2 * viewport_height - pos_cpy.x;
}
pos_cpy.y += pos_cpy_x_offset;
} else {
pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
}
pos_cpy.x = temp_y;
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
int viewport_width =
pipe_ctx->plane_res.scl_data.viewport.width;
int viewport_x =
pipe_ctx->plane_res.scl_data.viewport.x;
if (!param.mirror) {
if (pipe_split_on || odm_combine_on) {
if (pos_cpy.x >= viewport_width + viewport_x) {
pos_cpy.x = 2 * viewport_width
- pos_cpy.x + 2 * viewport_x;
} else {
uint32_t temp_x = pos_cpy.x;
pos_cpy.x = 2 * viewport_x - pos_cpy.x;
if (temp_x >= viewport_x +
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
}
/**
* Display groups that are 1xnY have pos_cpy.y > viewport.height
* Calculation:
* delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
* pos_cpy.y_new = viewport.y + delta_from_bottom
* Simplify it as:
* pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
*/
pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
}
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.dpp, attributes);
}
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
{
uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
struct fixed31_32 multiplier;
struct dpp_cursor_attributes opt_attr = { 0 };
uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
struct custom_float_format fmt;
if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
return;
fmt.exponenta_bits = 5;
fmt.mantissa_bits = 10;
fmt.sign = true;
if (sdr_white_level > 80) {
multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
}
opt_attr.scale = hw_scale;
opt_attr.bias = 0;
pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
pipe_ctx->plane_res.dpp, &opt_attr);
}
/*
* apply_front_porch_workaround  TODO: is this still needed for FPGA?
*
* This is a workaround for a bug that has existed since R5xx and has not been
* fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for
* progressive.
*/
static void apply_front_porch_workaround(
struct dc_crtc_timing *timing)
{
if (timing->flags.INTERLACE == 1) {
if (timing->v_front_porch < 2)
timing->v_front_porch = 2;
} else {
if (timing->v_front_porch < 1)
timing->v_front_porch = 1;
}
}
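/* Return the VUPDATE position as a line offset from VSYNC: compute the ASIC
* blank end line from the VESA timing (doubled when interlaced) and return
* asic_blank_end - vstartup_start + 1.
*/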
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
struct dc_crtc_timing patched_crtc_timing;
int vesa_sync_start;
int asic_blank_end;
int interlace_factor;
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
vesa_sync_start = patched_crtc_timing.v_addressable +
patched_crtc_timing.v_border_bottom +
patched_crtc_timing.v_front_porch;
asic_blank_end = (patched_crtc_timing.v_total -
vesa_sync_start -
patched_crtc_timing.v_border_top)
* interlace_factor;
return asic_blank_end -
pipe_ctx->pipe_dlg_param.vstartup_start + 1;
}
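/* Convert the (possibly negative) VUPDATE offset into a start/end line pair
* wrapped into the [0, v_total) range; the window spans two lines.
*/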
void dcn10_calc_vupdate_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
uint32_t *end_line)
{
const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (vupdate_pos >= 0)
*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
else
*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
*end_line = (*start_line + 2) % timing->v_total;
}
static void dcn10_cal_vline_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
uint32_t *end_line)
{
const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
if (vline_pos > 0)
vline_pos--;
else if (vline_pos < 0)
vline_pos++;
vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (vline_pos >= 0)
*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
else
*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
*end_line = (*start_line + 2) % timing->v_total;
} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
// vsync is line 0 so start_line is just the requested line offset
*start_line = vline_pos;
*end_line = (*start_line + 2) % timing->v_total;
} else
ASSERT(0);
}
void dcn10_setup_periodic_interrupt(
struct dc *dc,
struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
uint32_t start_line = 0;
uint32_t end_line = 0;
dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
}
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0) {
ASSERT(0);
start_line = 0;
}
if (tg->funcs->setup_vertical_interrupt2)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, true);
}
}
void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
const uint8_t *custom_sdp_message,
unsigned int sdp_message_size)
{
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
pipe_ctx->stream_res.stream_enc,
custom_sdp_message,
sdp_message_size);
}
}
enum dc_status dcn10_set_clock(struct dc *dc,
enum dc_clock_type clock_type,
uint32_t clk_khz,
uint32_t stepping)
{
struct dc_state *context = dc->current_state;
struct dc_clock_config clock_cfg = {0};
struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
return DC_FAIL_UNSUPPORTED_1;
dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
context, clock_type, &clock_cfg);
if (clk_khz > clock_cfg.max_clock_khz)
return DC_FAIL_CLK_EXCEED_MAX;
if (clk_khz < clock_cfg.min_clock_khz)
return DC_FAIL_CLK_BELOW_MIN;
if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
/* update the internally requested clock for update_clocks to use */
if (clock_type == DC_CLOCK_TYPE_DISPCLK)
current_clocks->dispclk_khz = clk_khz;
else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
current_clocks->dppclk_khz = clk_khz;
else
return DC_ERROR_UNEXPECTED;
if (dc->clk_mgr->funcs->update_clocks)
dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
context, true);
return DC_OK;
}
void dcn10_get_clock(struct dc *dc,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg)
{
struct dc_state *context = dc->current_state;
if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}
void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
{
struct resource_pool *pool = dc->res_pool;
int i;
for (i = 0; i < pool->pipe_count; i++) {
struct hubp *hubp = pool->hubps[i];
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
hubp->funcs->hubp_read_state(hubp);
if (!s->blank_en)
dcc_en_bits[i] = s->dcc_en ? 1 : 0;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc_bios_types.h"
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
#include "link.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
#define DC_LOGGER \
enc1->base.ctx->logger
#define REG(reg)\
(enc1->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
enc1->se_shift->field_name, enc1->se_mask->field_name
#define VBI_LINE_0 0
#define DP_BLANK_MAX_RETRY 20
#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
enum {
DP_MST_UPDATE_MAX_RETRY = 50
};
#define CTX \
enc1->base.ctx
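/* Write one generic (GSP) info packet into the AFMT generic packet RAM at the
* given index and arm it for immediate update (packet indices 0-7).
*/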
void enc1_update_generic_info_packet(
struct dcn10_stream_encoder *enc1,
uint32_t packet_index,
const struct dc_info_packet *info_packet)
{
/* TODO (FPGA): Figure out a proper number for max_retries when polling for
* lock; use 50 for now.
*/
uint32_t max_retries = 50;
/* we need to turn on the clock before programming the AFMT block */
REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
if (packet_index >= 8)
ASSERT(0);
/* poll until dig_update_lock is not locked -> ASIC internal signal;
* assume the OTG master lock will unlock it
*/
/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
0, 10, max_retries);*/
/* check if HW reading GSP memory */
REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
0, 10, max_retries);
/* If HW is still reading GSP memory after waiting too long, something is
* wrong; clear the GSP memory access conflict and notify.
* Now SW can write to GSP memory.
*/
REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
/* choose which generic packet to use */
REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
AFMT_GENERIC_INDEX, packet_index);
/* write generic packet header
* (4th byte is for GENERIC0 only)
*/
REG_SET_4(AFMT_GENERIC_HDR, 0,
AFMT_GENERIC_HB0, info_packet->hb0,
AFMT_GENERIC_HB1, info_packet->hb1,
AFMT_GENERIC_HB2, info_packet->hb2,
AFMT_GENERIC_HB3, info_packet->hb3);
/* write generic packet contents
* (we never use last 4 bytes)
* there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
*/
{
const uint32_t *content =
(const uint32_t *) &info_packet->sb[0];
REG_WRITE(AFMT_GENERIC_0, *content++);
REG_WRITE(AFMT_GENERIC_1, *content++);
REG_WRITE(AFMT_GENERIC_2, *content++);
REG_WRITE(AFMT_GENERIC_3, *content++);
REG_WRITE(AFMT_GENERIC_4, *content++);
REG_WRITE(AFMT_GENERIC_5, *content++);
REG_WRITE(AFMT_GENERIC_6, *content++);
REG_WRITE(AFMT_GENERIC_7, *content);
}
switch (packet_index) {
case 0:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
break;
case 1:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
break;
case 2:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
break;
case 3:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
break;
case 4:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
break;
case 5:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
break;
case 6:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
break;
case 7:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
break;
default:
break;
}
}
static void enc1_update_hdmi_info_packet(
struct dcn10_stream_encoder *enc1,
uint32_t packet_index,
const struct dc_info_packet *info_packet)
{
uint32_t cont, send, line;
if (info_packet->valid) {
enc1_update_generic_info_packet(
enc1,
packet_index,
info_packet);
/* enable transmission of packet(s) -
* packet transmission begins on the next frame
*/
cont = 1;
/* send packet(s) every frame */
send = 1;
/* select line number to send packets on */
line = 2;
} else {
cont = 0;
send = 0;
line = 0;
}
/* choose which generic packet control to use */
switch (packet_index) {
case 0:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
HDMI_GENERIC0_CONT, cont,
HDMI_GENERIC0_SEND, send,
HDMI_GENERIC0_LINE, line);
break;
case 1:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
HDMI_GENERIC1_CONT, cont,
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
case 2:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
HDMI_GENERIC0_CONT, cont,
HDMI_GENERIC0_SEND, send,
HDMI_GENERIC0_LINE, line);
break;
case 3:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
HDMI_GENERIC1_CONT, cont,
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
case 4:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
HDMI_GENERIC0_CONT, cont,
HDMI_GENERIC0_SEND, send,
HDMI_GENERIC0_LINE, line);
break;
case 5:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
HDMI_GENERIC1_CONT, cont,
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
case 6:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
HDMI_GENERIC0_CONT, cont,
HDMI_GENERIC0_SEND, send,
HDMI_GENERIC0_LINE, line);
break;
case 7:
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
HDMI_GENERIC1_CONT, cont,
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
default:
/* invalid HW packet index */
DC_LOG_WARNING(
"Invalid HW packet index: %s()\n",
__func__);
return;
}
}
/* setup stream encoder in dp mode */
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
uint32_t misc1 = 0;
uint32_t h_blank;
uint32_t h_back_porch;
uint8_t synchronous_clock = 0; /* asynchronous mode */
uint8_t colorimetry_bpc;
uint8_t dp_pixel_encoding = 0;
uint8_t dp_component_depth = 0;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct dc_crtc_timing hw_crtc_timing = *crtc_timing;
if (hw_crtc_timing.flags.INTERLACE) {
/*the input timing is in VESA spec format with Interlace flag =1*/
hw_crtc_timing.v_total /= 2;
hw_crtc_timing.v_border_top /= 2;
hw_crtc_timing.v_addressable /= 2;
hw_crtc_timing.v_border_bottom /= 2;
hw_crtc_timing.v_front_porch /= 2;
hw_crtc_timing.v_sync_width /= 2;
}
/* set pixel encoding */
switch (hw_crtc_timing.pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
break;
case PIXEL_ENCODING_YCBCR444:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
if (hw_crtc_timing.flags.Y_ONLY)
if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666)
/* HW testing only, no use case yet.
* Color depth of Y-only could be
* 8, 10, 12, 16 bits
*/
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;
/* Note: DP_MSA_MISC1 bit 7 is the indicator
* of Y-only mode.
* This bit is set in HW if register
* DP_PIXEL_ENCODING is programmed to 0x4
*/
break;
case PIXEL_ENCODING_YCBCR420:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
break;
default:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
break;
}
misc1 = REG_READ(DP_MSA_MISC);
/* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
* When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
* Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
* and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
*/
if (use_vsc_sdp_for_colorimetry)
misc1 = misc1 | 0x40;
else
misc1 = misc1 & ~0x40;
/* set color depth */
switch (hw_crtc_timing.display_color_depth) {
case COLOR_DEPTH_666:
dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
case COLOR_DEPTH_888:
dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
break;
case COLOR_DEPTH_101010:
dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
break;
case COLOR_DEPTH_121212:
dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
break;
case COLOR_DEPTH_161616:
dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
break;
default:
dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
}
/* Set DP pixel encoding and component depth */
REG_UPDATE_2(DP_PIXEL_FORMAT,
DP_PIXEL_ENCODING, dp_pixel_encoding,
DP_COMPONENT_DEPTH, dp_component_depth);
/* set dynamic range and YCbCr range */
switch (hw_crtc_timing.display_color_depth) {
case COLOR_DEPTH_666:
colorimetry_bpc = 0;
break;
case COLOR_DEPTH_888:
colorimetry_bpc = 1;
break;
case COLOR_DEPTH_101010:
colorimetry_bpc = 2;
break;
case COLOR_DEPTH_121212:
colorimetry_bpc = 3;
break;
default:
colorimetry_bpc = 0;
break;
}
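/* MSA MISC0 layout as used below (per the DP spec): bit 0 = clock sync mode,
 * bits 2:1 = component format, bits 4:3 = colorimetry, bits 7:5 = bit depth
 */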
misc0 = misc0 | synchronous_clock;
misc0 = colorimetry_bpc << 5;
switch (output_color_space) {
case COLOR_SPACE_SRGB:
misc1 = misc1 & ~0x80; /* bit7 = 0*/
break;
case COLOR_SPACE_SRGB_LIMITED:
misc0 = misc0 | 0x8; /* bit3=1 */
misc1 = misc1 & ~0x80; /* bit7 = 0*/
break;
case COLOR_SPACE_YCBCR601:
case COLOR_SPACE_YCBCR601_LIMITED:
misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
misc1 = misc1 & ~0x80; /* bit7 = 0*/
if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
break;
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR709_LIMITED:
misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
misc1 = misc1 & ~0x80; /* bit7 = 0*/
if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
case COLOR_SPACE_2020_YCBCR:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
case COLOR_SPACE_DCIP3:
case COLOR_SPACE_XV_YCC_709:
case COLOR_SPACE_XV_YCC_601:
case COLOR_SPACE_DISPLAYNATIVE:
case COLOR_SPACE_DOLBYVISION:
case COLOR_SPACE_APPCTRL:
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
/* do nothing */
break;
}
REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */
/* DCN new registers.
 * dc_crtc_timing is a VESA DMT struct; the data comes from the EDID.
 */
REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
DP_MSA_HTOTAL, hw_crtc_timing.h_total,
DP_MSA_VTOTAL, hw_crtc_timing.v_total);
/* calculate from vesa timing parameters
* h_active_start related to leading edge of sync
*/
h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left -
hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right;
h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
hw_crtc_timing.h_sync_width;
/* start at beginning of left border */
h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;
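/* v_active_start = v_sync_width + v_back_porch: lines from the VSync
 * leading edge to the first active line
 */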
v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top -
hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom -
hw_crtc_timing.v_front_porch;
/* start at beginning of left border */
REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
DP_MSA_HSTART, h_active_start,
DP_MSA_VSTART, v_active_start);
REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
DP_MSA_HSYNCWIDTH,
hw_crtc_timing.h_sync_width,
DP_MSA_HSYNCPOLARITY,
!hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY,
DP_MSA_VSYNCWIDTH,
hw_crtc_timing.v_sync_width,
DP_MSA_VSYNCPOLARITY,
!hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY);
/* HWIDTH includes border or overscan */
REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
DP_MSA_HWIDTH, hw_crtc_timing.h_border_left +
hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right,
DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top +
hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);
}
void enc1_stream_encoder_set_stream_attribute_helper(
struct dcn10_stream_encoder *enc1,
struct dc_crtc_timing *crtc_timing)
{
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
break;
default:
REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
break;
}
REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
}
/* setup stream encoder in hdmi mode */
void enc1_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
int actual_pix_clk_khz,
bool enable_audio)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct bp_encoder_control cntl = {0};
cntl.action = ENCODER_CONTROL_SETUP;
cntl.engine_id = enc1->base.id;
cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
cntl.enable_dp_audio = enable_audio;
cntl.pixel_clock = actual_pix_clk_khz;
cntl.lanes_number = LANE_COUNT_FOUR;
if (enc1->base.bp->funcs->encoder_control(
enc1->base.bp, &cntl) != BP_RESULT_OK)
return;
enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
/* setup HDMI engine */
REG_UPDATE_6(HDMI_CONTROL,
HDMI_PACKET_GEN_VERSION, 1,
HDMI_KEEPOUT_MODE, 1,
HDMI_DEEP_COLOR_ENABLE, 0,
HDMI_DATA_SCRAMBLE_EN, 0,
HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1,
HDMI_CLOCK_CHANNEL_RATE, 0);
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_888:
REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
DC_LOG_DEBUG("HDMI source set to 24BPP deep color depth\n");
break;
case COLOR_DEPTH_101010:
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 0);
DC_LOG_DEBUG("HDMI source 30BPP deep color depth" \
"disabled for YCBCR422 pixel encoding\n");
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 1);
DC_LOG_DEBUG("HDMI source 30BPP deep color depth" \
"enabled for YCBCR422 non-pixel encoding\n");
}
break;
case COLOR_DEPTH_121212:
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 0);
DC_LOG_DEBUG("HDMI source 36BPP deep color depth" \
"disabled for YCBCR422 pixel encoding\n");
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 1);
DC_LOG_DEBUG("HDMI source 36BPP deep color depth" \
"enabled for non-pixel YCBCR422 encoding\n");
}
break;
case COLOR_DEPTH_161616:
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 3,
HDMI_DEEP_COLOR_ENABLE, 1);
DC_LOG_DEBUG("HDMI source deep color depth enabled in" \
"reserved mode\n");
break;
default:
break;
}
if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
/* enable HDMI data scrambler
* HDMI_CLOCK_CHANNEL_RATE_MORE_340M
* Clock channel frequency is 1/4 of character rate.
*/
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DATA_SCRAMBLE_EN, 1,
HDMI_CLOCK_CHANNEL_RATE, 1);
} else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
/* TODO: New feature for DCE11, still need to implement */
/* enable HDMI data scrambler
* HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
* Clock channel frequency is the same
* as character rate
*/
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DATA_SCRAMBLE_EN, 1,
HDMI_CLOCK_CHANNEL_RATE, 0);
}
REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
HDMI_GC_CONT, 1,
HDMI_GC_SEND, 1,
HDMI_NULL_SEND, 1);
REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);
/* following belongs to audio */
REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
VBI_LINE_0 + 2);
REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
}
/* setup stream encoder in dvi mode */
void enc1_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct bp_encoder_control cntl = {0};
cntl.action = ENCODER_CONTROL_SETUP;
cntl.engine_id = enc1->base.id;
cntl.signal = is_dual_link ?
SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
cntl.enable_dp_audio = false;
cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
if (enc1->base.bp->funcs->encoder_control(
enc1->base.bp, &cntl) != BP_RESULT_OK)
return;
ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
}
void enc1_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
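/* DP_MSE_RATE is programmed as an X.Y value: X is the integer part of
 * avg_time_slots_per_mtp, Y the fractional part in 26-bit fixed point
 */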
uint32_t x = dc_fixpt_floor(
avg_time_slots_per_mtp);
uint32_t y = dc_fixpt_ceil(
dc_fixpt_shl(
dc_fixpt_sub_int(
avg_time_slots_per_mtp,
x),
26));
// If y rounds up to integer, carry it over to x.
if (y >> 26) {
x += 1;
y = 0;
}
REG_SET_2(DP_MSE_RATE_CNTL, 0,
DP_MSE_RATE_X, x,
DP_MSE_RATE_Y, y);
/* wait for update to be completed on the link */
/* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
/* is reset to 0 (not pending) */
REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
0,
10, DP_MST_UPDATE_MAX_RETRY);
}
static void enc1_stream_encoder_update_hdmi_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
/* for bring up, disable HDMI double buffering (TODO) */
REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
/*Always add mandatory packets first followed by optional ones*/
enc1_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
enc1_update_hdmi_info_packet(enc1, 1, &info_frame->hfvsif);
enc1_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
enc1_update_hdmi_info_packet(enc1, 3, &info_frame->vendor);
enc1_update_hdmi_info_packet(enc1, 4, &info_frame->spd);
enc1_update_hdmi_info_packet(enc1, 5, &info_frame->hdrsmd);
}
static void enc1_stream_encoder_stop_hdmi_info_packets(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
/* stop generic packets 0 & 1 on HDMI */
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
HDMI_GENERIC1_CONT, 0,
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0,
HDMI_GENERIC0_CONT, 0,
HDMI_GENERIC0_LINE, 0,
HDMI_GENERIC0_SEND, 0);
/* stop generic packets 2 & 3 on HDMI */
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
HDMI_GENERIC0_CONT, 0,
HDMI_GENERIC0_LINE, 0,
HDMI_GENERIC0_SEND, 0,
HDMI_GENERIC1_CONT, 0,
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
/* stop generic packets 2 & 3 on HDMI */
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
HDMI_GENERIC0_CONT, 0,
HDMI_GENERIC0_LINE, 0,
HDMI_GENERIC0_SEND, 0,
HDMI_GENERIC1_CONT, 0,
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
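/* stop generic packets 6 & 7 on HDMI */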
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0,
HDMI_GENERIC0_CONT, 0,
HDMI_GENERIC0_LINE, 0,
HDMI_GENERIC0_SEND, 0,
HDMI_GENERIC1_CONT, 0,
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
}
void enc1_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t value = 0;
if (info_frame->vsc.valid)
enc1_update_generic_info_packet(
enc1,
0, /* packetIndex */
&info_frame->vsc);
/* VSC SDP at packetIndex 1 is used by PSR in DMCUB FW.
* Note that the enablement of GSP1 is not done below,
* it's done in FW.
*/
if (info_frame->vsc.valid)
enc1_update_generic_info_packet(
enc1,
1, /* packetIndex */
&info_frame->vsc);
if (info_frame->spd.valid)
enc1_update_generic_info_packet(
enc1,
2, /* packetIndex */
&info_frame->spd);
if (info_frame->hdrsmd.valid)
enc1_update_generic_info_packet(
enc1,
3, /* packetIndex */
&info_frame->hdrsmd);
/* packetIndex 4 is used for sending immediate SDP messages; use other
 * packet indices (such as 5 and 6) for other info packets
 */
if (info_frame->adaptive_sync.valid)
enc1_update_generic_info_packet(
enc1,
5, /* packetIndex */
&info_frame->adaptive_sync);
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid);
/* This bit is the master enable bit.
 * When enabling the secondary stream engine,
 * this master bit must also be set.
 * This register is shared with the audio info frame.
 * Therefore we need to enable the master bit
 * if at least one of the fields is not 0.
 */
value = REG_READ(DP_SEC_CNTL);
if (value)
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
}
void enc1_stream_encoder_send_immediate_sdp_message(
struct stream_encoder *enc,
const uint8_t *custom_sdp_message,
unsigned int sdp_message_size)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t value = 0;
/* TODOFPGA Figure out a proper number for max_retries polling for lock
* use 50 for now.
*/
uint32_t max_retries = 50;
/* check if GSP4 is transmitted */
REG_WAIT(DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING,
0, 10, max_retries);
/* disable GSP4 transmitting */
REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND, 0);
/* transmit GSP4 at the earliest time in a frame */
REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, 1);
/* we need to turn on the clock before programming the AFMT block */
REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
/* check if HW reading GSP memory */
REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
0, 10, max_retries);
/* If HW is still reading GSP memory after waiting this long,
 * something is wrong: clear the GSP memory access conflict (and notify?)
 * since SW is about to write to GSP memory.
 */
REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
/* use generic packet 4 for immediate sdp message */
REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
AFMT_GENERIC_INDEX, 4);
/* write generic packet header
* (4th byte is for GENERIC0 only)
*/
REG_SET_4(AFMT_GENERIC_HDR, 0,
AFMT_GENERIC_HB0, custom_sdp_message[0],
AFMT_GENERIC_HB1, custom_sdp_message[1],
AFMT_GENERIC_HB2, custom_sdp_message[2],
AFMT_GENERIC_HB3, custom_sdp_message[3]);
/* write generic packet contents
* (we never use last 4 bytes)
* there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
*/
{
const uint32_t *content =
(const uint32_t *) &custom_sdp_message[4];
REG_WRITE(AFMT_GENERIC_0, *content++);
REG_WRITE(AFMT_GENERIC_1, *content++);
REG_WRITE(AFMT_GENERIC_2, *content++);
REG_WRITE(AFMT_GENERIC_3, *content++);
REG_WRITE(AFMT_GENERIC_4, *content++);
REG_WRITE(AFMT_GENERIC_5, *content++);
REG_WRITE(AFMT_GENERIC_6, *content++);
REG_WRITE(AFMT_GENERIC_7, *content);
}
/* check whether GENERIC4 registers double buffer update in immediate mode
* is pending
*/
REG_WAIT(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING,
0, 10, max_retries);
/* atomically update double-buffered GENERIC4 registers in immediate mode
* (update immediately)
*/
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
/* enable GSP4 transmitting */
REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND, 1);
/* This bit is the master enable bit.
 * When enabling the secondary stream engine,
 * this master bit must also be set.
 * This register is shared with the audio info frame.
 * Therefore we need to enable the master bit
 * if at least one of the fields is not 0.
 */
value = REG_READ(DP_SEC_CNTL);
if (value)
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
}
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc)
{
/* stop generic packets on DP */
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t value = 0;
REG_SET_10(DP_SEC_CNTL, 0,
DP_SEC_GSP0_ENABLE, 0,
DP_SEC_GSP1_ENABLE, 0,
DP_SEC_GSP2_ENABLE, 0,
DP_SEC_GSP3_ENABLE, 0,
DP_SEC_GSP4_ENABLE, 0,
DP_SEC_GSP5_ENABLE, 0,
DP_SEC_GSP6_ENABLE, 0,
DP_SEC_GSP7_ENABLE, 0,
DP_SEC_MPG_ENABLE, 0,
DP_SEC_STREAM_ENABLE, 0);
/* this register is shared with the audio info frame,
 * therefore we need to keep the master enabled
 * if at least one of the fields is not 0 */
value = REG_READ(DP_SEC_CNTL);
if (value)
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
}
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
/* Note: For CZ, we are changing driver default to disable
* stream deferred to next VBLANK. If results are positive, we
* will make the same change to all DCE versions. There are a
* handful of panels that cannot handle disable stream at
* HBLANK and will result in a white line flash across the
* screen on stream disable.
*/
REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, &reg1);
if ((reg1 & 0x1) == 0)
/*stream not enabled*/
return;
/* Specify the video stream disable point
* (2 = start of the next vertical blank)
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
* 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode +
* a little more because we may not trust delay accuracy.
*/
max_retries = DP_BLANK_MAX_RETRY * 501;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
/* the encoder stops sending the video stream
* at the start of the vertical blanking.
* Poll for DP_VID_STREAM_STATUS == 0
*/
REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
0,
10, max_retries);
/* Tell the DP encoder to ignore timing from CRTC, must be done after
* the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
* complete, stream status will be stuck in video stream enabled state,
* i.e. DP_VID_STREAM_STATUS stuck at 1.
*/
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
}
/* output video stream to link encoder */
void enc1_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
uint32_t n_vid = 0x8000;
uint32_t m_vid;
uint32_t n_multiply = 0;
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (param->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
/* the pixel clock in param->timing is already half of the 4:4:4 rate for 4:2:0 */
n_multiply = 1;
}
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
*/
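/* pix_clk_100hz/10 and link_rate * LINK_RATE_REF_FREQ_IN_KHZ are both in
 * kHz here, so the units cancel in the division below
 */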
m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
m_vid = (uint32_t) m_vid_l;
/* enable auto measurement */
REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
/* auto measurement needs one full 0x8000 symbol cycle to kick in,
 * therefore program initial values for Mvid and Nvid
*/
REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
REG_UPDATE_2(DP_VID_TIMING,
DP_VID_M_N_GEN_EN, 1,
DP_VID_N_MUL, n_multiply);
}
/* set DIG_START to 0x1 to resync FIFO */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
/* switch DP encoder to CRTC data */
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
udelay(100);
/* the hardware would start sending video at the start of the next DP
* frame (i.e. rising edge of the vblank).
* NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
* register has no effect on enable transition! HW always guarantees
* VID_STREAM enable at start of next frame, and this is not
* programmable
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
link->dc->link_srv->dp_trace_source_sequence(link,
DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
void enc1_stream_encoder_set_avmute(
struct stream_encoder *enc,
bool enable)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
unsigned int value = enable ? 1 : 0;
REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
}
void enc1_reset_hdmi_stream_attribute(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE_5(HDMI_CONTROL,
HDMI_PACKET_GEN_VERSION, 1,
HDMI_KEEPOUT_MODE, 1,
HDMI_DEEP_COLOR_ENABLE, 0,
HDMI_DATA_SCRAMBLE_EN, 0,
HDMI_CLOCK_CHANNEL_RATE, 0);
}
#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
#include "include/audio_types.h"
/* 25.2MHz/1.001*/
/* 25.2MHz/1.001*/
/* 25.2MHz*/
/* 27MHz */
/* 27MHz*1.001*/
/* 27MHz*1.001*/
/* 54MHz*/
/* 54MHz*1.001*/
/* 74.25MHz/1.001*/
/* 74.25MHz*/
/* 148.5MHz/1.001*/
/* 148.5MHz*/
static const struct audio_clock_info audio_clock_info_table[16] = {
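/* columns (matching struct audio_clock_info): pixel_clock_in_10khz,
 * n_32khz, cts_32khz, n_44khz, cts_44khz, n_48khz, cts_48khz
 */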
{2517, 4576, 28125, 7007, 31250, 6864, 28125},
{2518, 4576, 28125, 7007, 31250, 6864, 28125},
{2520, 4096, 25200, 6272, 28000, 6144, 25200},
{2700, 4096, 27000, 6272, 30000, 6144, 27000},
{2702, 4096, 27027, 6272, 30030, 6144, 27027},
{2703, 4096, 27027, 6272, 30030, 6144, 27027},
{5400, 4096, 54000, 6272, 60000, 6144, 54000},
{5405, 4096, 54054, 6272, 60060, 6144, 54054},
{7417, 11648, 210937, 17836, 234375, 11648, 140625},
{7425, 4096, 74250, 6272, 82500, 6144, 74250},
{14835, 11648, 421875, 8918, 234375, 5824, 140625},
{14850, 4096, 148500, 6272, 165000, 6144, 148500},
{29670, 5824, 421875, 4459, 234375, 5824, 281250},
{29700, 3072, 222750, 4704, 247500, 5120, 247500},
{59340, 5824, 843750, 8918, 937500, 5824, 562500},
{59400, 3072, 445500, 9408, 990000, 6144, 594000}
};
static const struct audio_clock_info audio_clock_info_table_36bpc[14] = {
{2517, 9152, 84375, 7007, 48875, 9152, 56250},
{2518, 9152, 84375, 7007, 48875, 9152, 56250},
{2520, 4096, 37800, 6272, 42000, 6144, 37800},
{2700, 4096, 40500, 6272, 45000, 6144, 40500},
{2702, 8192, 81081, 6272, 45045, 8192, 54054},
{2703, 8192, 81081, 6272, 45045, 8192, 54054},
{5400, 4096, 81000, 6272, 90000, 6144, 81000},
{5405, 4096, 81081, 6272, 90090, 6144, 81081},
{7417, 11648, 316406, 17836, 351562, 11648, 210937},
{7425, 4096, 111375, 6272, 123750, 6144, 111375},
{14835, 11648, 632812, 17836, 703125, 11648, 421875},
{14850, 4096, 222750, 6272, 247500, 6144, 222750},
{29670, 5824, 632812, 8918, 703125, 5824, 421875},
{29700, 4096, 445500, 4704, 371250, 5120, 371250}
};
static const struct audio_clock_info audio_clock_info_table_48bpc[14] = {
{2517, 4576, 56250, 7007, 62500, 6864, 56250},
{2518, 4576, 56250, 7007, 62500, 6864, 56250},
{2520, 4096, 50400, 6272, 56000, 6144, 50400},
{2700, 4096, 54000, 6272, 60000, 6144, 54000},
{2702, 4096, 54054, 6267, 60060, 8192, 54054},
{2703, 4096, 54054, 6272, 60060, 8192, 54054},
{5400, 4096, 108000, 6272, 120000, 6144, 108000},
{5405, 4096, 108108, 6272, 120120, 6144, 108108},
{7417, 11648, 421875, 17836, 468750, 11648, 281250},
{7425, 4096, 148500, 6272, 165000, 6144, 148500},
{14835, 11648, 843750, 8918, 468750, 11648, 281250},
{14850, 4096, 297000, 6272, 330000, 6144, 297000},
{29670, 5824, 843750, 4459, 468750, 5824, 562500},
{29700, 3072, 445500, 4704, 495000, 5120, 495000}
};
static union audio_cea_channels speakers_to_channels(
struct audio_speaker_flags speaker_flags)
{
union audio_cea_channels cea_channels = {0};
/* these are one to one */
cea_channels.channels.FL = speaker_flags.FL_FR;
cea_channels.channels.FR = speaker_flags.FL_FR;
cea_channels.channels.LFE = speaker_flags.LFE;
cea_channels.channels.FC = speaker_flags.FC;
/* if Rear Left and Right exist move RC speaker to channel 7
* otherwise to channel 5
*/
if (speaker_flags.RL_RR) {
cea_channels.channels.RL_RC = speaker_flags.RL_RR;
cea_channels.channels.RR = speaker_flags.RL_RR;
cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
} else {
cea_channels.channels.RL_RC = speaker_flags.RC;
}
/* FRONT Left Right Center and REAR Left Right Center are exclusive */
if (speaker_flags.FLC_FRC) {
cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
} else {
cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
}
return cea_channels;
}
void get_audio_clock_info(
enum dc_color_depth color_depth,
uint32_t crtc_pixel_clock_100Hz,
uint32_t actual_pixel_clock_100Hz,
struct audio_clock_info *audio_clock_info)
{
const struct audio_clock_info *clock_info;
uint32_t index;
uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_100Hz / 100;
uint32_t audio_array_size;
switch (color_depth) {
case COLOR_DEPTH_161616:
clock_info = audio_clock_info_table_48bpc;
audio_array_size = ARRAY_SIZE(
audio_clock_info_table_48bpc);
break;
case COLOR_DEPTH_121212:
clock_info = audio_clock_info_table_36bpc;
audio_array_size = ARRAY_SIZE(
audio_clock_info_table_36bpc);
break;
default:
clock_info = audio_clock_info_table;
audio_array_size = ARRAY_SIZE(
audio_clock_info_table);
break;
}
if (clock_info != NULL) {
/* search for exact pixel clock in table */
for (index = 0; index < audio_array_size; index++) {
if (clock_info[index].pixel_clock_in_10khz >
crtc_pixel_clock_in_10khz)
break; /* not match */
else if (clock_info[index].pixel_clock_in_10khz ==
crtc_pixel_clock_in_10khz) {
/* match found */
*audio_clock_info = clock_info[index];
return;
}
}
}
/* not found */
if (actual_pixel_clock_100Hz == 0)
actual_pixel_clock_100Hz = crtc_pixel_clock_100Hz;
/* See the HDMI spec, table entry under
 * pixel clock of "Other". */
audio_clock_info->pixel_clock_in_10khz =
actual_pixel_clock_100Hz / 100;
audio_clock_info->cts_32khz = actual_pixel_clock_100Hz / 10;
audio_clock_info->cts_44khz = actual_pixel_clock_100Hz / 10;
audio_clock_info->cts_48khz = actual_pixel_clock_100Hz / 10;
audio_clock_info->n_32khz = 4096;
audio_clock_info->n_44khz = 6272;
audio_clock_info->n_48khz = 6144;
}
static void enc1_se_audio_setup(
struct stream_encoder *enc,
unsigned int az_inst,
struct audio_info *audio_info)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t channels = 0;
ASSERT(audio_info);
if (audio_info == NULL)
/* This should not happen; return if it does so we don't get a BSOD */
return;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
/* Channel allocation */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
}
static void enc1_se_setup_hdmi_audio(
struct stream_encoder *enc,
const struct audio_crtc_info *crtc_info)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct audio_clock_info audio_clock_info = {0};
/* HDMI_AUDIO_PACKET_CONTROL */
REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
HDMI_AUDIO_DELAY_EN, 1);
/* AFMT_AUDIO_PACKET_CONTROL */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
/* AFMT_AUDIO_PACKET_CONTROL2 */
REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
AFMT_AUDIO_LAYOUT_OVRD, 0,
AFMT_60958_OSF_OVRD, 0);
/* HDMI_ACR_PACKET_CONTROL */
REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
HDMI_ACR_AUTO_SEND, 1,
HDMI_ACR_SOURCE, 0,
HDMI_ACR_AUDIO_PRIORITY, 0);
/* Program audio clock sample/regeneration parameters */
get_audio_clock_info(crtc_info->color_depth,
crtc_info->requested_pixel_clock_100Hz,
crtc_info->calculated_pixel_clock_100Hz,
&audio_clock_info);
DC_LOG_HW_AUDIO(
"\n%s:Input::requested_pixel_clock_100Hz = %d" \
"calculated_pixel_clock_100Hz = %d \n", __func__, \
crtc_info->requested_pixel_clock_100Hz, \
crtc_info->calculated_pixel_clock_100Hz);
/* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
/* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
/* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
/* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
/* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
/* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
/* The video driver cannot know in advance which sample rate will
 * be used by the HD Audio driver.
 * The HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
 * programmed below in the interrupt callback.
 */
/* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK
*/
REG_UPDATE_2(AFMT_60958_0,
AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
AFMT_60958_CS_CLOCK_ACCURACY, 0);
/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
/* AFMT_60958_2: keep these settings until
 * the programming guide comes out
 */
REG_UPDATE_6(AFMT_60958_2,
AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
}
static void enc1_se_setup_dp_audio(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
/* --- DP Audio packet configurations --- */
/* ATP Configuration */
REG_SET(DP_SEC_AUD_N, 0,
DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
/* Async/auto-calc timestamp mode */
REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
/* --- The following are the registers
* copied from the SetupHDMI ---
*/
/* AFMT_AUDIO_PACKET_CONTROL */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
/* AFMT_AUDIO_PACKET_CONTROL2 */
/* Program the ATP and AIP next */
REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
AFMT_AUDIO_LAYOUT_OVRD, 0,
AFMT_60958_OSF_OVRD, 0);
/* AFMT_INFOFRAME_CONTROL0 */
REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
/* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
}
void enc1_se_enable_audio_clock(
struct stream_encoder *enc,
bool enable)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
if (REG(AFMT_CNTL) == 0)
return; /* DCE8/10 does not have this register */
REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
/* wait for AFMT clock to turn on,
* expectation: this should complete in 1-2 reads
*
* REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
*
* TODO: wait for clock_on does not work well. May need HW
* program sequence. But audio seems work normally even without wait
* for clock_on status change
*/
}
void enc1_se_enable_dp_audio(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
/* Enable Audio packets */
REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
/* Program the ATP and AIP next */
REG_UPDATE_2(DP_SEC_CNTL,
DP_SEC_ATP_ENABLE, 1,
DP_SEC_AIP_ENABLE, 1);
/* Program STREAM_ENABLE after all the other enables. */
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
}
static void enc1_se_disable_dp_audio(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t value = 0;
/* Disable Audio packets */
REG_UPDATE_5(DP_SEC_CNTL,
DP_SEC_ASP_ENABLE, 0,
DP_SEC_ATP_ENABLE, 0,
DP_SEC_AIP_ENABLE, 0,
DP_SEC_ACM_ENABLE, 0,
DP_SEC_STREAM_ENABLE, 0);
/* This register is shared with the encoder info frame. Therefore we need to
 * keep the master enabled if at least one of the fields is not 0.
 */
value = REG_READ(DP_SEC_CNTL);
if (value != 0)
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
}
void enc1_se_audio_mute_control(
struct stream_encoder *enc,
bool mute)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
}
void enc1_se_dp_audio_setup(
struct stream_encoder *enc,
unsigned int az_inst,
struct audio_info *info)
{
enc1_se_audio_setup(enc, az_inst, info);
}
void enc1_se_dp_audio_enable(
struct stream_encoder *enc)
{
enc1_se_enable_audio_clock(enc, true);
enc1_se_setup_dp_audio(enc);
enc1_se_enable_dp_audio(enc);
}
void enc1_se_dp_audio_disable(
struct stream_encoder *enc)
{
enc1_se_disable_dp_audio(enc);
enc1_se_enable_audio_clock(enc, false);
}
void enc1_se_hdmi_audio_setup(
struct stream_encoder *enc,
unsigned int az_inst,
struct audio_info *info,
struct audio_crtc_info *audio_crtc_info)
{
enc1_se_enable_audio_clock(enc, true);
enc1_se_setup_hdmi_audio(enc, audio_crtc_info);
enc1_se_audio_setup(enc, az_inst, info);
}
void enc1_se_hdmi_audio_disable(
struct stream_encoder *enc)
{
if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
enc->afmt->funcs->afmt_powerdown(enc->afmt);
enc1_se_enable_audio_clock(enc, false);
}
void enc1_setup_stereo_sync(
struct stream_encoder *enc,
int tg_inst, bool enable)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst);
REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
}
void enc1_dig_connect_to_otg(
struct stream_encoder *enc,
int tg_inst)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst);
}
unsigned int enc1_dig_source_otg(
struct stream_encoder *enc)
{
uint32_t tg_inst = 0;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst);
return tg_inst;
}
bool enc1_stream_encoder_dp_get_pixel_format(
struct stream_encoder *enc,
enum dc_pixel_encoding *encoding,
enum dc_color_depth *depth)
{
uint32_t hw_encoding = 0;
uint32_t hw_depth = 0;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
if (enc == NULL ||
encoding == NULL ||
depth == NULL)
return false;
REG_GET_2(DP_PIXEL_FORMAT,
DP_PIXEL_ENCODING, &hw_encoding,
DP_COMPONENT_DEPTH, &hw_depth);
switch (hw_depth) {
case DP_COMPONENT_PIXEL_DEPTH_6BPC:
*depth = COLOR_DEPTH_666;
break;
case DP_COMPONENT_PIXEL_DEPTH_8BPC:
*depth = COLOR_DEPTH_888;
break;
case DP_COMPONENT_PIXEL_DEPTH_10BPC:
*depth = COLOR_DEPTH_101010;
break;
case DP_COMPONENT_PIXEL_DEPTH_12BPC:
*depth = COLOR_DEPTH_121212;
break;
case DP_COMPONENT_PIXEL_DEPTH_16BPC:
*depth = COLOR_DEPTH_161616;
break;
default:
*depth = COLOR_DEPTH_UNDEFINED;
break;
}
switch (hw_encoding) {
case DP_PIXEL_ENCODING_TYPE_RGB444:
*encoding = PIXEL_ENCODING_RGB;
break;
case DP_PIXEL_ENCODING_TYPE_YCBCR422:
*encoding = PIXEL_ENCODING_YCBCR422;
break;
case DP_PIXEL_ENCODING_TYPE_YCBCR444:
case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
*encoding = PIXEL_ENCODING_YCBCR444;
break;
case DP_PIXEL_ENCODING_TYPE_YCBCR420:
*encoding = PIXEL_ENCODING_YCBCR420;
break;
default:
*encoding = PIXEL_ENCODING_UNDEFINED;
break;
}
return true;
}
static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dp_set_stream_attribute =
enc1_stream_encoder_dp_set_stream_attribute,
.hdmi_set_stream_attribute =
enc1_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
enc1_stream_encoder_dvi_set_stream_attribute,
.set_throttled_vcp_size =
enc1_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
enc1_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc1_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets =
enc1_stream_encoder_update_dp_info_packets,
.send_immediate_sdp_message =
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
enc1_stream_encoder_dp_unblank,
.audio_mute_control = enc1_se_audio_mute_control,
.dp_audio_setup = enc1_se_dp_audio_setup,
.dp_audio_enable = enc1_se_dp_audio_enable,
.dp_audio_disable = enc1_se_dp_audio_disable,
.hdmi_audio_setup = enc1_se_hdmi_audio_setup,
.hdmi_audio_disable = enc1_se_hdmi_audio_disable,
.setup_stereo_sync = enc1_setup_stereo_sync,
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.dig_source_otg = enc1_dig_source_otg,
.dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
};
void dcn10_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
struct dc_context *ctx,
struct dc_bios *bp,
enum engine_id eng_id,
const struct dcn10_stream_enc_registers *regs,
const struct dcn10_stream_encoder_shift *se_shift,
const struct dcn10_stream_encoder_mask *se_mask)
{
enc1->base.funcs = &dcn10_str_enc_funcs;
enc1->base.ctx = ctx;
enc1->base.id = eng_id;
enc1->base.bp = bp;
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
enum pixel_format_description {
PIXEL_FORMAT_FIXED = 0,
PIXEL_FORMAT_FIXED16,
PIXEL_FORMAT_FLOAT
};
enum dcn10_coef_filter_type_sel {
SCL_COEF_LUMA_VERT_FILTER = 0,
SCL_COEF_LUMA_HORZ_FILTER = 1,
SCL_COEF_CHROMA_VERT_FILTER = 2,
SCL_COEF_CHROMA_HORZ_FILTER = 3,
SCL_COEF_ALPHA_VERT_FILTER = 4,
SCL_COEF_ALPHA_HORZ_FILTER = 5
};
enum dscl_autocal_mode {
AUTOCAL_MODE_OFF = 0,
/* Autocal calculates the scaling ratio and initial phase and the
 * DSCL_MODE_SEL must be set to 1
 */
AUTOCAL_MODE_AUTOSCALE = 1,
/* Autocal performs auto centering without replication and the
 * DSCL_MODE_SEL must be set to 0
 */
AUTOCAL_MODE_AUTOCENTER = 2,
/* Autocal performs auto centering and auto replication and the
 * DSCL_MODE_SEL must be set to 0
 */
AUTOCAL_MODE_AUTOREPLICATE = 3
};
enum dscl_mode_sel {
DSCL_MODE_SCALING_444_BYPASS = 0,
DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
DSCL_MODE_DSCL_BYPASS = 6
};
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_GET(DPP_CONTROL,
DPP_CLOCK_ENABLE, &s->is_enabled);
REG_GET(CM_IGAM_CONTROL,
CM_IGAM_LUT_MODE, &s->igam_lut_mode);
REG_GET(CM_IGAM_CONTROL,
CM_IGAM_INPUT_FORMAT, &s->igam_input_format);
REG_GET(CM_DGAM_CONTROL,
CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
REG_GET(CM_RGAM_CONTROL,
CM_RGAM_LUT_MODE, &s->rgam_lut_mode);
REG_GET(CM_GAMUT_REMAP_CONTROL,
CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
if (s->gamut_remap_mode) {
s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
}
}
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
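/* a ratio is identity (1.0) when its unsigned 2.19 fixed-point representation equals 1 << 19 */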
bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
scl_data->ratios.horz.value != dc_fixpt_one.value &&
scl_data->ratios.vert.value != dc_fixpt_one.value)
return false;
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
return false;
/* TODO: add lb check */
/* No support for programming ratio of 4, drop to 3.99999.. */
if (scl_data->ratios.horz.value == (4ll << 32))
scl_data->ratios.horz.value--;
if (scl_data->ratios.vert.value == (4ll << 32))
scl_data->ratios.vert.value--;
if (scl_data->ratios.horz_c.value == (4ll << 32))
scl_data->ratios.horz_c.value--;
if (scl_data->ratios.vert_c.value == (4ll << 32))
scl_data->ratios.vert_c.value--;
/* Set default taps if none are provided */
if (in_taps->h_taps == 0)
scl_data->taps.h_taps = 4;
else
scl_data->taps.h_taps = in_taps->h_taps;
if (in_taps->v_taps == 0)
scl_data->taps.v_taps = 4;
else
scl_data->taps.v_taps = in_taps->v_taps;
if (in_taps->v_taps_c == 0)
scl_data->taps.v_taps_c = 2;
else
scl_data->taps.v_taps_c = in_taps->v_taps_c;
if (in_taps->h_taps_c == 0)
scl_data->taps.h_taps_c = 2;
/* Only 1 and even h_taps_c are supported by hw */
else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
if (!dpp->ctx->dc->debug.always_scale) {
if (IDENTITY_RATIO(scl_data->ratios.horz))
scl_data->taps.h_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert))
scl_data->taps.v_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.horz_c))
scl_data->taps.h_taps_c = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert_c))
scl_data->taps.v_taps_c = 1;
}
return true;
}
void dpp_reset(struct dpp *dpp_base)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
dpp->filter_h_c = NULL;
dpp->filter_v_c = NULL;
dpp->filter_h = NULL;
dpp->filter_v = NULL;
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
}
static void dpp1_cm_set_regamma_pwl(
struct dpp *dpp_base, const struct pwl_params *params, enum opp_regamma mode)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
uint32_t re_mode = 0;
switch (mode) {
case OPP_REGAMMA_BYPASS:
re_mode = 0;
break;
case OPP_REGAMMA_SRGB:
re_mode = 1;
break;
case OPP_REGAMMA_XVYCC:
re_mode = 2;
break;
case OPP_REGAMMA_USER:
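/* the user LUT is double buffered: reuse the cached programming when the
 * params match, otherwise program the idle RAM (A or B) and switch
 * CM_RGAM_LUT_MODE to it
 */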
re_mode = dpp->is_write_to_ram_a_safe ? 4 : 3;
if (memcmp(&dpp->pwl_data, params, sizeof(*params)) == 0)
break;
dpp1_cm_power_on_regamma_lut(dpp_base, true);
dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe);
if (dpp->is_write_to_ram_a_safe)
dpp1_cm_program_regamma_luta_settings(dpp_base, params);
else
dpp1_cm_program_regamma_lutb_settings(dpp_base, params);
dpp1_cm_program_regamma_lut(dpp_base, params->rgb_resulted,
params->hw_points_num);
dpp->pwl_data = *params;
re_mode = dpp->is_write_to_ram_a_safe ? 3 : 4;
dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe;
break;
default:
break;
}
REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode);
}
static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\
enum pixel_format_description *fmt)
{
if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F ||
input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F)
*fmt = PIXEL_FORMAT_FLOAT;
else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ||
input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616)
*fmt = PIXEL_FORMAT_FIXED16;
else
*fmt = PIXEL_FORMAT_FIXED;
}
static void dpp1_set_degamma_format_float(
struct dpp *dpp_base,
bool is_float)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (is_float) {
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3);
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1);
} else {
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2);
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0);
}
}
void dpp1_cnv_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut)
{
uint32_t pixel_format;
uint32_t alpha_en;
enum pixel_format_description fmt;
enum dc_color_space color_space;
enum dcn10_input_csc_select select;
bool is_float;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
bool force_disable_cursor = false;
struct out_csc_color_matrix tbl_entry;
int i = 0;
dpp1_setup_format_flags(format, &fmt);
alpha_en = 1;
pixel_format = 0;
color_space = COLOR_SPACE_SRGB;
select = INPUT_CSC_SELECT_BYPASS;
is_float = false;
switch (fmt) {
case PIXEL_FORMAT_FIXED:
case PIXEL_FORMAT_FIXED16:
/*when output is float then FORMAT_CONTROL__OUTPUT_FP=1*/
REG_SET_3(FORMAT_CONTROL, 0,
CNVC_BYPASS, 0,
FORMAT_EXPANSION_MODE, mode,
OUTPUT_FP, 0);
break;
case PIXEL_FORMAT_FLOAT:
REG_SET_3(FORMAT_CONTROL, 0,
CNVC_BYPASS, 0,
FORMAT_EXPANSION_MODE, mode,
OUTPUT_FP, 1);
is_float = true;
break;
default:
break;
}
dpp1_set_degamma_format_float(dpp_base, is_float);
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
pixel_format = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
pixel_format = 3;
alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
pixel_format = 8;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
pixel_format = 10;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
pixel_format = 24;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
pixel_format = 25;
break;
default:
break;
}
/* Set default color space based on format if none is given. */
color_space = input_color_space ? input_color_space : color_space;
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
// if input adjustments exist, program icsc with those values
if (input_csc_color_matrix.enable_adjustment
== true) {
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
tbl_entry.color_space = color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = INPUT_CSC_SELECT_ICSC;
else
select = INPUT_CSC_SELECT_BYPASS;
dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
} else
dpp1_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, 0);
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, 0);
}
}
void dpp1_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
{
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_UPDATE_2(CURSOR0_CONTROL,
CUR0_MODE, color_format,
CUR0_EXPANSION_MODE, 0);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
REG_UPDATE(CURSOR0_COLOR0,
CUR0_COLOR0, 0x00000000);
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
}
void dpp1_set_cursor_position(
struct dpp *dpp_base,
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param,
uint32_t width,
uint32_t height)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
int x_pos = pos->x - param->viewport.x;
int y_pos = pos->y - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
int src_x_offset = x_pos - pos->x_hotspot;
int src_y_offset = y_pos - pos->y_hotspot;
int cursor_height = (int)height;
int cursor_width = (int)width;
uint32_t cur_en = pos->enable ? 1 : 0;
// Transform cursor width / height and hotspots for offset calculations
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
swap(cursor_height, cursor_width);
swap(x_hotspot, y_hotspot);
if (param->rotation == ROTATION_ANGLE_90) {
// hotspot = (-y, x)
src_x_offset = x_pos - (cursor_width - x_hotspot);
src_y_offset = y_pos - y_hotspot;
} else if (param->rotation == ROTATION_ANGLE_270) {
// hotspot = (y, -x)
src_x_offset = x_pos - x_hotspot;
src_y_offset = y_pos - (cursor_height - y_hotspot);
}
} else if (param->rotation == ROTATION_ANGLE_180) {
// hotspot = (-x, -y)
if (!param->mirror)
src_x_offset = x_pos - (cursor_width - x_hotspot);
src_y_offset = y_pos - (cursor_height - y_hotspot);
}
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, cur_en);
dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
struct dpp *dpp_base,
struct dpp_cursor_attributes *attr)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (attr) {
REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
}
}
void dpp1_dppclk_control(
struct dpp *dpp_base,
bool dppclk_div,
bool enable)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
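/* only ASICs whose register mask defines DPPCLK_RATE_CONTROL support
 * per-DPP clock rate division; otherwise just gate the clock
 */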
if (enable) {
if (dpp->tf_mask->DPPCLK_RATE_CONTROL)
REG_UPDATE_2(DPP_CONTROL,
DPPCLK_RATE_CONTROL, dppclk_div,
DPP_CLOCK_ENABLE, 1);
else
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1);
} else
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
}
static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
.dpp_set_csc_default = dpp1_cm_set_output_csc_default,
.dpp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut,
.dpp_program_regamma_lut = dpp1_cm_program_regamma_lut,
.dpp_configure_regamma_lut = dpp1_cm_configure_regamma_lut,
.dpp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings,
.dpp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings,
.dpp_program_regamma_pwl = dpp1_cm_set_regamma_pwl,
.dpp_program_bias_and_scale = dpp1_program_bias_and_scale,
.dpp_set_degamma = dpp1_set_degamma,
.dpp_program_input_lut = dpp1_program_input_lut,
.dpp_program_degamma_pwl = dpp1_set_degamma_pwl,
.dpp_setup = dpp1_cnv_setup,
.dpp_full_bypass = dpp1_full_bypass,
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
.dpp_program_blnd_lut = NULL,
.dpp_program_shaper_lut = NULL,
.dpp_program_3dlut = NULL
};
static struct dpp_caps dcn10_dpp_cap = {
.dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT,
.dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions,
};
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
void dpp1_construct(
struct dcn10_dpp *dpp,
struct dc_context *ctx,
uint32_t inst,
const struct dcn_dpp_registers *tf_regs,
const struct dcn_dpp_shift *tf_shift,
const struct dcn_dpp_mask *tf_mask)
{
dpp->base.ctx = ctx;
dpp->base.inst = inst;
dpp->base.funcs = &dcn10_dpp_funcs;
dpp->base.caps = &dcn10_dpp_cap;
dpp->tf_regs = tf_regs;
dpp->tf_shift = tf_shift;
dpp->tf_mask = tf_mask;
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
LB_PIXEL_DEPTH_30BPP |
LB_PIXEL_DEPTH_36BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c |
/*
* Copyright 2016-2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hw_sequencer_private.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
.power_down_on_boot = dcn10_power_down_on_boot,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
.post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
.program_output_csc = dcn10_program_output_csc,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
.update_info_frame = dce110_update_info_frame,
.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
.unblank_stream = dcn10_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock,
.cursor_lock = dcn10_cursor_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
.init_pipes = dcn10_init_pipes,
.update_plane_addr = dcn10_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.program_pipe = dcn10_program_pipe,
.update_mpcc = dcn10_update_mpcc,
.set_input_transfer_func = dcn10_set_input_transfer_func,
.set_output_transfer_func = dcn10_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn10_blank_pixel_data,
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = NULL,
.disable_vga = dcn10_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn10_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn10_enable_power_gating_plane,
.dpp_pg_control = dcn10_dpp_pg_control,
.hubp_pg_control = dcn10_hubp_pg_control,
.dsc_pg_control = NULL,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
};
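/*
 * dcn10_hw_sequencer_construct() installs the DCN1.0 hw sequencer tables
 * declared above into the dc object.  It is expected to be called from the
 * DCN1.0 resource construction path, after dc->hwseq has been allocated, so
 * that subsequent hwss calls dispatch to the DCN1.0 and shared DCE110
 * implementations listed in the tables.
 */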
void dcn10_hw_sequencer_construct(struct dc *dc)
{
dc->hwss = dcn10_funcs;
dc->hwseq->funcs = dcn10_private_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "resource.h"
#include "custom_float.h"
#include "dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
#include "dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "reg_helper.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "clk_mgr.h"
__printf(3, 4)
unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...)
{
int ret_vsnprintf;
unsigned int chars_printed;
va_list args;
va_start(args, fmt);
ret_vsnprintf = vsnprintf(pbuf, bufsize, fmt, args);
va_end(args);
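	/*
	 * vsnprintf() returns the length the formatted string would have had,
	 * which can exceed bufsize when the output is truncated.  Clamp the
	 * return value to the number of characters actually stored (excluding
	 * the terminating NUL) so callers can safely advance their buffer,
	 * e.g.:
	 *
	 *	n = snprintf_count(pbuf, remaining, "...");
	 *	remaining -= n;
	 *	pbuf += n;
	 */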
if (ret_vsnprintf > 0) {
if (ret_vsnprintf < bufsize)
chars_printed = ret_vsnprintf;
else
chars_printed = bufsize - 1;
	} else {
		chars_printed = 0;
	}
return chars_printed;
}
static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned int bufSize)
{
struct dc_context *dc_ctx = dc->ctx;
struct dcn_hubbub_wm wm;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
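	/*
	 * The watermark values read back below are in dchub refclk cycles.
	 * Dividing by ref_clk_mhz converts cycles to microseconds; the
	 * (val * frac) / ref_clk_mhz intermediate keeps three fractional
	 * digits using integer math only.  Illustrative example: 1234 cycles
	 * at a 400 MHz refclk gives (1234 * 1000) / 400 = 3085, printed as
	 * "3.085" (microseconds).
	 */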
memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_change\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < 4; i++) {
struct dcn_hubbub_wm_set *s;
s = &wm.sets[i];
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d.%03d,%d.%03d,%d.%03d,%d.%03d,%d.%03d\n",
s->wm_set,
(s->data_urgent * frac) / ref_clk_mhz / frac, (s->data_urgent * frac) / ref_clk_mhz % frac,
(s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / ref_clk_mhz % frac,
(s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % frac,
(s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % frac,
(s->dram_clk_change * frac) / ref_clk_mhz / frac, (s->dram_clk_change * frac) / ref_clk_mhz % frac);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned int bufSize, bool invarOnly)
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
if (invarOnly)
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow,"
"min_ttu_vblank,qos_low_wm,qos_high_wm"
"\n");
else
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,addr_lo,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow,"
"min_ttu_vblank,qos_low_wm,qos_high_wm"
"\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < pool->pipe_count; i++) {
struct hubp *hubp = pool->hubps[i];
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
hubp->funcs->hubp_read_state(hubp);
if (!s->blank_en) {
if (invarOnly)
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x,"
"%d.%03d,%d.%03d,%d.%03d"
"\n",
hubp->inst,
s->pixel_format,
s->inuse_addr_hi,
s->viewport_width,
s->viewport_height,
s->rotation_angle,
s->h_mirror_en,
s->sw_mode,
s->dcc_en,
s->blank_en,
s->ttu_disable,
s->underflow_status,
(s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
(s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
(s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
else
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x,"
"%d.%03d,%d.%03d,%d.%03d"
"\n",
hubp->inst,
s->pixel_format,
s->inuse_addr_hi,
s->inuse_addr_lo,
s->viewport_width,
s->viewport_height,
s->rotation_angle,
s->h_mirror_en,
s->sw_mode,
s->dcc_en,
s->blank_en,
s->ttu_disable,
s->underflow_status,
(s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
(s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
(s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_rq_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
struct resource_pool *pool = dc->res_pool;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,drq_exp_m,prq_exp_m,mrq_exp_m,crq_exp_m,plane1_ba,"
"luma_chunk_s,luma_min_chu_s,luma_meta_ch_s,luma_min_m_c_s,luma_dpte_gr_s,luma_mpte_gr_s,luma_swath_hei,luma_pte_row_h,"
"chroma_chunk_s,chroma_min_chu_s,chroma_meta_ch_s,chroma_min_m_c_s,chroma_dpte_gr_s,chroma_mpte_gr_s,chroma_swath_hei,chroma_pte_row_h"
"\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
if (!s->blank_en) {
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,%x"
"\n",
pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_dlg_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
struct resource_pool *pool = dc->res_pool;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,rc_hbe,dlg_vbe,min_d_y_n,rc_per_ht,rc_x_a_s,"
"dst_y_a_s,dst_y_pf,dst_y_vvb,dst_y_rvb,dst_y_vfl,dst_y_rfl,rf_pix_fq,"
"vratio_pf,vrat_pf_c,rc_pg_vbl,rc_pg_vbc,rc_mc_vbl,rc_mc_vbc,rc_pg_fll,"
"rc_pg_flc,rc_mc_fll,rc_mc_flc,pr_nom_l,pr_nom_c,rc_pg_nl,rc_pg_nc,"
"mr_nom_l,mr_nom_c,rc_mc_nl,rc_mc_nc,rc_ld_pl,rc_ld_pc,rc_ld_l,"
"rc_ld_c,cha_cur0,ofst_cur1,cha_cur1,vr_af_vc0,ddrq_limt,x_rt_dlay,x_rp_dlay,x_rr_sfl"
"\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
if (!s->blank_en) {
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,%x,%x,%x"
"\n",
pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
dlg_regs->xfc_reg_remote_surface_flip_latency);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_ttu_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
struct resource_pool *pool = dc->res_pool;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,qos_ll_wm,qos_lh_wm,mn_ttu_vb,qos_l_flp,rc_rd_p_l,rc_rd_l,rc_rd_p_c,"
"rc_rd_c,rc_rd_c0,rc_rd_pc0,rc_rd_c1,rc_rd_pc1,qos_lf_l,qos_rds_l,"
"qos_lf_c,qos_rds_c,qos_lf_c0,qos_rds_c0,qos_lf_c1,qos_rds_c1"
"\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
if (!s->blank_en) {
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x,%x,"
"%x,%x,%x,%x,%x,%x"
"\n",
pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_cm_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
struct resource_pool *pool = dc->res_pool;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,igam_format,igam_mode,dgam_mode,rgam_mode,gamut_mode,"
"c11_c12,c13_c14,c21_c22,c23_c24,c31_c32,c33_c34"
"\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < pool->pipe_count; i++) {
struct dpp *dpp = pool->dpps[i];
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
if (s.is_enabled) {
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,"
"%s,%s,%s,"
"%x,%08x,%08x,%08x,%08x,%08x,%08x"
"\n",
dpp->inst, s.igam_input_format,
(s.igam_lut_mode == 0) ? "BypassFixed" :
((s.igam_lut_mode == 1) ? "BypassFloat" :
((s.igam_lut_mode == 2) ? "RAM" :
((s.igam_lut_mode == 3) ? "RAM" :
"Unknown"))),
(s.dgam_lut_mode == 0) ? "Bypass" :
((s.dgam_lut_mode == 1) ? "sRGB" :
((s.dgam_lut_mode == 2) ? "Ycc" :
((s.dgam_lut_mode == 3) ? "RAM" :
((s.dgam_lut_mode == 4) ? "RAM" :
"Unknown")))),
(s.rgam_lut_mode == 0) ? "Bypass" :
((s.rgam_lut_mode == 1) ? "sRGB" :
((s.rgam_lut_mode == 2) ? "Ycc" :
((s.rgam_lut_mode == 3) ? "RAM" :
((s.rgam_lut_mode == 4) ? "RAM" :
"Unknown")))),
s.gamut_remap_mode, s.gamut_remap_c11_c12,
s.gamut_remap_c13_c14, s.gamut_remap_c21_c22, s.gamut_remap_c23_c24,
s.gamut_remap_c31_c32, s.gamut_remap_c33_c34);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
struct resource_pool *pool = dc->res_pool;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,opp,dpp,mpccbot,mode,alpha_mode,premult,overlap_only,idle\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < pool->pipe_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
if (s.opp_id != 0xf) {
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,%x,%x,%x\n",
i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
s.idle);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
struct resource_pool *pool = dc->res_pool;
int i;
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,v_bs,v_be,v_ss,v_se,vpol,vmax,vmin,vmax_sel,vmin_sel,"
"h_bs,h_be,h_ss,h_se,hpol,htot,vtot,underflow,pixelclk[khz]\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
for (i = 0; i < pool->timing_generator_count; i++) {
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
int pix_clk = 0;
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
pix_clk = dc->current_state->res_ctx.pipe_ctx[i].stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
//only print if OTG master is enabled
if (s.otg_enabled & 1) {
chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d,%d,%d,%d,%d,%d,%d,%d,%d,"
"%d,%d,%d,%d,%d,%d,%d,%d,%d"
"\n",
tg->inst,
s.v_blank_start,
s.v_blank_end,
s.v_sync_a_start,
s.v_sync_a_end,
s.v_sync_a_pol,
s.v_total_max,
s.v_total_min,
s.v_total_max_sel,
s.v_total_min_sel,
s.h_blank_start,
s.h_blank_end,
s.h_sync_a_start,
s.h_sync_a_end,
s.h_sync_a_pol,
s.h_total,
s.v_total,
s.underflow_occurred_status,
pix_clk);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
}
return bufSize - remaining_buffer;
}
static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
"dppclk,fclk,socclk\n"
"%d,%d,%d,%d,%d,%d\n",
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
return bufSize - remaining_buffer;
}
static void dcn10_clear_otpc_underflow(struct dc *dc)
{
struct resource_pool *pool = dc->res_pool;
int i;
for (i = 0; i < pool->timing_generator_count; i++) {
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
if (s.otg_enabled & 1)
tg->funcs->clear_optc_underflow(tg);
}
}
static void dcn10_clear_hubp_underflow(struct dc *dc)
{
struct resource_pool *pool = dc->res_pool;
int i;
for (i = 0; i < pool->pipe_count; i++) {
struct hubp *hubp = pool->hubps[i];
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
hubp->funcs->hubp_read_state(hubp);
if (!s->blank_en)
hubp->funcs->hubp_clear_underflow(hubp);
}
}
void dcn10_clear_status_bits(struct dc *dc, unsigned int mask)
{
/*
* Mask Format
* Bit 0 - 31: Status bit to clear
*
* Mask = 0x0 means clear all status bits
*/
const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW = 0x1;
const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW = 0x2;
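	/*
	 * Illustrative usage: a mask of 0x1 clears only the HUBP underflow
	 * status, 0x2 only the OTG/OPTC underflow status, and 0x0 expands to
	 * 0xFFFFFFFF below, clearing both.
	 */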
if (mask == 0x0)
mask = 0xFFFFFFFF;
if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW)
dcn10_clear_hubp_underflow(dc);
if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW)
dcn10_clear_otpc_underflow(dc);
}
void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
{
/*
* Mask Format
	 * Bit 0 - 14: Hardware block mask
* Bit 15: 1 = Invariant Only, 0 = All
*/
const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
const unsigned int DC_HW_STATE_MASK_CM = 0x20;
const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
unsigned int chars_printed = 0;
unsigned int remaining_buf_size = bufSize;
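	/*
	 * Illustrative usage: mask = 0x8081 (0x1 | 0x80 | 0x8000) captures
	 * the HUBBUB and OTG state only, in invariant-only form; mask = 0x0
	 * falls back to 0xFFFF below, i.e. every block, invariant-only.
	 */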
if (mask == 0x0)
mask = 0xFFFF; // Default, capture all, invariant only
if ((mask & DC_HW_STATE_MASK_HUBBUB) && remaining_buf_size > 0) {
chars_printed = dcn10_get_hubbub_state(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_HUBP) && remaining_buf_size > 0) {
chars_printed = dcn10_get_hubp_states(dc, pBuf, remaining_buf_size, mask & DC_HW_STATE_INVAR_ONLY);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_RQ) && remaining_buf_size > 0) {
chars_printed = dcn10_get_rq_states(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_DLG) && remaining_buf_size > 0) {
chars_printed = dcn10_get_dlg_states(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_TTU) && remaining_buf_size > 0) {
chars_printed = dcn10_get_ttu_states(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_CM) && remaining_buf_size > 0) {
chars_printed = dcn10_get_cm_states(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_MPCC) && remaining_buf_size > 0) {
chars_printed = dcn10_get_mpcc_states(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_OTG) && remaining_buf_size > 0) {
chars_printed = dcn10_get_otg_states(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) {
chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
pBuf += chars_printed;
remaining_buf_size -= chars_printed;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dcn10_ipp.h"
#include "reg_helper.h"
#define REG(reg) \
(ippn10->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
ippn10->ipp_shift->field_name, ippn10->ipp_mask->field_name
#define CTX \
ippn10->base.ctx
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
static void dcn10_ipp_destroy(struct input_pixel_processor **ipp)
{
kfree(TO_DCN10_IPP(*ipp));
*ipp = NULL;
}
static const struct ipp_funcs dcn10_ipp_funcs = {
.ipp_destroy = dcn10_ipp_destroy
};
static const struct ipp_funcs dcn20_ipp_funcs = {
.ipp_destroy = dcn10_ipp_destroy
};
void dcn10_ipp_construct(
struct dcn10_ipp *ippn10,
struct dc_context *ctx,
int inst,
const struct dcn10_ipp_registers *regs,
const struct dcn10_ipp_shift *ipp_shift,
const struct dcn10_ipp_mask *ipp_mask)
{
ippn10->base.ctx = ctx;
ippn10->base.inst = inst;
ippn10->base.funcs = &dcn10_ipp_funcs;
ippn10->regs = regs;
ippn10->ipp_shift = ipp_shift;
ippn10->ipp_mask = ipp_mask;
}
void dcn20_ipp_construct(
struct dcn10_ipp *ippn10,
struct dc_context *ctx,
int inst,
const struct dcn10_ipp_registers *regs,
const struct dcn10_ipp_shift *ipp_shift,
const struct dcn10_ipp_mask *ipp_mask)
{
ippn10->base.ctx = ctx;
ippn10->base.inst = inst;
ippn10->base.funcs = &dcn20_ipp_funcs;
ippn10->regs = regs;
ippn10->ipp_shift = ipp_shift;
ippn10->ipp_mask = ipp_mask;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c |
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "resource.h"
#include "dwb.h"
#include "dcn10_dwb.h"
#define REG(reg)\
dwbc10->dwbc_regs->reg
#define CTX \
dwbc10->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dwbc10->dwbc_shift->field_name, dwbc10->dwbc_mask->field_name
#define TO_DCN10_DWBC(dwbc_base) \
container_of(dwbc_base, struct dcn10_dwbc, base)
static bool dwb1_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
{
if (caps) {
caps->adapter_id = 0; /* we only support 1 adapter currently */
caps->hw_version = DCN_VERSION_1_0;
caps->num_pipes = 2;
memset(&caps->reserved, 0, sizeof(caps->reserved));
memset(&caps->reserved2, 0, sizeof(caps->reserved2));
caps->sw_version = dwb_ver_1_0;
caps->caps.support_dwb = true;
caps->caps.support_ogam = false;
caps->caps.support_wbscl = true;
caps->caps.support_ocsc = false;
return true;
} else {
return false;
}
}
static bool dwb1_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn10_dwbc *dwbc10 = TO_DCN10_DWBC(dwbc);
/* disable first. */
dwbc->funcs->disable(dwbc);
/* disable power gating */
REG_UPDATE_5(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, 1,
DISPCLK_G_WB_GATE_DIS, 1, DISPCLK_G_WBSCL_GATE_DIS, 1,
WB_LB_LS_DIS, 1, WB_LUT_LS_DIS, 1);
REG_UPDATE(WB_ENABLE, WB_ENABLE, 1);
return true;
}
static bool dwb1_disable(struct dwbc *dwbc)
{
struct dcn10_dwbc *dwbc10 = TO_DCN10_DWBC(dwbc);
/* disable CNV */
REG_UPDATE(CNV_MODE, CNV_FRAME_CAPTURE_EN, 0);
/* disable WB */
REG_UPDATE(WB_ENABLE, WB_ENABLE, 0);
/* soft reset */
REG_UPDATE(WB_SOFT_RESET, WB_SOFT_RESET, 1);
REG_UPDATE(WB_SOFT_RESET, WB_SOFT_RESET, 0);
/* enable power gating */
REG_UPDATE_5(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, 0,
DISPCLK_G_WB_GATE_DIS, 0, DISPCLK_G_WBSCL_GATE_DIS, 0,
WB_LB_LS_DIS, 0, WB_LUT_LS_DIS, 0);
return true;
}
const struct dwbc_funcs dcn10_dwbc_funcs = {
.get_caps = dwb1_get_caps,
.enable = dwb1_enable,
.disable = dwb1_disable,
.update = NULL,
.set_stereo = NULL,
.set_new_content = NULL,
.set_warmup = NULL,
.dwb_set_scaler = NULL,
};
void dcn10_dwbc_construct(struct dcn10_dwbc *dwbc10,
struct dc_context *ctx,
const struct dcn10_dwbc_registers *dwbc_regs,
const struct dcn10_dwbc_shift *dwbc_shift,
const struct dcn10_dwbc_mask *dwbc_mask,
int inst)
{
dwbc10->base.ctx = ctx;
dwbc10->base.inst = inst;
dwbc10->base.funcs = &dcn10_dwbc_funcs;
dwbc10->dwbc_regs = dwbc_regs;
dwbc10->dwbc_shift = dwbc_shift;
dwbc10->dwbc_mask = dwbc_mask;
}
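/*
 * Illustrative usage (hypothetical identifiers): a DCN1.x resource
 * constructor would allocate a struct dcn10_dwbc per writeback pipe and call
 *
 *	dcn10_dwbc_construct(dwbc10, ctx, &dwbc_regs[i], &dwbc_shift,
 *			     &dwbc_mask, i);
 *
 * where dwbc_regs/dwbc_shift/dwbc_mask stand in for the ASIC-specific
 * register tables; the exact table names vary per resource file.
 */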
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dcn10_opp.h"
#include "reg_helper.h"
#define REG(reg) \
(oppn10->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
oppn10->opp_shift->field_name, oppn10->opp_mask->field_name
#define CTX \
oppn10->base.ctx
/**
* opp1_set_truncation():
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
 *	3) HW removed 12-bit FMT support on DCE11 for power-saving reasons.
*
* @oppn10: output_pixel_processor struct instance for dcn10.
* @params: pointer to bit_depth_reduction_params.
*/
static void opp1_set_truncation(
struct dcn10_opp *oppn10,
const struct bit_depth_reduction_params *params)
{
REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
FMT_TRUNCATE_EN, params->flags.TRUNCATE_ENABLED,
FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH,
FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE);
}
static void opp1_set_spatial_dither(
struct dcn10_opp *oppn10,
const struct bit_depth_reduction_params *params)
{
/*Disable spatial (random) dithering*/
REG_UPDATE_7(FMT_BIT_DEPTH_CONTROL,
FMT_SPATIAL_DITHER_EN, 0,
FMT_SPATIAL_DITHER_MODE, 0,
FMT_SPATIAL_DITHER_DEPTH, 0,
FMT_TEMPORAL_DITHER_EN, 0,
FMT_HIGHPASS_RANDOM_ENABLE, 0,
FMT_FRAME_RANDOM_ENABLE, 0,
FMT_RGB_RANDOM_ENABLE, 0);
/* only use FRAME_COUNTER_MAX if frameRandom == 1*/
if (params->flags.FRAME_RANDOM == 1) {
if (params->flags.SPATIAL_DITHER_DEPTH == 0 || params->flags.SPATIAL_DITHER_DEPTH == 1) {
REG_UPDATE_2(FMT_CONTROL,
FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15,
FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2);
} else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
REG_UPDATE_2(FMT_CONTROL,
FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3,
FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1);
} else {
return;
}
} else {
REG_UPDATE_2(FMT_CONTROL,
FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0,
FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0);
}
/*Set seed for random values for
* spatial dithering for R,G,B channels*/
REG_SET(FMT_DITHER_RAND_R_SEED, 0,
FMT_RAND_R_SEED, params->r_seed_value);
REG_SET(FMT_DITHER_RAND_G_SEED, 0,
FMT_RAND_G_SEED, params->g_seed_value);
REG_SET(FMT_DITHER_RAND_B_SEED, 0,
FMT_RAND_B_SEED, params->b_seed_value);
/* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero
* offset for the R/Cr channel, lower 4LSB
* is forced to zeros. Typically set to 0
* RGB and 0x80000 YCbCr.
*/
/* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero
* offset for the G/Y channel, lower 4LSB is
* forced to zeros. Typically set to 0 RGB
* and 0x80000 YCbCr.
*/
/* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero
* offset for the B/Cb channel, lower 4LSB is
* forced to zeros. Typically set to 0 RGB and
* 0x80000 YCbCr.
*/
REG_UPDATE_6(FMT_BIT_DEPTH_CONTROL,
/*Enable spatial dithering*/
FMT_SPATIAL_DITHER_EN, params->flags.SPATIAL_DITHER_ENABLED,
/* Set spatial dithering mode
			 * (default is Seed pattern AAAA...)
*/
FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE,
/*Set spatial dithering bit depth*/
FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH,
/*Disable High pass filter*/
FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM,
/*Reset only at startup*/
FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM,
/*Set RGB data dithered with x^28+x^3+1*/
FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
}
void opp1_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
opp1_set_truncation(oppn10, params);
opp1_set_spatial_dither(oppn10, params);
/* TODO
* set_temporal_dither(oppn10, params);
*/
}
/**
* opp1_set_pixel_encoding():
* 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
 *	1: YCbCr 4:2:2
 *	2: YCbCr 4:2:0
*
* @oppn10: output_pixel_processor struct instance for dcn10.
* @params: pointer to clamping_and_pixel_encoding_params.
*/
static void opp1_set_pixel_encoding(
struct dcn10_opp *oppn10,
const struct clamping_and_pixel_encoding_params *params)
{
switch (params->pixel_encoding) {
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
REG_UPDATE_3(FMT_CONTROL,
FMT_PIXEL_ENCODING, 1,
FMT_SUBSAMPLING_MODE, 2,
FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
break;
default:
break;
}
}
/**
* opp1_set_clamping():
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
* 2 for 10 bpc
* 3 for 12 bpc
 *				  7 for programmable
* 2) Enable clamp if Limited range requested
*
* @oppn10: output_pixel_processor struct instance for dcn10.
* @params: pointer to clamping_and_pixel_encoding_params.
*/
static void opp1_set_clamping(
struct dcn10_opp *oppn10,
const struct clamping_and_pixel_encoding_params *params)
{
REG_UPDATE_2(FMT_CLAMP_CNTL,
FMT_CLAMP_DATA_EN, 0,
FMT_CLAMP_COLOR_FORMAT, 0);
switch (params->clamping_level) {
case CLAMPING_FULL_RANGE:
REG_UPDATE_2(FMT_CLAMP_CNTL,
FMT_CLAMP_DATA_EN, 1,
FMT_CLAMP_COLOR_FORMAT, 0);
break;
case CLAMPING_LIMITED_RANGE_8BPC:
REG_UPDATE_2(FMT_CLAMP_CNTL,
FMT_CLAMP_DATA_EN, 1,
FMT_CLAMP_COLOR_FORMAT, 1);
break;
case CLAMPING_LIMITED_RANGE_10BPC:
REG_UPDATE_2(FMT_CLAMP_CNTL,
FMT_CLAMP_DATA_EN, 1,
FMT_CLAMP_COLOR_FORMAT, 2);
break;
case CLAMPING_LIMITED_RANGE_12BPC:
REG_UPDATE_2(FMT_CLAMP_CNTL,
FMT_CLAMP_DATA_EN, 1,
FMT_CLAMP_COLOR_FORMAT, 3);
break;
case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
/* TODO */
default:
break;
}
}
void opp1_set_dyn_expansion(
struct output_pixel_processor *opp,
enum dc_color_space color_sp,
enum dc_color_depth color_dpth,
enum signal_type signal)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
FMT_DYNAMIC_EXP_EN, 0,
FMT_DYNAMIC_EXP_MODE, 0);
if (opp->dyn_expansion == DYN_EXPANSION_DISABLE)
return;
/*00 - 10-bit -> 12-bit dynamic expansion*/
/*01 - 8-bit -> 12-bit dynamic expansion*/
if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
signal == SIGNAL_TYPE_DISPLAY_PORT ||
signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
signal == SIGNAL_TYPE_VIRTUAL) {
switch (color_dpth) {
case COLOR_DEPTH_888:
REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
FMT_DYNAMIC_EXP_EN, 1,
FMT_DYNAMIC_EXP_MODE, 1);
break;
case COLOR_DEPTH_101010:
REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
FMT_DYNAMIC_EXP_EN, 1,
FMT_DYNAMIC_EXP_MODE, 0);
break;
case COLOR_DEPTH_121212:
REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/
FMT_DYNAMIC_EXP_MODE, 0);
break;
default:
break;
}
}
}
static void opp1_program_clamping_and_pixel_encoding(
struct output_pixel_processor *opp,
const struct clamping_and_pixel_encoding_params *params)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
opp1_set_clamping(oppn10, params);
opp1_set_pixel_encoding(oppn10, params);
}
void opp1_program_fmt(
struct output_pixel_processor *opp,
struct bit_depth_reduction_params *fmt_bit_depth,
struct clamping_and_pixel_encoding_params *clamping)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
REG_UPDATE(FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, 0);
/* dithering is affected by <CrtcSourceSelect>, hence should be
* programmed afterwards */
opp1_program_bit_depth_reduction(
opp,
fmt_bit_depth);
opp1_program_clamping_and_pixel_encoding(
opp,
clamping);
return;
}
void opp1_program_stereo(
struct output_pixel_processor *opp,
bool enable,
const struct dc_crtc_timing *timing)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
uint32_t active_width = timing->h_addressable - timing->h_border_right - timing->h_border_right;
uint32_t space1_size = timing->v_total - timing->v_addressable;
/* TODO: confirm computation of space2_size */
uint32_t space2_size = timing->v_total - timing->v_addressable;
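	/*
	 * Illustrative example: for a timing with v_total = 1125 and
	 * v_addressable = 1080, the Vactive space programmed below is
	 * 1125 - 1080 = 45 lines.
	 */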
if (!enable) {
active_width = 0;
space1_size = 0;
space2_size = 0;
}
/* TODO: for which cases should FMT_STEREOSYNC_OVERRIDE be set? */
REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 0);
REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, active_width);
	/* Program the OPPBUF_3D_VACT_SPACE1_SIZE and OPPBUF_3D_VACT_SPACE2_SIZE registers.
	 * In 3D progressive frames, Vactive space happens only in between the 2 frames,
	 * so only OPPBUF_3D_VACT_SPACE1_SIZE needs to be programmed.
	 * In 3D frame-alternate mode, left and right frames alternate (top and bottom
	 * fields), so OPPBUF_3D_VACT_SPACE2_SIZE is programmed instead.
*/
if (timing->timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE)
REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, space2_size);
else
REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
/* TODO: Is programming of OPPBUF_DUMMY_DATA_R/G/B needed? */
/*
REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
OPPBUF_DUMMY_DATA_R, data_r);
REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
OPPBUF_DUMMY_DATA_G, data_g);
REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
OPPBUF_DUMMY_DATA_B, _data_b);
*/
}
void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
uint32_t regval = enable ? 1 : 0;
REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
}
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
void opp1_destroy(struct output_pixel_processor **opp)
{
kfree(TO_DCN10_OPP(*opp));
*opp = NULL;
}
static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_dyn_expansion = opp1_set_dyn_expansion,
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
.opp_program_stereo = opp1_program_stereo,
.opp_pipe_clock_control = opp1_pipe_clock_control,
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
.opp_destroy = opp1_destroy
};
void dcn10_opp_construct(struct dcn10_opp *oppn10,
struct dc_context *ctx,
uint32_t inst,
const struct dcn10_opp_registers *regs,
const struct dcn10_opp_shift *opp_shift,
const struct dcn10_opp_mask *opp_mask)
{
oppn10->base.ctx = ctx;
oppn10->base.inst = inst;
oppn10->base.funcs = &dcn10_opp_funcs;
oppn10->regs = regs;
oppn10->opp_shift = opp_shift;
oppn10->opp_mask = opp_mask;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dce_calcs.h"
#include "reg_helper.h"
#include "basics/conversion.h"
#include "dcn10_hubp.h"
#define REG(reg)\
hubp1->hubp_regs->reg
#define CTX \
hubp1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
hubp1->hubp_shift->field_name, hubp1->hubp_mask->field_name
void hubp1_set_blank(struct hubp *hubp, bool blank)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t blank_en = blank ? 1 : 0;
REG_UPDATE_2(DCHUBP_CNTL,
HUBP_BLANK_EN, blank_en,
HUBP_TTU_DISABLE, blank_en);
if (blank) {
uint32_t reg_val = REG_READ(DCHUBP_CNTL);
if (reg_val) {
/* init sequence workaround: in case HUBP is
			 * power gated, this wait would time out.
			 *
			 * We just wrote a non-zero value to this register;
			 * if it reads back as 0, HUBP is power gated and
			 * the wait is skipped.
*/
REG_WAIT(DCHUBP_CNTL,
HUBP_NO_OUTSTANDING_REQ, 1,
1, 200);
}
hubp->mpcc_id = 0xf;
hubp->opp_id = OPP_ID_INVALID;
}
}
static void hubp1_disconnect(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL,
HUBP_TTU_DISABLE, 1);
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, 0);
}
static void hubp1_disable_control(struct hubp *hubp, bool disable_hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t disable = disable_hubp ? 1 : 0;
REG_UPDATE(DCHUBP_CNTL,
HUBP_DISABLE, disable);
}
static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
{
uint32_t hubp_underflow = 0;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_GET(DCHUBP_CNTL,
HUBP_UNDERFLOW_STATUS,
&hubp_underflow);
return hubp_underflow;
}
void hubp1_clear_underflow(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
}
static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t blank_en = blank ? 1 : 0;
REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, blank_en);
}
void hubp1_vready_workaround(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
uint32_t value = 0;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	/* set HUBPREQ_DEBUG_DB[12] = 1 */
value = REG_READ(HUBPREQ_DEBUG_DB);
/* hack mode disable */
value |= 0x100;
value &= ~0x1000;
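	/*
	 * In line units: if vstartup_start minus twice the vready/vupdate
	 * window (vready_offset + vupdate_width + vupdate_offset, converted
	 * from pixels to lines via htotal) falls at or before vblank_end,
	 * the ECO fix bit (bit 12) must be set.
	 */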
if ((pipe_dest->vstartup_start - 2*(pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
		/* if eco_fix_needed(otg_global_sync_timing),
		 * set HUBPREQ_DEBUG_DB[12] = 1 */
value |= 0x1000;
}
REG_WRITE(HUBPREQ_DEBUG_DB, value);
}
void hubp1_program_tiling(
struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE_6(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
NUM_BANKS, log_2(info->gfx9.num_banks),
PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
NUM_SE, log_2(info->gfx9.num_shader_engines),
NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se),
MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
REG_UPDATE_4(DCSURF_TILING_CONFIG,
SW_MODE, info->gfx9.swizzle,
META_LINEAR, info->gfx9.meta_linear,
RB_ALIGNED, info->gfx9.rb_aligned,
PIPE_ALIGNED, info->gfx9.pipe_aligned);
}
void hubp1_program_size(
struct hubp *hubp,
enum surface_pixel_format format,
const struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
/* Program data and meta surface pitch (calculation from addrlib)
* 444 or 420 luma
*/
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
ASSERT(plane_size->chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
pitch = plane_size->surface_pitch - 1;
meta_pitch = dcc->meta_pitch - 1;
pitch_c = plane_size->chroma_pitch - 1;
meta_pitch_c = dcc->meta_pitch_c - 1;
} else {
pitch = plane_size->surface_pitch - 1;
meta_pitch = dcc->meta_pitch - 1;
pitch_c = 0;
meta_pitch_c = 0;
}
if (!dcc->enable) {
meta_pitch = 0;
meta_pitch_c = 0;
}
REG_UPDATE_2(DCSURF_SURFACE_PITCH,
PITCH, pitch, META_PITCH, meta_pitch);
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
}
void hubp1_program_rotation(
struct hubp *hubp,
enum dc_rotation_angle rotation,
bool horizontal_mirror)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t mirror;
if (horizontal_mirror)
mirror = 1;
else
mirror = 0;
/* Program rotation angle and horz mirror - no mirror */
if (rotation == ROTATION_ANGLE_0)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 0,
H_MIRROR_EN, mirror);
else if (rotation == ROTATION_ANGLE_90)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 1,
H_MIRROR_EN, mirror);
else if (rotation == ROTATION_ANGLE_180)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 2,
H_MIRROR_EN, mirror);
else if (rotation == ROTATION_ANGLE_270)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, 3,
H_MIRROR_EN, mirror);
}
void hubp1_program_pixel_format(
struct hubp *hubp,
enum surface_pixel_format format)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t red_bar = 3;
uint32_t blue_bar = 2;
/* swap for ABGR format */
if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
red_bar = 2;
blue_bar = 3;
}
REG_UPDATE_2(HUBPRET_CONTROL,
CROSSBAR_SRC_CB_B, blue_bar,
CROSSBAR_SRC_CR_R, red_bar);
/* Mapping is same as ipp programming (cnvc) */
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 1);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 3);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 8);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 24);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 65);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 64);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 67);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 112);
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 113);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 114);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 118);
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 116,
ALPHA_PLANE_EN, 0);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 116,
ALPHA_PLANE_EN, 1);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
	/* don't see the need to program the xbar in DCN 1.0 */
}
bool hubp1_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
//program flip type
REG_UPDATE(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_TYPE, flip_immediate);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
// turn off stereo if not in stereo
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0);
}
	/* HW automatically latches the rest of the address registers on write to
* DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used
*
* program high first and then the low addr, order matters!
*/
switch (address->type) {
case PLN_ADDR_TYPE_GRAPHICS:
/* DCN1.0 does not support const color
* TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
* base on address->grph.dcc_const_color
* x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
* x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
*/
if (address->grph.addr.quad_part == 0)
break;
REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
if (address->grph.meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->grph.meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->grph.meta_addr.low_part);
}
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->grph.addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->grph.addr.low_part);
break;
case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
if (address->video_progressive.luma_addr.quad_part == 0
|| address->video_progressive.chroma_addr.quad_part == 0)
break;
REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->video_progressive.luma_meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
address->video_progressive.chroma_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
PRIMARY_META_SURFACE_ADDRESS_C,
address->video_progressive.chroma_meta_addr.low_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->video_progressive.luma_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->video_progressive.luma_meta_addr.low_part);
}
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_SURFACE_ADDRESS_HIGH_C,
address->video_progressive.chroma_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
PRIMARY_SURFACE_ADDRESS_C,
address->video_progressive.chroma_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->video_progressive.luma_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->video_progressive.luma_addr.low_part);
break;
case PLN_ADDR_TYPE_GRPH_STEREO:
if (address->grph_stereo.left_addr.quad_part == 0)
break;
if (address->grph_stereo.right_addr.quad_part == 0)
break;
REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, address->tmz_surface,
PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
SECONDARY_SURFACE_TMZ, address->tmz_surface,
SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->grph_stereo.right_meta_addr.quad_part != 0) {
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_META_SURFACE_ADDRESS_HIGH,
address->grph_stereo.right_meta_addr.high_part);
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
SECONDARY_META_SURFACE_ADDRESS,
address->grph_stereo.right_meta_addr.low_part);
}
if (address->grph_stereo.left_meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
address->grph_stereo.left_meta_addr.high_part);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
address->grph_stereo.left_meta_addr.low_part);
}
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_SURFACE_ADDRESS_HIGH,
address->grph_stereo.right_addr.high_part);
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
SECONDARY_SURFACE_ADDRESS,
address->grph_stereo.right_addr.low_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
address->grph_stereo.left_addr.high_part);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
address->grph_stereo.left_addr.low_part);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
hubp->request_address = *address;
return true;
}
void hubp1_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
uint32_t dcc_en = enable ? 1 : 0;
uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, dcc_en,
PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
SECONDARY_SURFACE_DCC_EN, dcc_en,
SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
}
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level)
{
hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size(hubp, format, plane_size, dcc);
hubp1_program_rotation(hubp, rotation, horizontal_mirror);
hubp1_program_pixel_format(hubp, format);
}
void hubp1_program_requestor(
struct hubp *hubp,
struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
REG_SET_4(DCN_EXPANSION_MODE, 0,
DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
}
void hubp1_program_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
/* DLG - Per hubp */
REG_SET_2(BLANK_OFFSET_0, 0,
REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
REG_SET(BLANK_OFFSET_1, 0,
MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
REG_SET(DST_DIMENSIONS, 0,
REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
REG_SET_2(DST_AFTER_SCALER, 0,
REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
/* DLG - Per luma/chroma */
REG_SET(VBLANK_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
if (REG(NOM_PARAMETERS_0))
REG_SET(NOM_PARAMETERS_0, 0,
DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
if (REG(NOM_PARAMETERS_1))
REG_SET(NOM_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
REG_SET(NOM_PARAMETERS_4, 0,
DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
REG_SET(NOM_PARAMETERS_5, 0,
REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
REG_SET_2(PER_LINE_DELIVERY, 0,
REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
REG_SET(VBLANK_PARAMETERS_2, 0,
REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
if (REG(NOM_PARAMETERS_2))
REG_SET(NOM_PARAMETERS_2, 0,
DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
if (REG(NOM_PARAMETERS_3))
REG_SET(NOM_PARAMETERS_3, 0,
REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
REG_SET(NOM_PARAMETERS_6, 0,
DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
REG_SET(NOM_PARAMETERS_7, 0,
REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
/* TTU - per hubp */
REG_SET_2(DCN_TTU_QOS_WM, 0,
QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
}
static void hubp1_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
/* OTG is locked when this function is called and the registers are double
 * buffered, so disabling the requestors is not needed.
 */
hubp1_program_requestor(hubp, rq_regs);
hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
hubp1_vready_workaround(hubp, pipe_dest);
}
static void hubp1_setup_interdependent(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_SET_2(PREFETCH_SETTINS, 0,
DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
REG_SET(PREFETCH_SETTINS_C, 0,
VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
REG_SET_2(VBLANK_PARAMETERS_0, 0,
DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
REG_SET(VBLANK_PARAMETERS_3, 0,
REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
REG_SET(VBLANK_PARAMETERS_4, 0,
REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
REG_SET(DCN_SURF0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
ttu_attr->refcyc_per_req_delivery_pre_l);
REG_SET(DCN_SURF1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
ttu_attr->refcyc_per_req_delivery_pre_c);
REG_SET(DCN_CUR0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
}
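/* A flip is considered pending if the hardware flip-pending bit is still
 * set, or if the earliest in-use surface address has not yet advanced to the
 * most recently requested address.
 */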
bool hubp1_is_flip_pending(struct hubp *hubp)
{
uint32_t flip_pending = 0;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct dc_plane_address earliest_inuse_address;
if (hubp && hubp->power_gated)
return false;
REG_GET(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_PENDING, &flip_pending);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
if (flip_pending)
return true;
if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
return false;
}
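/* Defaults for the VM system aperture and context0 fault-handling targets.
 * aperture_default_system = 1 selects system physical memory (see the inline
 * comment below); context0_default_system is left at 0, which presumably
 * targets the local frame buffer.
 */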
static uint32_t aperture_default_system = 1;
static uint32_t context0_default_system; /* = 0;*/
static void hubp1_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 12;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 12;
REG_SET_2(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 0,
MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, aperture_default_system, /* 1 = system physical memory */
MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mc_vm_apt_default.high_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 0,
MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mc_vm_apt_default.low_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, 0,
MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mc_vm_apt_low.high_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, 0,
MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mc_vm_apt_low.low_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, 0,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mc_vm_apt_high.high_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, 0,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mc_vm_apt_high.low_part);
}
static void hubp1_set_vm_context0_settings(struct hubp *hubp,
const struct vm_context0_param *vm0)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
/* pte base */
REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, 0,
VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, vm0->pte_base.high_part);
REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, 0,
VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, vm0->pte_base.low_part);
/* pte start */
REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, 0,
VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, vm0->pte_start.high_part);
REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, 0,
VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, vm0->pte_start.low_part);
/* pte end */
REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, 0,
VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, vm0->pte_end.high_part);
REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, 0,
VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, vm0->pte_end.low_part);
/* fault handling */
REG_SET_2(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0,
VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, vm0->fault_default.high_part,
VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, context0_default_system);
REG_SET(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0,
VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, vm0->fault_default.low_part);
/* control: enable VM PTE*/
REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
ENABLE_L1_TLB, 1,
SYSTEM_ACCESS_MODE, 3);
}
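/* Program the primary and secondary (stereo) viewport start and dimensions
 * for both the luma and the chroma (_C) planes.
 */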
void min_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
const struct rect *viewport_c)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
PRI_VIEWPORT_WIDTH, viewport->width,
PRI_VIEWPORT_HEIGHT, viewport->height);
REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
PRI_VIEWPORT_X_START, viewport->x,
PRI_VIEWPORT_Y_START, viewport->y);
/*for stereo*/
REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
SEC_VIEWPORT_WIDTH, viewport->width,
SEC_VIEWPORT_HEIGHT, viewport->height);
REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
SEC_VIEWPORT_X_START, viewport->x,
SEC_VIEWPORT_Y_START, viewport->y);
/* DC supports NV12 only at the moment */
REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
PRI_VIEWPORT_WIDTH_C, viewport_c->width,
PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
PRI_VIEWPORT_Y_START_C, viewport_c->y);
REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
SEC_VIEWPORT_WIDTH_C, viewport_c->width,
SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
SEC_VIEWPORT_X_START_C, viewport_c->x,
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
void hubp1_read_state_common(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct dcn_hubp_state *s = &hubp1->state;
struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
uint32_t aperture_low_msb, aperture_low_lsb;
uint32_t aperture_high_msb, aperture_high_lsb;
/* Requester */
REG_GET(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
REG_GET_4(DCN_EXPANSION_MODE,
DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB,
MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, &aperture_low_msb);
REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB,
MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, &aperture_low_lsb);
REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, &aperture_high_msb);
REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, &aperture_high_lsb);
// On DCN1, aperture is broken down into MSB and LSB; only keep bits [47:18] to match later DCN format
rq_regs->aperture_low_addr = (aperture_low_msb << 26) | (aperture_low_lsb >> 6);
rq_regs->aperture_high_addr = (aperture_high_msb << 26) | (aperture_high_lsb >> 6);
/* DLG - Per hubp */
REG_GET_2(BLANK_OFFSET_0,
REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
REG_GET(BLANK_OFFSET_1,
MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
REG_GET(DST_DIMENSIONS,
REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
REG_GET_2(DST_AFTER_SCALER,
REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
if (REG(PREFETCH_SETTINS))
REG_GET_2(PREFETCH_SETTINS,
DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
else
REG_GET_2(PREFETCH_SETTINGS,
DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
REG_GET_2(VBLANK_PARAMETERS_0,
DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
REG_GET(REF_FREQ_TO_PIX_FREQ,
REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
/* DLG - Per luma/chroma */
REG_GET(VBLANK_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
REG_GET(VBLANK_PARAMETERS_3,
REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
if (REG(NOM_PARAMETERS_0))
REG_GET(NOM_PARAMETERS_0,
DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
if (REG(NOM_PARAMETERS_1))
REG_GET(NOM_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
REG_GET(NOM_PARAMETERS_4,
DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
REG_GET(NOM_PARAMETERS_5,
REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
REG_GET_2(PER_LINE_DELIVERY_PRE,
REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
REG_GET_2(PER_LINE_DELIVERY,
REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
if (REG(PREFETCH_SETTINS_C))
REG_GET(PREFETCH_SETTINS_C,
VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
else
REG_GET(PREFETCH_SETTINGS_C,
VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
REG_GET(VBLANK_PARAMETERS_2,
REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
REG_GET(VBLANK_PARAMETERS_4,
REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
if (REG(NOM_PARAMETERS_2))
REG_GET(NOM_PARAMETERS_2,
DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
if (REG(NOM_PARAMETERS_3))
REG_GET(NOM_PARAMETERS_3,
REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
REG_GET(NOM_PARAMETERS_6,
DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
REG_GET(NOM_PARAMETERS_7,
REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
/* TTU - per hubp */
REG_GET_2(DCN_TTU_QOS_WM,
QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
REG_GET_2(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
REG_GET_3(DCN_SURF0_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
REG_GET(DCN_SURF0_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE,
&ttu_attr->refcyc_per_req_delivery_pre_l);
REG_GET_3(DCN_SURF1_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
REG_GET(DCN_SURF1_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE,
&ttu_attr->refcyc_per_req_delivery_pre_c);
/* Rest of hubp */
REG_GET(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, &s->pixel_format);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo);
REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
PRI_VIEWPORT_WIDTH, &s->viewport_width,
PRI_VIEWPORT_HEIGHT, &s->viewport_height);
REG_GET_2(DCSURF_SURFACE_CONFIG,
ROTATION_ANGLE, &s->rotation_angle,
H_MIRROR_EN, &s->h_mirror_en);
REG_GET(DCSURF_TILING_CONFIG,
SW_MODE, &s->sw_mode);
REG_GET(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
REG_GET_3(DCHUBP_CNTL,
HUBP_BLANK_EN, &s->blank_en,
HUBP_TTU_DISABLE, &s->ttu_disable,
HUBP_UNDERFLOW_STATUS, &s->underflow_status);
REG_GET(HUBP_CLK_CNTL,
HUBP_CLOCK_ENABLE, &s->clock_en);
REG_GET(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &s->min_ttu_vblank);
REG_GET_2(DCN_TTU_QOS_WM,
QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS,
PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_lo);
REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH,
PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_hi);
REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS,
PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_lo);
REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH,
PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_hi);
}
void hubp1_read_state(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct dcn_hubp_state *s = &hubp1->state;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
hubp1_read_state_common(hubp);
REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
}
enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch)
{
enum cursor_pitch hw_pitch;
switch (pitch) {
case 64:
hw_pitch = CURSOR_PITCH_64_PIXELS;
break;
case 128:
hw_pitch = CURSOR_PITCH_128_PIXELS;
break;
case 256:
hw_pitch = CURSOR_PITCH_256_PIXELS;
break;
default:
DC_ERR("Invalid cursor pitch of %d. "
"Only 64/128/256 is supported on DCN.\n", pitch);
hw_pitch = CURSOR_PITCH_64_PIXELS;
break;
}
return hw_pitch;
}
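/* Pick the cursor lines-per-chunk so that a chunk covers at most 512 cursor
 * pixels for color cursors (e.g. 32x16, 64x8, 128x4, 256x2 = 512). Mono
 * cursors always use 16 lines because they are expanded in the cursor
 * buffer reader.
 */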
static enum cursor_lines_per_chunk hubp1_get_lines_per_chunk(
unsigned int cur_width,
enum dc_cursor_color_format format)
{
enum cursor_lines_per_chunk line_per_chunk;
if (format == CURSOR_MODE_MONO)
/* impl B. expansion in CUR Buffer reader */
line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
else if (cur_width <= 32)
line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
else if (cur_width <= 64)
line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
else if (cur_width <= 128)
line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
else
line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
return line_per_chunk;
}
void hubp1_cursor_set_attributes(
struct hubp *hubp,
const struct dc_cursor_attributes *attr)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
enum cursor_lines_per_chunk lpc = hubp1_get_lines_per_chunk(
attr->width, attr->color_format);
hubp->curs_attr = *attr;
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
REG_UPDATE(CURSOR_SURFACE_ADDRESS,
CURSOR_SURFACE_ADDRESS, attr->address.low_part);
REG_UPDATE_2(CURSOR_SIZE,
CURSOR_WIDTH, attr->width,
CURSOR_HEIGHT, attr->height);
REG_UPDATE_3(CURSOR_CONTROL,
CURSOR_MODE, attr->color_format,
CURSOR_PITCH, hw_pitch,
CURSOR_LINES_PER_CHUNK, lpc);
REG_SET_2(CURSOR_SETTINS, 0,
/* no shift of the cursor HDL schedule */
CURSOR0_DST_Y_OFFSET, 0,
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
}
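/* Program the cursor position: convert the screen position to a
 * viewport-relative source offset, apply the hotspot and any rotation/mirror
 * transform, convert the destination X offset from pixel clock to refclk
 * cycles and divide it by the horizontal scale ratio, and disable the cursor
 * while it is entirely outside the viewport.
 */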
void hubp1_cursor_set_position(
struct hubp *hubp,
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
int x_pos = pos->x - param->viewport.x;
int y_pos = pos->y - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
int src_x_offset = x_pos - pos->x_hotspot;
int src_y_offset = y_pos - pos->y_hotspot;
int cursor_height = (int)hubp->curs_attr.height;
int cursor_width = (int)hubp->curs_attr.width;
uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
hubp->curs_pos = *pos;
/*
* Guard against cursor_set_position() being called with invalid
* attributes
*
* TODO: Look at combining cursor_set_position() and
* cursor_set_attributes() into cursor_update()
*/
if (hubp->curs_attr.address.quad_part == 0)
return;
// Transform cursor width / height and hotspots for offset calculations
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
swap(cursor_height, cursor_width);
swap(x_hotspot, y_hotspot);
if (param->rotation == ROTATION_ANGLE_90) {
// hotspot = (-y, x)
src_x_offset = x_pos - (cursor_width - x_hotspot);
src_y_offset = y_pos - y_hotspot;
} else if (param->rotation == ROTATION_ANGLE_270) {
// hotspot = (y, -x)
src_x_offset = x_pos - x_hotspot;
src_y_offset = y_pos - (cursor_height - y_hotspot);
}
} else if (param->rotation == ROTATION_ANGLE_180) {
// hotspot = (-x, -y)
if (!param->mirror)
src_x_offset = x_pos - (cursor_width - x_hotspot);
src_y_offset = y_pos - (cursor_height - y_hotspot);
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
dst_x_offset *= param->ref_clk_khz;
dst_x_offset /= param->pixel_clk_khz;
ASSERT(param->h_scale_ratio.value);
if (param->h_scale_ratio.value)
dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
CURSOR_HOT_SPOT_X, pos->x_hotspot,
CURSOR_HOT_SPOT_Y, pos->y_hotspot);
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
}
/**
* hubp1_clk_cntl - Disable or enable clocks for DCHUBP
*
* @hubp: hubp struct reference.
* @enable: Set true to enable the HUBP clock, false to gate it.
*
* Enabling or disabling the DCHUBP clock also affects dcfclk/dppclk.
*/
void hubp1_clk_cntl(struct hubp *hubp, bool enable)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t clk_enable = enable ? 1 : 0;
REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable);
}
void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
bool hubp1_in_blank(struct hubp *hubp)
{
uint32_t in_blank;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_GET(DCHUBP_CNTL, HUBP_IN_BLANK, &in_blank);
return in_blank ? true : false;
}
void hubp1_soft_reset(struct hubp *hubp, bool reset)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
}
/**
* hubp1_set_flip_int - Enable surface flip interrupt
*
* @hubp: hubp struct reference.
*/
void hubp1_set_flip_int(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE(DCSURF_SURFACE_FLIP_INTERRUPT,
SURFACE_FLIP_INT_MASK, 1);
return;
}
/**
* hubp1_wait_pipe_read_start - Wait for the HUBP return path to start reading.
*
* @hubp: hubp struct reference.
*/
static void hubp1_wait_pipe_read_start(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_WAIT(HUBPRET_READ_LINE_STATUS,
PIPE_READ_VBLANK, 0,
1, 1000);
}
void hubp1_init(struct hubp *hubp)
{
//do nothing
}
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
.hubp_program_surface_config =
hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp1_setup,
.hubp_setup_interdependent = hubp1_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_hubp_blank_en = hubp1_set_hubp_blank_en,
.set_cursor_attributes = hubp1_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
.hubp_disconnect = hubp1_disconnect,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
.hubp_read_state = hubp1_read_state,
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
.hubp_init = hubp1_init,
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_in_blank = hubp1_in_blank,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_wait_pipe_read_start = hubp1_wait_pipe_read_start,
};
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
void dcn10_hubp_construct(
struct dcn10_hubp *hubp1,
struct dc_context *ctx,
uint32_t inst,
const struct dcn_mi_registers *hubp_regs,
const struct dcn_mi_shift *hubp_shift,
const struct dcn_mi_mask *hubp_mask)
{
hubp1->base.funcs = &dcn10_hubp_funcs;
hubp1->base.ctx = ctx;
hubp1->hubp_regs = hubp_regs;
hubp1->hubp_shift = hubp_shift;
hubp1->hubp_mask = hubp_mask;
hubp1->base.inst = inst;
hubp1->base.opp_id = OPP_ID_INVALID;
hubp1->base.mpcc_id = 0xf;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "reg_helper.h"
#define CTX \
hubbub1->base.ctx
#define DC_LOGGER \
hubbub1->base.ctx->logger
#define REG(reg)\
hubbub1->regs->reg
#undef FN
#define FN(reg_name, field_name) \
hubbub1->shifts->field_name, hubbub1->masks->field_name
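/* Read back all four watermark sets (A-D) from the arbiter registers, e.g.
 * for logging or debugfs. The self-refresh enter/exit watermark registers
 * are read only when they exist on this variant.
 */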
void hubbub1_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
struct dcn_hubbub_wm_set *s;
memset(wm, 0, sizeof(struct dcn_hubbub_wm));
s = &wm->sets[0];
s->wm_set = 0;
s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
}
s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
s = &wm->sets[1];
s->wm_set = 1;
s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
}
s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
s = &wm->sets[2];
s->wm_set = 2;
s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
}
s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
s = &wm->sets[3];
s->wm_set = 3;
s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
}
s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
/*
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
*/
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
}
bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t enable = 0;
REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
return enable ? true : false;
}
bool hubbub1_verify_allow_pstate_change_high(
struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
/* pstate latency is ~20us, so if we wait over 40us and pstate allow is
 * still not asserted, we are probably stuck and going to hang.
 *
 * TODO: Figure out why it takes ~100us (up to 200us) on Linux; the reason
 * for the longer latency is currently unknown.
 */
const unsigned int pstate_wait_timeout_us = 200;
const unsigned int pstate_wait_expected_timeout_us = 180;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
unsigned int debug_data;
unsigned int i;
if (forced_pstate_allow) {
/* pstate allow was forced on the previous call of
 * verify_allow_pstate_change_high() to prevent a hang, so disable the
 * force here so we can check the real status.
 */
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
forced_pstate_allow = false;
}
/* The following table only applies to DCN1 and DCN2,
* for newer DCNs, need to consult with HW IP folks to read RTL
* HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
* description
* 0: Pipe0 Plane0 Allow Pstate Change
* 1: Pipe0 Plane1 Allow Pstate Change
* 2: Pipe0 Cursor0 Allow Pstate Change
* 3: Pipe0 Cursor1 Allow Pstate Change
* 4: Pipe1 Plane0 Allow Pstate Change
* 5: Pipe1 Plane1 Allow Pstate Change
* 6: Pipe1 Cursor0 Allow Pstate Change
* 7: Pipe1 Cursor1 Allow Pstate Change
* 8: Pipe2 Plane0 Allow Pstate Change
* 9: Pipe2 Plane1 Allow Pstate Change
* 10: Pipe2 Cursor0 Allow Pstate Change
* 11: Pipe2 Cursor1 Allow Pstate Change
* 12: Pipe3 Plane0 Allow Pstate Change
* 13: Pipe3 Plane1 Allow Pstate Change
* 14: Pipe3 Cursor0 Allow Pstate Change
* 15: Pipe3 Cursor1 Allow Pstate Change
* 16: Pipe4 Plane0 Allow Pstate Change
* 17: Pipe4 Plane1 Allow Pstate Change
* 18: Pipe4 Cursor0 Allow Pstate Change
* 19: Pipe4 Cursor1 Allow Pstate Change
* 20: Pipe5 Plane0 Allow Pstate Change
* 21: Pipe5 Plane1 Allow Pstate Change
* 22: Pipe5 Cursor0 Allow Pstate Change
* 23: Pipe5 Cursor1 Allow Pstate Change
* 24: Pipe6 Plane0 Allow Pstate Change
* 25: Pipe6 Plane1 Allow Pstate Change
* 26: Pipe6 Cursor0 Allow Pstate Change
* 27: Pipe6 Cursor1 Allow Pstate Change
* 28: WB0 Allow Pstate Change
* 29: WB1 Allow Pstate Change
* 30: Arbiter's allow_pstate_change
* 31: SOC pstate change request
*/
REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);
for (i = 0; i < pstate_wait_timeout_us; i++) {
debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
if (debug_data & (1 << 30)) {
if (i > pstate_wait_expected_timeout_us)
DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
i);
return true;
}
if (max_sampled_pstate_wait_us < i)
max_sampled_pstate_wait_us = i;
udelay(1);
}
/* force pstate allow to prevent system hang
* and break to debugger to investigate
*/
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
forced_pstate_allow = true;
DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
debug_data);
return false;
}
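/* Convert a watermark expressed in nanoseconds into refclk cycles and clamp
 * it to the register width: cycles = wm_ns * refclk_mhz / 1000. For example,
 * 4000 ns at a 100 MHz refclk programs 400 cycles; the callers pass
 * clamp_value = 0x1fffff to match the 21-bit watermark fields.
 */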
static uint32_t convert_and_clamp(
uint32_t wm_ns,
uint32_t refclk_mhz,
uint32_t clamp_value)
{
uint32_t ret_val = 0;
ret_val = wm_ns * refclk_mhz;
ret_val /= 1000;
if (ret_val > clamp_value)
ret_val = clamp_value;
return ret_val;
}
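/* Workaround: pulse the watermark change request bit low then high so the
 * arbiter picks up newly programmed watermark values.
 */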
void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
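/* The three watermark programming helpers below all follow the same pattern:
 * a value is only written if it is being raised, or if safe_to_lower is set;
 * otherwise the lower value is deferred and the function returns true
 * (wm_pending) so the caller knows to program it again later, once lowering
 * is safe.
 */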
bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.pte_meta_urgent_ns, prog_wm_value);
} else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.pte_meta_urgent_ns, prog_wm_value);
} else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.pte_meta_urgent_ns, prog_wm_value);
} else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.pte_meta_urgent_ns, prog_wm_value);
} else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
wm_pending = true;
return wm_pending;
}
bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
watermarks->a.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->a.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
watermarks->b.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->b.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
watermarks->c.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->c.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
watermarks->d.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->d.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
wm_pending = true;
return wm_pending;
}
bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->a.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->b.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->c.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
watermarks->d.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->d.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
wm_pending = true;
return wm_pending;
}
bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
#if 0
REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
return wm_pending;
}
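/* Program the SDPIF frame-buffer and AGP aperture ranges according to the
 * frame-buffer mode. ZFB here appears to refer to scanning out of system
 * memory ("zero frame buffer"); in that mode FB BASE/TOP are programmed
 * upside down and the AGP aperture describes the system-memory range.
 */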
void hubbub1_update_dchub(
struct hubbub *hubbub,
struct dchub_init_data *dh_data)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
ASSERT(false);
/*should not come here*/
return;
}
/* TODO: port code from dal2 */
switch (dh_data->fb_mode) {
case FRAME_BUFFER_MODE_ZFB_ONLY:
/* For the ZFB case, DCHUB FB BASE and TOP need to be programmed upside down (TOP < BASE) to indicate ZFB mode */
REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
SDPIF_FB_TOP, 0);
REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
SDPIF_FB_BASE, 0x0FFFF);
REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
dh_data->zfb_size_in_byte - 1) >> 22);
break;
case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
dh_data->zfb_size_in_byte - 1) >> 22);
break;
case FRAME_BUFFER_MODE_LOCAL_ONLY:
/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
SDPIF_AGP_BASE, 0);
REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
SDPIF_AGP_BOT, 0X03FFFF);
REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
SDPIF_AGP_TOP, 0);
break;
default:
break;
}
dh_data->dchub_initialzied = true;
dh_data->dchub_info_valid = false;
}
void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t watermark_change_req;
REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
if (watermark_change_req)
watermark_change_req = 0;
else
watermark_change_req = 1;
REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}
void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t reset_en = reset ? 1 : 0;
REG_UPDATE(DCHUBBUB_SOFT_RESET,
DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}
static bool hubbub1_dcc_support_swizzle(
enum swizzle_mode_values swizzle,
unsigned int bytes_per_element,
enum segment_order *segment_order_horz,
enum segment_order *segment_order_vert)
{
bool standard_swizzle = false;
bool display_swizzle = false;
switch (swizzle) {
case DC_SW_4KB_S:
case DC_SW_64KB_S:
case DC_SW_VAR_S:
case DC_SW_4KB_S_X:
case DC_SW_64KB_S_X:
case DC_SW_VAR_S_X:
standard_swizzle = true;
break;
case DC_SW_4KB_D:
case DC_SW_64KB_D:
case DC_SW_VAR_D:
case DC_SW_4KB_D_X:
case DC_SW_64KB_D_X:
case DC_SW_VAR_D_X:
display_swizzle = true;
break;
default:
break;
}
if (bytes_per_element == 1 && standard_swizzle) {
*segment_order_horz = segment_order__contiguous;
*segment_order_vert = segment_order__na;
return true;
}
if (bytes_per_element == 2 && standard_swizzle) {
*segment_order_horz = segment_order__non_contiguous;
*segment_order_vert = segment_order__contiguous;
return true;
}
if (bytes_per_element == 4 && standard_swizzle) {
*segment_order_horz = segment_order__non_contiguous;
*segment_order_vert = segment_order__contiguous;
return true;
}
if (bytes_per_element == 8 && standard_swizzle) {
*segment_order_horz = segment_order__na;
*segment_order_vert = segment_order__contiguous;
return true;
}
if (bytes_per_element == 8 && display_swizzle) {
*segment_order_horz = segment_order__contiguous;
*segment_order_vert = segment_order__non_contiguous;
return true;
}
return false;
}
static bool hubbub1_dcc_support_pixel_format(
enum surface_pixel_format format,
unsigned int *bytes_per_element)
{
/* DML: get_bytes_per_element */
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
*bytes_per_element = 2;
return true;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
*bytes_per_element = 4;
return true;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
*bytes_per_element = 8;
return true;
default:
return false;
}
}
static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
unsigned int bytes_per_element)
{
/* copied from DML; might want to refactor so this is leveraged from DML instead of duplicated */
/* DML : get_blk256_size */
if (bytes_per_element == 1) {
*blk256_width = 16;
*blk256_height = 16;
} else if (bytes_per_element == 2) {
*blk256_width = 16;
*blk256_height = 8;
} else if (bytes_per_element == 4) {
*blk256_width = 8;
*blk256_height = 8;
} else if (bytes_per_element == 8) {
*blk256_width = 8;
*blk256_height = 4;
}
}
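/* Decide between full 256B and half 128B requests: half-size requests are
 * only needed when two swaths no longer fit in the 164KB detile buffer. For
 * example, a 1920-wide surface at 4 bytes per element has a 256B block
 * height of 8, so a horizontal swath is 1920 * 8 * 4 = 61440 bytes;
 * 2 * 61440 = 122880 <= 167936, so full 256B requests can be used.
 */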
static void hubbub1_det_request_size(
unsigned int height,
unsigned int width,
unsigned int bpe,
bool *req128_horz_wc,
bool *req128_vert_wc)
{
unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
unsigned int blk256_height = 0;
unsigned int blk256_width = 0;
unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
swath_bytes_horz_wc = width * blk256_height * bpe;
swath_bytes_vert_wc = height * blk256_width * bpe;
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
false : /* full 256B request */
true; /* half 128b request */
*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
false : /* full 256B request */
true; /* half 128b request */
}
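/* Report the DCC compression capability for a surface: 256/256 when full
 * 256B requests fit, 128/128 when half requests are needed but the segment
 * order is contiguous, otherwise 256B max uncompressed / 64B max compressed
 * with independent 64B blocks. Returns false when DCC is disabled via debug
 * options or the format/swizzle combination is not supported.
 */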
static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
struct dc *dc = hubbub1->base.ctx->dc;
/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
enum dcc_control dcc_control;
unsigned int bpe;
enum segment_order segment_order_horz, segment_order_vert;
bool req128_horz_wc, req128_vert_wc;
memset(output, 0, sizeof(*output));
if (dc->debug.disable_dcc == DCC_DISABLE)
return false;
if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
return false;
if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
&segment_order_horz, &segment_order_vert))
return false;
hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
dcc_control = dcc_control__256_256_xxx;
} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
if (!req128_horz_wc)
dcc_control = dcc_control__256_256_xxx;
else if (segment_order_horz == segment_order__contiguous)
dcc_control = dcc_control__128_128_xxx;
else
dcc_control = dcc_control__256_64_64;
} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
if (!req128_vert_wc)
dcc_control = dcc_control__256_256_xxx;
else if (segment_order_vert == segment_order__contiguous)
dcc_control = dcc_control__128_128_xxx;
else
dcc_control = dcc_control__256_64_64;
} else {
if ((req128_horz_wc &&
segment_order_horz == segment_order__non_contiguous) ||
(req128_vert_wc &&
segment_order_vert == segment_order__non_contiguous))
/* access_dir not known, must use most constraining */
dcc_control = dcc_control__256_64_64;
else
/* req128 is true for either horz or vert,
 * but the segment order is contiguous
 */
dcc_control = dcc_control__128_128_xxx;
}
if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
dcc_control != dcc_control__256_256_xxx)
return false;
switch (dcc_control) {
case dcc_control__256_256_xxx:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 256;
output->grph.rgb.independent_64b_blks = false;
break;
case dcc_control__128_128_xxx:
output->grph.rgb.max_uncompressed_blk_size = 128;
output->grph.rgb.max_compressed_blk_size = 128;
output->grph.rgb.independent_64b_blks = false;
break;
case dcc_control__256_64_64:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
default:
ASSERT(false);
break;
}
output->capable = true;
output->const_color_support = false;
return true;
}
static const struct hubbub_funcs hubbub1_funcs = {
.update_dchub = hubbub1_update_dchub,
.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
.wm_read_state = hubbub1_wm_read_state,
.program_watermarks = hubbub1_program_watermarks,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
};
void hubbub1_construct(struct hubbub *hubbub,
struct dc_context *ctx,
const struct dcn_hubbub_registers *hubbub_regs,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
hubbub1->base.ctx = ctx;
hubbub1->base.funcs = &hubbub1_funcs;
hubbub1->regs = hubbub_regs;
hubbub1->shifts = hubbub_shift;
hubbub1->masks = hubbub_mask;
hubbub1->debug_test_index_pstate = 0x7;
if (ctx->dce_version == DCN_VERSION_1_01)
hubbub1->debug_test_index_pstate = 0xB;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "basics/conversion.h"
#include "dcn10_cm_common.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
#define REG(reg)\
dpp->tf_regs->reg
#define CTX \
dpp->base.ctx
#undef FN
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
enum dcn10_coef_filter_type_sel {
SCL_COEF_LUMA_VERT_FILTER = 0,
SCL_COEF_LUMA_HORZ_FILTER = 1,
SCL_COEF_CHROMA_VERT_FILTER = 2,
SCL_COEF_CHROMA_HORZ_FILTER = 3,
SCL_COEF_ALPHA_VERT_FILTER = 4,
SCL_COEF_ALPHA_HORZ_FILTER = 5
};
enum dscl_autocal_mode {
AUTOCAL_MODE_OFF = 0,
/* Autocal calculates the scaling ratio and initial phase;
 * DSCL_MODE_SEL must be set to 1
 */
AUTOCAL_MODE_AUTOSCALE = 1,
/* Autocal performs auto centering without replication;
 * DSCL_MODE_SEL must be set to 0
 */
AUTOCAL_MODE_AUTOCENTER = 2,
/* Autocal performs auto centering and auto replication;
 * DSCL_MODE_SEL must be set to 0
 */
AUTOCAL_MODE_AUTOREPLICATE = 3
};
enum dscl_mode_sel {
DSCL_MODE_SCALING_444_BYPASS = 0,
DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
DSCL_MODE_DSCL_BYPASS = 6
};
static void program_gamut_remap(
struct dcn10_dpp *dpp,
const uint16_t *regval,
enum gamut_remap_select select)
{
uint16_t selection = 0;
struct color_matrices_reg gam_regs;
if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
CM_GAMUT_REMAP_MODE, 0);
return;
}
switch (select) {
case GAMUT_REMAP_COEFF:
selection = 1;
break;
case GAMUT_REMAP_COMA_COEFF:
selection = 2;
break;
case GAMUT_REMAP_COMB_COEFF:
selection = 3;
break;
default:
break;
}
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
if (select == GAMUT_REMAP_COEFF) {
gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&gam_regs);
} else if (select == GAMUT_REMAP_COMA_COEFF) {
gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&gam_regs);
} else {
gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&gam_regs);
}
REG_SET(
CM_GAMUT_REMAP_CONTROL, 0,
CM_GAMUT_REMAP_MODE, selection);
}
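/* Program the gamut remap coefficients from the software adjustment, or put
 * the gamut remap block in bypass when no software adjustment is requested.
 */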
void dpp1_cm_set_gamut_remap(
struct dpp *dpp_base,
const struct dpp_grph_csc_adjustment *adjust)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
int i = 0;
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
/* Bypass if type is bypass or hw */
program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
else {
struct fixed31_32 arr_matrix[12];
uint16_t arr_reg_val[12];
for (i = 0; i < 12; i++)
arr_matrix[i] = adjust->temperature_matrix[i];
convert_float_matrix(
arr_reg_val, arr_matrix, 12);
program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
}
}
static void dpp1_cm_program_color_matrix(
struct dcn10_dpp *dpp,
const uint16_t *regval)
{
uint32_t ocsc_mode;
uint32_t cur_mode;
struct color_matrices_reg gam_regs;
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
/* determine which CSC matrix (ocsc or comb) we are using
* currently. select the alternate set to double buffer
* the CSC update so CSC is updated on frame boundary
*/
REG_SET(CM_TEST_DEBUG_INDEX, 0,
CM_TEST_DEBUG_INDEX, 9);
REG_GET(CM_TEST_DEBUG_DATA,
CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);
if (cur_mode != 4)
ocsc_mode = 4;
else
ocsc_mode = 5;
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
if (ocsc_mode == 4) {
gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
} else {
gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
}
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&gam_regs);
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
}
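/* Look up the default output CSC matrix for the given color space and
 * program it.
 */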
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
enum dc_color_space colorspace)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
const uint16_t *regval = NULL;
int arr_size;
regval = find_color_matrix(colorspace, &arr_size);
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
dpp1_cm_program_color_matrix(dpp, regval);
}
static void dpp1_cm_get_reg_field(
struct dcn10_dpp *dpp,
struct xfer_func_reg *reg)
{
reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
static void dpp1_cm_get_degamma_reg_field(
struct dcn10_dpp *dpp,
struct xfer_func_reg *reg)
{
reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
void dpp1_cm_set_output_csc_adjustment(
struct dpp *dpp_base,
const uint16_t *regval)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
dpp1_cm_program_color_matrix(dpp, regval);
}
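/* Power the regamma LUT memory on (clear the memory power force) or force
 * it back down when power_on is false.
 */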
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
bool power_on)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_SET(CM_MEM_PWR_CTRL, 0,
RGAM_MEM_PWR_FORCE, power_on ? 0 : 1);
}
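/* Stream the regamma PWL entries (base and delta values for R, G and B)
 * sequentially through the LUT data port.
 */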
void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
const struct pwl_result_data *rgb,
uint32_t num)
{
uint32_t i;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_SEQ_START();
for (i = 0 ; i < num; i++) {
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
}
}
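/* Enable writes on all three color channels, select regamma RAM A or B as
 * the write target and reset the LUT index to 0.
 */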
void dpp1_cm_configure_regamma_lut(
struct dpp *dpp_base,
bool is_ram_a)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
CM_RGAM_LUT_WRITE_EN_MASK, 7);
REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
CM_RGAM_LUT_WRITE_SEL, is_ram_a ? 0 : 1);
REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
}
/* Program regamma RAM A */
void dpp1_cm_program_regamma_luta_settings(
struct dpp *dpp_base,
const struct pwl_params *params)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
struct xfer_func_reg gam_regs;
dpp1_cm_get_reg_field(dpp, &gam_regs);
gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);
cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
/* Program regamma RAM B */
void dpp1_cm_program_regamma_lutb_settings(
struct dpp *dpp_base,
const struct pwl_params *params)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
struct xfer_func_reg gam_regs;
dpp1_cm_get_reg_field(dpp, &gam_regs);
gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);
cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
void dpp1_program_input_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
enum dcn10_input_csc_select input_select,
const struct out_csc_color_matrix *tbl_entry)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
int i;
int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
const uint16_t *regval = NULL;
uint32_t cur_select = 0;
enum dcn10_input_csc_select select;
struct color_matrices_reg gam_regs;
if (input_select == INPUT_CSC_SELECT_BYPASS) {
REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
return;
}
if (tbl_entry == NULL) {
for (i = 0; i < arr_size; i++)
if (dpp_input_csc_matrix[i].color_space == color_space) {
regval = dpp_input_csc_matrix[i].regval;
break;
}
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
} else {
regval = tbl_entry->regval;
}
/* determine which CSC matrix (icsc or coma) we are using
* currently. select the alternate set to double buffer
* the CSC update so CSC is updated on frame boundary
*/
REG_SET(CM_TEST_DEBUG_INDEX, 0,
CM_TEST_DEBUG_INDEX, 9);
REG_GET(CM_TEST_DEBUG_DATA,
CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);
if (cur_select != INPUT_CSC_SELECT_ICSC)
select = INPUT_CSC_SELECT_ICSC;
else
select = INPUT_CSC_SELECT_COMA;
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
if (select == INPUT_CSC_SELECT_ICSC) {
gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
} else {
gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
}
cm_helper_program_color_matrices(
dpp->base.ctx,
regval,
&gam_regs);
REG_SET(CM_ICSC_CONTROL, 0,
CM_ICSC_MODE, select);
}
// Keep here for now; decide multi-DCE support later
void dpp1_program_bias_and_scale(
struct dpp *dpp_base,
struct dc_bias_and_scale *params)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_SET_2(CM_BNS_VALUES_R, 0,
CM_BNS_SCALE_R, params->scale_red,
CM_BNS_BIAS_R, params->bias_red);
REG_SET_2(CM_BNS_VALUES_G, 0,
CM_BNS_SCALE_G, params->scale_green,
CM_BNS_BIAS_G, params->bias_green);
REG_SET_2(CM_BNS_VALUES_B, 0,
CM_BNS_SCALE_B, params->scale_blue,
CM_BNS_BIAS_B, params->bias_blue);
}
/* Program degamma RAM B */
void dpp1_program_degamma_lutb_settings(
struct dpp *dpp_base,
const struct pwl_params *params)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
struct xfer_func_reg gam_regs;
dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);
gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);
cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
/* Program degamma RAM A */
void dpp1_program_degamma_luta_settings(
struct dpp *dpp_base,
const struct pwl_params *params)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
struct xfer_func_reg gam_regs;
dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);
gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);
cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
void dpp1_power_on_degamma_lut(
struct dpp *dpp_base,
bool power_on)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_SET(CM_MEM_PWR_CTRL, 0,
SHARED_MEM_PWR_DIS, power_on ? 0:1);
}
static void dpp1_enable_cm_block(
struct dpp *dpp_base)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
}
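/* Select the degamma LUT mode: bypass, the hardware sRGB or xvYCC curves,
 * or a user-programmed PWL.
 */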
void dpp1_set_degamma(
struct dpp *dpp_base,
enum ipp_degamma_mode mode)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
dpp1_enable_cm_block(dpp_base);
switch (mode) {
case IPP_DEGAMMA_MODE_BYPASS:
/* Setting degamma bypass for now */
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
break;
case IPP_DEGAMMA_MODE_HW_sRGB:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
break;
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
case IPP_DEGAMMA_MODE_USER_PWL:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
REG_SEQ_SUBMIT();
REG_SEQ_WAIT_DONE();
}
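/* Select which degamma RAM drives the user PWL:
 * LUT mode 3 selects RAM A, mode 4 selects RAM B.
 */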
void dpp1_degamma_ram_select(
struct dpp *dpp_base,
bool use_ram_a)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (use_ram_a)
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
else
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);
}
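/* Report which degamma RAM is currently in use based on the DGAM config
 * status: 9 means RAM A, 10 means RAM B; any other value returns false.
 */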
static bool dpp1_degamma_ram_inuse(
struct dpp *dpp_base,
bool *ram_a_inuse)
{
bool ret = false;
uint32_t status_reg = 0;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
&status_reg);
if (status_reg == 9) {
*ram_a_inuse = true;
ret = true;
} else if (status_reg == 10) {
*ram_a_inuse = false;
ret = true;
}
return ret;
}
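/* Write the degamma PWL entries (base and delta values for R, G and B)
 * into the selected degamma RAM.
 */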
void dpp1_program_degamma_lut(
struct dpp *dpp_base,
const struct pwl_result_data *rgb,
uint32_t num,
bool is_ram_a)
{
uint32_t i;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
CM_DGAM_LUT_WRITE_EN_MASK, 7);
REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
is_ram_a ? 0 : 1);
REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
for (i = 0 ; i < num; i++) {
REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
REG_SET(CM_DGAM_LUT_DATA, 0,
CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
REG_SET(CM_DGAM_LUT_DATA, 0,
CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
REG_SET(CM_DGAM_LUT_DATA, 0,
CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
}
}
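/* Double-buffered degamma update: power on the LUT memory, program the RAM
 * that is not currently in use, then switch the LUT mode over to it.
 */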
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
const struct pwl_params *params)
{
bool is_ram_a = true;
dpp1_power_on_degamma_lut(dpp_base, true);
dpp1_enable_cm_block(dpp_base);
dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
if (is_ram_a)
dpp1_program_degamma_lutb_settings(dpp_base, params);
else
dpp1_program_degamma_luta_settings(dpp_base, params);
dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
params->hw_points_num, !is_ram_a);
dpp1_degamma_ram_select(dpp_base, !is_ram_a);
}
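/* Put the DPP into full bypass: ARGB8888 input with zero expansion, the CM
 * block bypassed and the degamma LUT disabled.
 */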
void dpp1_full_bypass(struct dpp *dpp_base)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
/* Input pixel format: ARGB8888 */
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, 0x8);
/* Zero expansion */
REG_SET_3(FORMAT_CONTROL, 0,
CNVC_BYPASS, 0,
FORMAT_CONTROL__ALPHA_EN, 0,
FORMAT_EXPANSION_MODE, 0);
/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
if (dpp->tf_mask->CM_BYPASS_EN)
REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
else
REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
/* Setting degamma bypass for now */
REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
}
static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
bool *ram_a_inuse)
{
bool in_use = false;
uint32_t status_reg = 0;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
&status_reg);
// 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
*ram_a_inuse = true;
in_use = true;
// 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
} else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
*ram_a_inuse = false;
in_use = true;
}
return in_use;
}
/*
* Input gamma LUT currently supports 256 values only. This means input color
* can have a maximum of 8 bits per channel (= 256 possible values) in order to
* have a one-to-one mapping with the LUT. Truncation will occur with color
* values greater than 8 bits.
*
* In the future, this function should support additional input gamma methods,
* such as piecewise linear mapping, and input gamma bypass.
*/
void dpp1_program_input_lut(
struct dpp *dpp_base,
const struct dc_gamma *gamma)
{
int i;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
bool rama_occupied = false;
uint32_t ram_num;
// Power on LUT memory.
REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
dpp1_enable_cm_block(dpp_base);
// Determine whether to use RAM A or RAM B
dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
if (!rama_occupied)
REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
else
REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
// RW mode is 256-entry LUT
REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
// IGAM Input format should be 8 bits per channel.
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
// Do not mask any R,G,B values
REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
// LUT-256, unsigned, integer, new u0.12 format
REG_UPDATE_3(
CM_IGAM_CONTROL,
CM_IGAM_LUT_FORMAT_R, 3,
CM_IGAM_LUT_FORMAT_G, 3,
CM_IGAM_LUT_FORMAT_B, 3);
// Start at index 0 of IGAM LUT
REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
for (i = 0; i < gamma->num_entries; i++) {
REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
dc_fixpt_round(
gamma->entries.red[i]));
REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
dc_fixpt_round(
gamma->entries.green[i]));
REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
dc_fixpt_round(
gamma->entries.blue[i]));
}
// Power off LUT memory
REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
// Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}
void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn10/dcn10_hubp.h"
#include "dcn21_hubp.h"
#include "dm_services.h"
#include "reg_helper.h"
#include "dc_dmub_srv.h"
#define DC_LOGGER_INIT(logger)
#define REG(reg)\
hubp21->hubp_regs->reg
#define CTX \
hubp21->base.ctx
#undef FN
#define FN(reg_name, field_name) \
hubp21->hubp_shift->field_name, hubp21->hubp_mask->field_name
/*
* In DCN2.1, the non-double buffered version of the following 4 DLG registers are used in RTL.
* As a result, if S/W updates any of these registers during a mode change,
* the current frame before the mode change will use the new value right away
* and can lead to generating incorrect request deadlines and incorrect TTU/QoS behavior.
*
* REFCYC_PER_VM_GROUP_FLIP[22:0]
* REFCYC_PER_VM_GROUP_VBLANK[22:0]
* REFCYC_PER_VM_REQ_FLIP[22:0]
* REFCYC_PER_VM_REQ_VBLANK[22:0]
*
* REFCYC_PER_VM_*_FLIP affects the deadline of the VM requests generated
* when flipping to a new surface
*
* REFCYC_PER_VM_*_VBLANK affects the deadline of the VM requests generated
* during prefetch period of a frame. The prefetch starts at a pre-determined
* number of lines before the display active per frame
*
* DCN may underflow due to incorrectly programming these registers
* during VM stage of prefetch/iflip. First lines of display active
* or a sub-region of active using a new surface will be corrupted
* until the VM data returns at flip/mode change transitions
*
 * Workaround:
 * Always opt for the more aggressive settings.
 * On any mode switch, if the new register values are smaller than the
 * current values, update the registers with the new values.
*
* Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142
*
*/
void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
uint32_t refcyc_per_vm_group_vblank;
uint32_t refcyc_per_vm_req_vblank;
uint32_t refcyc_per_vm_group_flip;
uint32_t refcyc_per_vm_req_flip;
const uint32_t uninitialized_hw_default = 0;
REG_GET(VBLANK_PARAMETERS_5,
REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank);
if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank)
REG_SET(VBLANK_PARAMETERS_5, 0,
REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
REG_GET(VBLANK_PARAMETERS_6,
REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank)
REG_SET(VBLANK_PARAMETERS_6, 0,
REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
REG_GET(FLIP_PARAMETERS_3,
REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip)
REG_SET(FLIP_PARAMETERS_3, 0,
REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
REG_GET(FLIP_PARAMETERS_4,
REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip)
REG_SET(FLIP_PARAMETERS_4, 0,
REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
REG_SET(FLIP_PARAMETERS_5, 0,
REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
REG_SET(FLIP_PARAMETERS_6, 0,
REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
}
void hubp21_program_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
{
hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
apply_DEDCN21_142_wa_for_hostvm_deadline(hubp, dlg_attr);
}
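/* Program the per-pipe request generation: detail buffer plane 1 base
 * address, expansion modes and the request size configuration for the luma
 * and chroma planes.
 */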
void hubp21_program_requestor(
struct hubp *hubp,
struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
REG_UPDATE(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
REG_SET_4(DCN_EXPANSION_MODE, 0,
DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
VM_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
REG_SET_7(DCHUBP_REQ_SIZE_CONFIG_C, 0,
CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
}
static void hubp21_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
/* OTG is locked when this function is called. Registers are double buffered;
 * disabling the requestors is not needed.
 */
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp21_program_requestor(hubp, rq_regs);
hubp21_program_deadline(hubp, dlg_attr, ttu_attr);
}
static void hubp21_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
const struct rect *viewport_c)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
PRI_VIEWPORT_WIDTH, viewport->width,
PRI_VIEWPORT_HEIGHT, viewport->height);
REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
PRI_VIEWPORT_X_START, viewport->x,
PRI_VIEWPORT_Y_START, viewport->y);
/*for stereo*/
REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
SEC_VIEWPORT_WIDTH, viewport->width,
SEC_VIEWPORT_HEIGHT, viewport->height);
REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
SEC_VIEWPORT_X_START, viewport->x,
SEC_VIEWPORT_Y_START, viewport->y);
/* DC supports NV12 only at the moment */
REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
PRI_VIEWPORT_WIDTH_C, viewport_c->width,
PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
PRI_VIEWPORT_Y_START_C, viewport_c->y);
REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
SEC_VIEWPORT_WIDTH_C, viewport_c->width,
SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
SEC_VIEWPORT_X_START_C, viewport_c->x,
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
static void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
// The format of high/low is bits 48:18 of the 48-bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, 0,
MC_VM_SYSTEM_APERTURE_LOW_ADDR, mc_vm_apt_low.quad_part);
REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, 0,
MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mc_vm_apt_high.quad_part);
REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
ENABLE_L1_TLB, 1,
SYSTEM_ACCESS_MODE, 0x3);
}
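/* Read back the programmed RQ, DLG and TTU registers and log a debug
 * message for every field that does not match the DML-calculated value.
 */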
static void hubp21_validate_dml_output(struct hubp *hubp,
struct dc_context *ctx,
struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
DC_LOGGER_INIT(ctx->logger);
DC_LOG_DEBUG("DML Validation | Running Validation");
/* Requester - Per hubp */
REG_GET(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
REG_GET_4(DCN_EXPANSION_MODE,
DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C,
CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:PRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:VM_GROUP_SIZE - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
/* DLG - Per hubp */
REG_GET_2(BLANK_OFFSET_0,
REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
REG_GET(BLANK_OFFSET_1,
MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
REG_GET(DST_DIMENSIONS,
REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
REG_GET_2(DST_AFTER_SCALER,
REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
REG_GET(REF_FREQ_TO_PIX_FREQ,
REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
/* DLG - Per luma/chroma */
REG_GET(VBLANK_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
if (REG(NOM_PARAMETERS_0))
REG_GET(NOM_PARAMETERS_0,
DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
if (REG(NOM_PARAMETERS_1))
REG_GET(NOM_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
REG_GET(NOM_PARAMETERS_4,
DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
REG_GET(NOM_PARAMETERS_5,
REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
REG_GET_2(PER_LINE_DELIVERY,
REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
REG_GET_2(PER_LINE_DELIVERY_PRE,
REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
REG_GET(VBLANK_PARAMETERS_2,
REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
if (REG(NOM_PARAMETERS_2))
REG_GET(NOM_PARAMETERS_2,
DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
if (REG(NOM_PARAMETERS_3))
REG_GET(NOM_PARAMETERS_3,
REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
REG_GET(NOM_PARAMETERS_6,
DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
REG_GET(NOM_PARAMETERS_7,
REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
REG_GET(VBLANK_PARAMETERS_3,
REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
REG_GET(VBLANK_PARAMETERS_4,
REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
/* TTU - per hubp */
REG_GET_2(DCN_TTU_QOS_WM,
QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
REG_GET_3(DCN_SURF0_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
REG_GET_3(DCN_SURF1_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
REG_GET_3(DCN_CUR0_TTU_CNTL0,
REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
REG_GET(FLIP_PARAMETERS_1,
REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
REG_GET(DCN_CUR0_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
REG_GET(DCN_CUR1_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
REG_GET(DCN_SURF0_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
REG_GET(DCN_SURF1_TTU_CNTL1,
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
/* Host VM deadline regs */
REG_GET(VBLANK_PARAMETERS_5,
REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank);
REG_GET(VBLANK_PARAMETERS_6,
REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank);
REG_GET(FLIP_PARAMETERS_3,
REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip);
REG_GET(FLIP_PARAMETERS_4,
REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip);
REG_GET(FLIP_PARAMETERS_5,
REFCYC_PER_PTE_GROUP_FLIP_C, &dlg_attr.refcyc_per_pte_group_flip_c);
REG_GET(FLIP_PARAMETERS_6,
REFCYC_PER_META_CHUNK_FLIP_C, &dlg_attr.refcyc_per_meta_chunk_flip_c);
REG_GET(FLIP_PARAMETERS_2,
REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l);
if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank);
if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank)
DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank);
if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip)
DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip);
if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip)
DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip);
if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c)
DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c);
if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c)
DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c);
if (dlg_attr.refcyc_per_meta_chunk_flip_l != dml_dlg_attr->refcyc_per_meta_chunk_flip_l)
DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u Actual: %u\n",
dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l);
}
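/* Program the flip control, VMID, TMZ bits and all primary/secondary
 * surface and meta surface addresses for the flip.
 */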
static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
REG_UPDATE_3(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_TYPE, flip_regs->immediate,
SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo,
SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo);
REG_UPDATE(VMID_SETTINGS_0,
VMID, flip_regs->vmid);
REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface,
PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface,
SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface,
SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
PRIMARY_META_SURFACE_ADDRESS_C,
flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_META_SURFACE_ADDRESS_HIGH,
flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
PRIMARY_META_SURFACE_ADDRESS,
flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS);
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_META_SURFACE_ADDRESS_HIGH,
flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
SECONDARY_META_SURFACE_ADDRESS,
flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS);
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
SECONDARY_SURFACE_ADDRESS_HIGH,
flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
SECONDARY_SURFACE_ADDRESS,
flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
PRIMARY_SURFACE_ADDRESS_HIGH_C,
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
PRIMARY_SURFACE_ADDRESS_C,
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
PRIMARY_SURFACE_ADDRESS_HIGH,
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
PRIMARY_SURFACE_ADDRESS,
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);
}
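/* PLAT_54186 workaround: rather than writing the flip registers directly,
 * pack the surface addresses and flip parameters into a DMUB command so
 * that the DMCUB firmware performs the flip programming.
 */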
static void dmcub_PLAT_54186_wa(struct hubp *hubp,
struct surface_flip_registers *flip_regs)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
union dmub_rb_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
cmd.PLAT_54186_wa.header.payload_bytes = sizeof(cmd.PLAT_54186_wa.flip);
cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS =
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
cmd.PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
cmd.PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
cmd.PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
cmd.PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
PERF_TRACE(); // TODO: remove after performance is stable.
dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
PERF_TRACE(); // TODO: remove after performance is stable.
}
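/* Translate a dc_plane_address (graphics, video progressive or graphics
 * stereo) into surface_flip_registers, then either hand the flip to the
 * DMCUB workaround path (video progressive surfaces with
 * enable_dmcub_surface_flip set) or program the registers directly.
 */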
static bool hubp21_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate)
{
struct surface_flip_registers flip_regs = { 0 };
flip_regs.vmid = address->vmid;
switch (address->type) {
case PLN_ADDR_TYPE_GRAPHICS:
if (address->grph.addr.quad_part == 0) {
BREAK_TO_DEBUGGER();
break;
}
if (address->grph.meta_addr.quad_part != 0) {
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
address->grph.meta_addr.low_part;
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
address->grph.meta_addr.high_part;
}
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
address->grph.addr.low_part;
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
address->grph.addr.high_part;
break;
case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
if (address->video_progressive.luma_addr.quad_part == 0
|| address->video_progressive.chroma_addr.quad_part == 0)
break;
if (address->video_progressive.luma_meta_addr.quad_part != 0) {
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
address->video_progressive.luma_meta_addr.low_part;
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
address->video_progressive.luma_meta_addr.high_part;
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C =
address->video_progressive.chroma_meta_addr.low_part;
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C =
address->video_progressive.chroma_meta_addr.high_part;
}
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
address->video_progressive.luma_addr.low_part;
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
address->video_progressive.luma_addr.high_part;
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
address->video_progressive.chroma_addr.low_part;
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
address->video_progressive.chroma_addr.high_part;
break;
case PLN_ADDR_TYPE_GRPH_STEREO:
if (address->grph_stereo.left_addr.quad_part == 0)
break;
if (address->grph_stereo.right_addr.quad_part == 0)
break;
flip_regs.grph_stereo = true;
if (address->grph_stereo.right_meta_addr.quad_part != 0) {
flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS =
address->grph_stereo.right_meta_addr.low_part;
flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH =
address->grph_stereo.right_meta_addr.high_part;
}
if (address->grph_stereo.left_meta_addr.quad_part != 0) {
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
address->grph_stereo.left_meta_addr.low_part;
flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
address->grph_stereo.left_meta_addr.high_part;
}
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
address->grph_stereo.left_addr.low_part;
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
address->grph_stereo.left_addr.high_part;
flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS =
address->grph_stereo.right_addr.low_part;
flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH =
address->grph_stereo.right_addr.high_part;
break;
default:
BREAK_TO_DEBUGGER();
break;
}
flip_regs.tmz_surface = address->tmz_surface;
flip_regs.immediate = flip_immediate;
if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
dmcub_PLAT_54186_wa(hubp, &flip_regs);
else
program_surface_flip_and_addr(hubp, &flip_regs);
hubp->request_address = *address;
return true;
}
static void hubp21_init(struct hubp *hubp)
{
// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
// This is a chicken bit to enable the ECO fix.
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
}
static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
.mem_program_viewport = hubp21_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
.dmdata_set_attributes = hubp2_dmdata_set_attributes,
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp2_read_state,
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp21_init,
.validate_dml_output = hubp21_validate_dml_output,
.hubp_set_flip_int = hubp1_set_flip_int,
};
bool hubp21_construct(
struct dcn21_hubp *hubp21,
struct dc_context *ctx,
uint32_t inst,
const struct dcn_hubp2_registers *hubp_regs,
const struct dcn_hubp2_shift *hubp_shift,
const struct dcn_hubp2_mask *hubp_mask)
{
hubp21->base.funcs = &dcn21_hubp_funcs;
hubp21->base.ctx = ctx;
hubp21->hubp_regs = hubp_regs;
hubp21->hubp_shift = hubp_shift;
hubp21->hubp_mask = hubp_mask;
hubp21->base.inst = inst;
hubp21->base.opp_id = OPP_ID_INVALID;
hubp21->base.mpcc_id = 0xf;
return true;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
#include "reg_helper.h"
#define REG(reg)\
hubbub1->regs->reg
#define DC_LOGGER \
hubbub1->base.ctx->logger
#define CTX \
hubbub1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
hubbub1->shifts->field_name, hubbub1->masks->field_name
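/* Convert a watermark given in nanoseconds to DCHUB refclk cycles and clamp
 * it to the register maximum, e.g. 800 ns at a 100 MHz refclk ->
 * 800 * 100 / 1000 = 80 cycles.
 */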
static uint32_t convert_and_clamp(
uint32_t wm_ns,
uint32_t refclk_mhz,
uint32_t clamp_value)
{
uint32_t ret_val = 0;
ret_val = wm_ns * refclk_mhz;
ret_val /= 1000;
if (ret_val > clamp_value)
ret_val = clamp_value;
return ret_val;
}
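/* Bring up the DCHVM (display host VM / rIOMMU) block: request init, poll
 * RIOMMU_ACTIVE for up to ~500 us, then reflect the HUBBUB power status,
 * start rIOMMU prefetch, enable dynamic clock gating and wait for the
 * prefetch to complete.
 */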
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t riommu_active;
int i;
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
//Poll until RIOMMU_ACTIVE = 1
for (i = 0; i < 100; i++) {
REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
if (riommu_active)
break;
else
udelay(5);
}
if (riommu_active) {
//Reflect the power status of DCHUBBUB
REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
//Start rIOMMU prefetching
REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
// Enable dynamic clock gating
REG_UPDATE_4(DCHVM_CLK_CTRL,
HVM_DISPCLK_R_GATE_DIS, 0,
HVM_DISPCLK_G_GATE_DIS, 0,
HVM_DCFCLK_R_GATE_DIS, 0,
HVM_DCFCLK_G_GATE_DIS, 0);
//Poll until HOSTVM_PREFETCH_DONE = 1
REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
hubbub->riommu_active = true;
}
}
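/* Program the system aperture (FB and AGP ranges, in 16 MB units via the
 * >> 24 shifts) and, when a GART range is provided, set up VMID 0 from the
 * physical address config, then initialize DCHVM. Returns the number of
 * supported VMIDs.
 */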
int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
AGP_BASE, pa_config->system_aperture.agp_base >> 24);
if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
phys_config.depth = 0;
phys_config.block_size = 0;
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
dcn21_dchvm_init(hubbub);
return hubbub1->num_vmid;
}
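/* Program the urgent watermarks for clock states A-D. A value is only
 * written when it rises or when safe_to_lower is set; otherwise the lower
 * value is left pending and the function reports this by returning true.
 */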
bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
bool wm_pending = false;
/* Repeat for watermark sets A, B, C and D. */
/* clock state A */
if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
> hubbub1->watermarks.a.frac_urg_bw_flip) {
hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
} else if (watermarks->a.frac_urg_bw_flip
< hubbub1->watermarks.a.frac_urg_bw_flip)
wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
} else if (watermarks->a.frac_urg_bw_nom
< hubbub1->watermarks.a.frac_urg_bw_nom)
wm_pending = true;
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
} else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
> hubbub1->watermarks.a.frac_urg_bw_flip) {
hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
} else if (watermarks->a.frac_urg_bw_flip
< hubbub1->watermarks.a.frac_urg_bw_flip)
wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
} else if (watermarks->a.frac_urg_bw_nom
< hubbub1->watermarks.a.frac_urg_bw_nom)
wm_pending = true;
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
} else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
> hubbub1->watermarks.a.frac_urg_bw_flip) {
hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
} else if (watermarks->a.frac_urg_bw_flip
< hubbub1->watermarks.a.frac_urg_bw_flip)
wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
} else if (watermarks->a.frac_urg_bw_nom
< hubbub1->watermarks.a.frac_urg_bw_nom)
wm_pending = true;
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
} else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
> hubbub1->watermarks.a.frac_urg_bw_flip) {
hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
} else if (watermarks->a.frac_urg_bw_flip
< hubbub1->watermarks.a.frac_urg_bw_flip)
wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
} else if (watermarks->a.frac_urg_bw_nom
< hubbub1->watermarks.a.frac_urg_bw_nom)
wm_pending = true;
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
} else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
wm_pending = true;
return wm_pending;
}
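/* Program the stutter (self-refresh enter/exit) watermarks for clock states
 * A-D, using the same raise-now/lower-only-when-safe policy as the urgent
 * watermarks above.
 */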
bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
watermarks->a.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->a.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
watermarks->b.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->b.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
watermarks->c.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->c.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
watermarks->d.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
} else if (watermarks->d.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
wm_pending = true;
return wm_pending;
}
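/* Program the DRAM clock/p-state change watermarks for clock states A-D,
 * again only lowering values when safe_to_lower is set.
 */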
bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->a.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->b.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->c.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
watermarks->d.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
} else if (watermarks->d.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
wm_pending = true;
return wm_pending;
}
bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
/*
* The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
* If the memory controller is fully utilized and the DCHub requestors are
* well ahead of their amortized schedule, then it is safe to prevent the next winner
* from being committed and sent to the fabric.
* The utilization of the memory controller is approximated by ensuring that
* the number of outstanding requests is greater than a threshold specified
* by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
* the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
*
* TODO: Revisit the request limit once the right number has been figured out. The request
* limit for Renoir isn't decided yet, so set the maximum value (0x1FF) to turn it off for now.
*/
REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
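/* Read back the four programmed watermark sets (urgent, SR enter/exit and
 * DRAM clock change) into the dcn_hubbub_wm snapshot.
 */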
void hubbub21_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
struct dcn_hubbub_wm_set *s;
memset(wm, 0, sizeof(struct dcn_hubbub_wm));
s = &wm->sets[0];
s->wm_set = 0;
REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change);
s = &wm->sets[1];
s->wm_set = 1;
REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change);
s = &wm->sets[2];
s->wm_set = 2;
REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change);
s = &wm->sets[3];
s->wm_set = 3;
REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);
REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change);
}
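/* DEDCN21-147 workaround: re-write the urgency watermark A register with
 * its current value; the rewrite itself is presumably what re-latches the
 * value in hardware.
 */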
static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
static const struct hubbub_funcs hubbub21_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub21_init_dchub,
.init_vm_ctx = hubbub2_init_vm_ctx,
.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
.hubbub_read_state = hubbub2_read_state,
};
void hubbub21_construct(struct dcn20_hubbub *hubbub,
struct dc_context *ctx,
const struct dcn_hubbub_registers *hubbub_regs,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask)
{
hubbub->base.ctx = ctx;
hubbub->base.funcs = &hubbub21_funcs;
hubbub->regs = hubbub_regs;
hubbub->shifts = hubbub_shift;
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn21_hwseq.h"
#include "vmid.h"
#include "reg_helper.h"
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
#include "link.h"
#define DC_LOGGER_INIT(logger)
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
#undef FN
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
/* Temporary: read the settings here; in the future the values will come from the KMD directly. */
static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config,
struct dce_hwseq *hws)
{
uint32_t page_table_base_hi;
uint32_t page_table_base_lo;
REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi);
REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo);
config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo;
}
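/* Copy the physical address space config provided by the DM, fill in the
 * GART page table base from the MMHUB VM context 0 registers, and hand the
 * result to HUBBUB for DCHUB system context init.
 */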
int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
mmhub_update_page_table_config(&config, hws);
return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
}
// Workaround for Renoir s0i3: if the register is already programmed, bypass golden init.
bool dcn21_s0i3_golden_init_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
uint32_t value = 0;
value = REG_READ(MICROSECOND_TIME_BASE_DIV);
return value != 0x00120464;
}
void dcn21_exit_optimized_pwr_state(
const struct dc *dc,
struct dc_state *context)
{
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
false);
}
void dcn21_optimize_pwr_state(
const struct dc *dc,
struct dc_state *context)
{
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
}
/* If the user hotplugs an HDMI monitor while the monitor is off,
* the OS will do a mode set (with output timing) but keep the output off.
* In this case DAL will ask the VBIOS to power up the PLL in the PHY.
* If the user then unplugs the monitor (while we are still in monitor off) or
* the system attempts to enter modern standby (where we disable the PLL),
* the PHY will hang on the next mode set attempt:
* if a PLL enable is followed by a PLL disable (without executing lane enable/disable),
* RDPCS_PHY_DP_MPLLB_STATE remains 1,
* which indicates that the PLL disable attempt didn't actually go through.
* As a workaround, insert a PHY lane enable/disable before the PLL disable.
*/
void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
if (!pipe_ctx->stream->dpms_off)
return;
pipe_ctx->stream->dpms_off = false;
pipe_ctx->stream->ctx->dc->link_srv->set_dpms_on(context, pipe_ctx);
pipe_ctx->stream->ctx->dc->link_srv->set_dpms_off(pipe_ctx);
pipe_ctx->stream->dpms_off = true;
}
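/* Issue an ABM set-pipe command to the DMUB firmware, associating the given
 * OTG instance and panel with the requested pipe option.
 */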
static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
uint32_t ramping_boundary = 0xFFFF;
memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
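/* Send the requested backlight level (16.16 fixed point, per the parameter
 * naming) and frame ramp to the DMUB ABM for the given panel.
 */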
static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp, uint32_t panel_inst)
{
union dmub_rb_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
if (dmcu) {
dce110_set_abm_immediate_disable(pipe_ctx);
return;
}
if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
panel_cntl->inst);
} else {
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst);
}
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
}
void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
if (dmcu) {
dce110_set_pipe(pipe_ctx);
return;
}
if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
} else {
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
}
}
}
bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
if (dc->dc->res_pool->dmcu) {
dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
return true;
}
if (abm != NULL) {
uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
} else {
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
}
}
}
if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
frame_ramp, 0, panel_cntl->inst);
else
dmub_abm_set_backlight(dc, backlight_pwm_u16_16, frame_ramp, panel_cntl->inst);
return true;
}
bool dcn21_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream)
{
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream == stream &&
(pipe_ctx->prev_odm_pipe == NULL && pipe_ctx->next_odm_pipe == NULL))
return true;
}
return false;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include <linux/delay.h>
#include "core_types.h"
#include "link_encoder.h"
#include "dcn21_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
enc10->base.ctx->logger
#define REG(reg)\
(enc10->link_regs->reg)
#undef FN
#define FN(reg_name, field_name) \
enc10->link_shift->field_name, enc10->link_mask->field_name
#define IND_REG(index) \
(enc10->link_regs->index)
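/* Reference MPLLB configurations indexed by DP link rate in the order
 * RBR, HBR, HBR2, HBR3; update_cfg_data() below picks the entry matching
 * the requested link rate.
 */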
static struct mpll_cfg dcn21_mpll_cfg_ref[] = {
// RBR
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 238,
.mpllb_fracn_en = 0,
.mpllb_fracn_quot = 0,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 44237,
.mpllb_ssc_stepsize = 59454,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 2,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 2,
.mpllb_ana_cp_int = 9,
.mpllb_ana_cp_prop = 15,
.hdmi_pixel_clk_div = 0,
},
// HBR
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 192,
.mpllb_fracn_en = 1,
.mpllb_fracn_quot = 32768,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 36864,
.mpllb_ssc_stepsize = 49545,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 1,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 3,
.mpllb_ana_cp_int = 9,
.mpllb_ana_cp_prop = 15,
.hdmi_pixel_clk_div = 0,
},
//HBR2
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 192,
.mpllb_fracn_en = 1,
.mpllb_fracn_quot = 32768,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 36864,
.mpllb_ssc_stepsize = 49545,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 0,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 3,
.mpllb_ana_cp_int = 9,
.mpllb_ana_cp_prop = 15,
.hdmi_pixel_clk_div = 0,
},
//HBR3
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 304,
.mpllb_fracn_en = 1,
.mpllb_fracn_quot = 49152,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 55296,
.mpllb_ssc_stepsize = 74318,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 0,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 1,
.mpllb_ana_cp_int = 7,
.mpllb_ana_cp_prop = 16,
.hdmi_pixel_clk_div = 0,
},
};
static bool update_cfg_data(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings,
struct dpcssys_phy_seq_cfg *cfg)
{
int i;
cfg->load_sram_fw = false;
cfg->use_calibration_setting = true;
//TODO: need to implement a proper lane mapping for Renoir.
for (i = 0; i < 4; i++)
cfg->lane_en[i] = true;
switch (link_settings->link_rate) {
case LINK_RATE_LOW:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[0];
break;
case LINK_RATE_HIGH:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[1];
break;
case LINK_RATE_HIGH2:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[2];
break;
case LINK_RATE_HIGH3:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[3];
break;
default:
DC_LOG_ERROR("%s: No supported link rate found %X!\n",
__func__, link_settings->link_rate);
return false;
}
return true;
}
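/* For USB-C (DP alt mode) connectors, make sure the PHY has not been handed
 * back to the USB controller: bail out if DPALT_DISABLE is set, drop the
 * disable ACK, wait 40 us and re-check before enabling the DP reference
 * clock. Returns false if the PHY could not be acquired.
 */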
static bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
int value;
if (enc->features.flags.bits.DP_IS_USB_C) {
REG_GET(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE, &value);
if (value == 1) {
ASSERT(0);
return false;
}
REG_UPDATE(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE_ACK, 0);
udelay(40);
REG_GET(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE, &value);
if (value == 1) {
ASSERT(0);
REG_UPDATE(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE_ACK, 1);
return false;
}
}
REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1);
return true;
}
static void dcn21_link_encoder_release_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
if (enc->features.flags.bits.DP_IS_USB_C) {
REG_UPDATE(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE_ACK, 1);
}
REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0);
}
void dcn21_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10;
struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg;
if (!dcn21_link_encoder_acquire_phy(enc))
return;
if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
return;
}
if (!update_cfg_data(enc10, link_settings, cfg))
return;
enc1_configure_encoder(enc10, link_settings);
dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
}
static void dcn21_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
if (!dcn21_link_encoder_acquire_phy(enc))
return;
dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
}
static void dcn21_link_encoder_disable_output(struct link_encoder *enc,
enum signal_type signal)
{
dcn10_link_encoder_disable_output(enc, signal);
if (dc_is_dp_signal(signal))
dcn21_link_encoder_release_phy(enc);
}
static const struct link_encoder_funcs dcn21_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_dp_output = dcn21_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output,
.disable_output = dcn21_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dcn10_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dcn10_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
.enable_hpd = dcn10_link_encoder_enable_hpd,
.disable_hpd = dcn10_link_encoder_disable_hpd,
.is_dig_enabled = dcn10_is_dig_enabled,
.destroy = dcn10_link_encoder_destroy,
.fec_set_enable = enc2_fec_set_enable,
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn21_link_encoder_construct(
struct dcn21_link_encoder *enc21,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask)
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
struct dcn10_link_encoder *enc10 = &enc21->enc10;
enc10->base.funcs = &dcn21_link_enc_funcs;
enc10->base.ctx = init_data->ctx;
enc10->base.id = init_data->encoder;
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
/* set the flag to indicate whether driver poll the I2C data pin
* while doing the DP sink detect
*/
/* if (dal_adapter_service_is_feature_supported(as,
FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
enc10->base.features.flags.bits.
DP_SINK_DETECT_POLL_DATA_PIN = true;*/
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
SIGNAL_TYPE_LVDS |
SIGNAL_TYPE_DISPLAY_PORT |
SIGNAL_TYPE_DISPLAY_PORT_MST |
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
* SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the preferred
* DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI and LVDS.
* The preferred DIG assignment is decided by board design.
* For DCE 8.0 there are at most 6 UNIPHYs; we assume the board design
* and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
* Because of this, adding DIGG should not hurt DCE 8.0.
* This lets DCE 8.1 share DCE 8.0 code as much as possible.
*/
enc10->link_regs = link_regs;
enc10->aux_regs = aux_regs;
enc10->hpd_regs = hpd_regs;
enc10->link_shift = link_shift;
enc10->link_mask = link_mask;
switch (enc10->base.transmitter) {
case TRANSMITTER_UNIPHY_A:
enc10->base.preferred_engine = ENGINE_ID_DIGA;
break;
case TRANSMITTER_UNIPHY_B:
enc10->base.preferred_engine = ENGINE_ID_DIGB;
break;
case TRANSMITTER_UNIPHY_C:
enc10->base.preferred_engine = ENGINE_ID_DIGC;
break;
case TRANSMITTER_UNIPHY_D:
enc10->base.preferred_engine = ENGINE_ID_DIGD;
break;
case TRANSMITTER_UNIPHY_E:
enc10->base.preferred_engine = ENGINE_ID_DIGE;
break;
case TRANSMITTER_UNIPHY_F:
enc10->base.preferred_engine = ENGINE_ID_DIGF;
break;
case TRANSMITTER_UNIPHY_G:
enc10->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
ASSERT_CRITICAL(false);
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
/* default to one to mirror Windows behavior */
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
enc10->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
if (result == BP_RESULT_OK) {
enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
if (enc10->base.ctx->dc->debug.hdmi20_disable) {
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c |
/*
* Copyright 2016-2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn21_hwseq.h"
#include "dcn21_init.h"
static const struct hw_sequencer_funcs dcn21_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
.power_down_on_boot = dcn10_power_down_on_boot,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
.program_output_csc = dcn20_program_output_csc,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
.update_info_frame = dce110_update_info_frame,
.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
.enable_stream = dcn20_enable_stream,
.disable_stream = dce110_disable_stream,
.unblank_stream = dcn20_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.program_triplebuffer = dcn20_program_triple_buffer,
.enable_writeback = dcn20_enable_writeback,
.disable_writeback = dcn20_disable_writeback,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn20_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
.init_sys_ctx = dcn21_init_sys_ctx,
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
.init_pipes = dcn10_init_pipes,
.update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn20_set_input_transfer_func,
.set_output_transfer_func = dcn20_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn20_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn20_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.PLAT_58856_wa = dcn21_PLAT_58856_wa,
};
void dcn21_hw_sequencer_construct(struct dc *dc)
{
dc->hwss = dcn21_funcs;
dc->hwseq->funcs = dcn21_private_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "dcn20/dcn20_dccg.h"
#include "dcn21_dccg.h"
#define TO_DCN_DCCG(dccg)\
container_of(dccg, struct dcn_dccg, base)
#define REG(reg) \
(dccg_dcn->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
#define CTX \
dccg_dcn->base.ctx
#define DC_LOGGER \
dccg->ctx->logger
static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (dccg->ref_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
int modulo = ref_dppclk / 10000;
int phase;
if (req_dppclk) {
			/*
			 * program DPP DTO phase and modulo as below
			 * phase = ceiling(dpp_pipe_clk_mhz / 10)
			 * modulo = trunc(dpp_global_clk_mhz / 10)
			 *
			 * storing frequencies in registers allows dmcub fw
			 * to lower clocks at run time when possible for
			 * power saving
			 *
			 * ceiling the phase and truncating the modulo
			 * guarantees the divided-down per-pipe dpp clock has
			 * a high enough frequency
			 */
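			/*
			 * Worked example with illustrative numbers (not taken
			 * from the original source): if ref_dppclk is
			 * 600000 kHz, the modulo computed above is 60; a
			 * request of 325000 kHz gives phase = ceil(32.5) = 33,
			 * so the divided-down clock is 600 MHz * 33 / 60 =
			 * 330 MHz, which is at least the requested rate.
			 */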
phase = (req_dppclk + 9999) / 10000;
if (phase > modulo) {
				/* phase > modulo results in screen corruption,
				 * e.g. phase = 30, modulo = 29 for 4k@60 HDMI;
				 * in these cases we don't want the pipe clock
				 * to be divided
				 */
phase = modulo;
}
} else {
			/*
			 * set phase to 10 if the dpp isn't used, to prevent a
			 * hard hang when accessing dpp registers on an unused
			 * pipe
			 *
			 * the DTO should stay on to divide down the unused
			 * pipe clock for power saving
			 */
phase = 10;
}
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, phase,
DPPCLK0_DTO_MODULO, modulo);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
}
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
static const struct dccg_funcs dccg21_funcs = {
.update_dpp_dto = dccg21_update_dpp_dto,
.get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
.otg_add_pixel = dccg2_otg_add_pixel,
.otg_drop_pixel = dccg2_otg_drop_pixel,
.dccg_init = dccg2_init
};
struct dccg *dccg21_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
base = &dccg_dcn->base;
base->ctx = ctx;
base->funcs = &dccg21_funcs;
dccg_dcn->regs = regs;
dccg_dcn->dccg_shift = dccg_shift;
dccg_dcn->dccg_mask = dccg_mask;
return &dccg_dcn->base;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
* Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "dm_services.h"
#include "dc.h"
#include "dcn21_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "clk_mgr.h"
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn20/dcn20_mpc.h"
#include "dcn20/dcn20_hubp.h"
#include "dcn21_hubp.h"
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
#include "dcn21/dcn21_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn20/dcn20_opp.h"
#include "dcn20/dcn20_dsc.h"
#include "dcn21/dcn21_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dml/display_mode_vba.h"
#include "dcn20/dcn20_dccg.h"
#include "dcn21/dcn21_dccg.h"
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "dce/dce_panel_cntl.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
#include "dpcs/dpcs_2_1_0_offset.h"
#include "dpcs/dpcs_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
#include "dcn/dcn_2_1_0_offset.h"
#include "dcn/dcn_2_1_0_sh_mask.h"
#include "nbio/nbio_7_0_offset.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dcn21_resource.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "dce/dmub_psr.h"
#include "dce/dmub_abm.h"
/* begin *********************
 * macros to expand the register list macros defined in the HW object header files */
/* DCN */
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
mm ## reg_name ## _ ## block ## id
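/*
 * Illustrative expansion (added for clarity, not generated code): with the
 * macros above, an entry such as SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) pastes
 * together
 *   .DP_DPHY_INTERNAL_CTRL = BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX) +
 *           mmDP0_DP_DPHY_INTERNAL_CTRL
 * i.e. each register list entry resolves a per-instance register offset at
 * compile time.
 */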
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIF0_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
MMHUB_BASE__INST0_SEG ## seg
#define MMHUB_BASE(seg) \
MMHUB_BASE_INNER(seg)
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
mmMM ## reg_name
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN2_1(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, C),
clk_src_regs(3, D),
clk_src_regs(4, E),
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCN20_REG_LIST()
};
static const struct dce_dmcu_shift dmcu_shift = {
DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dce_dmcu_mask dmcu_mask = {
DMCU_MASK_SH_LIST_DCN10(_MASK)
};
static const struct dce_abm_registers abm_regs = {
ABM_DCN20_REG_LIST()
};
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN20(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
audio_regs(2),
audio_regs(3),
audio_regs(4),
audio_regs(5),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
static const struct dccg_registers dccg_regs = {
DCCG_COMMON_REG_LIST_DCN_BASE()
};
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN2_1(__SHIFT)
};
static const struct dccg_mask dccg_mask = {
DCCG_MASK_SH_LIST_DCN2_1(_MASK)
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN20(id),\
}
static const struct dcn20_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
opp_regs(2),
opp_regs(3),
opp_regs(4),
opp_regs(5),
};
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
static const struct dcn_optc_registers tg_regs[] = {
tg_regs(0),
tg_regs(1),
tg_regs(2),
tg_regs(3)
};
static const struct dcn_optc_shift tg_shift = {
TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
static const struct dcn20_mpc_registers mpc_regs = {
MPC_REG_LIST_DCN2_0(0),
MPC_REG_LIST_DCN2_0(1),
MPC_REG_LIST_DCN2_0(2),
MPC_REG_LIST_DCN2_0(3),
MPC_REG_LIST_DCN2_0(4),
MPC_REG_LIST_DCN2_0(5),
MPC_OUT_MUX_REG_LIST_DCN2_0(0),
MPC_OUT_MUX_REG_LIST_DCN2_0(1),
MPC_OUT_MUX_REG_LIST_DCN2_0(2),
MPC_OUT_MUX_REG_LIST_DCN2_0(3),
MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define hubp_regs(id)\
[id] = {\
HUBP_REG_LIST_DCN21(id)\
}
static const struct dcn_hubp2_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
hubp_regs(2),
hubp_regs(3)
};
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN21(__SHIFT)
};
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN21(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN21()
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN21(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN21(_MASK)
};
#define vmid_regs(id)\
[id] = {\
DCN20_VMID_REG_LIST(id)\
}
static const struct dcn_vmid_registers vmid_regs[] = {
vmid_regs(0),
vmid_regs(1),
vmid_regs(2),
vmid_regs(3),
vmid_regs(4),
vmid_regs(5),
vmid_regs(6),
vmid_regs(7),
vmid_regs(8),
vmid_regs(9),
vmid_regs(10),
vmid_regs(11),
vmid_regs(12),
vmid_regs(13),
vmid_regs(14),
vmid_regs(15)
};
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
};
static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
dsc_regsDCN20(0),
dsc_regsDCN20(1),
dsc_regsDCN20(2),
dsc_regsDCN20(3),
dsc_regsDCN20(4),
dsc_regsDCN20(5)
};
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN20(id),\
}
static const struct dcn10_ipp_registers ipp_regs[] = {
ipp_regs(0),
ipp_regs(1),
ipp_regs(2),
ipp_regs(3),
};
static const struct dcn10_ipp_shift ipp_shift = {
IPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn10_ipp_mask ipp_mask = {
IPP_MASK_SH_LIST_DCN20(_MASK),
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN20(id),\
}
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST0(id), \
.AUXN_IMPCAL = 0, \
.AUXP_IMPCAL = 0, \
.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
};
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
tf_regs(0),
tf_regs(1),
tf_regs(2),
tf_regs(3),
};
static const struct dcn2_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN20(_MASK),
TF_DEBUG_REG_LIST_MASK_DCN20
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN2_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
stream_enc_regs(4),
};
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_ipp_construct(ipp, ctx, inst,
&ipp_regs[inst], &ipp_shift, &ipp_mask);
return &ipp->base;
}
static struct dpp *dcn21_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_dpp *dpp =
kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
if (dpp2_construct(dpp, ctx, inst,
&tf_regs[inst], &tf_shift, &tf_mask))
return &dpp->base;
BREAK_TO_DEBUGGER();
kfree(dpp);
return NULL;
}
static struct dce_aux *dcn21_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
i2c_inst_regs(3),
i2c_inst_regs(4),
i2c_inst_regs(5),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
static const struct resource_caps res_cap_rn = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
	.num_audio = 4, // 4 audio endpoints, 4 audio streams
	.num_stream_encoder = 5,
	.num_pll = 5, // maybe 3, because the last two are used for USB-C
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
.num_dsc = 3,
};
#ifdef DIAGS_BUILD
static const struct resource_caps res_cap_rn_FPGA_4pipe = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
.num_audio = 7,
.num_stream_encoder = 4,
.num_pll = 4,
.num_dwb = 1,
.num_ddc = 4,
.num_dsc = 0,
};
static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
.num_timing_generator = 2,
.num_opp = 2,
.num_video_plane = 2,
.num_audio = 7,
.num_stream_encoder = 2,
.num_pll = 4,
.num_dwb = 1,
.num_ddc = 4,
.num_dsc = 2,
};
#endif
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
.p010 = true
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 16000
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 250
},
64,
64
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.max_downscale_src_width = 4096,
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
.disable_48mhz_pwrdwn = false,
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
.enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
.disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},
};
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_PLL3,
DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
if (pool->base.hubbub != NULL) {
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.dpps[i] != NULL)
dcn20_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
}
if (pool->base.sw_i2cs[i] != NULL) {
kfree(pool->base.sw_i2cs[i]);
pool->base.sw_i2cs[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
if (pool->base.dwbc[i] != NULL) {
kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
pool->base.dwbc[i] = NULL;
}
if (pool->base.mcif_wb[i] != NULL) {
kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
pool->base.mcif_wb[i] = NULL;
}
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
if (pool->base.abm != NULL) {
if (pool->base.abm->ctx->dc->config.disable_dmcu)
dmub_abm_destroy(&pool->base.abm);
else
dce_abm_destroy(&pool->base.abm);
}
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.pp_smu != NULL)
dcn21_pp_smu_destroy(&pool->base.pp_smu);
}
bool dcn21_fast_validate_bw(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
bool fast_validate)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
if (!pipes)
return false;
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
if (!pipe_cnt) {
out = true;
goto validate_out;
}
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
*/
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh_and_mclk_switch;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
if (vlevel > context->bw_ctx.dml.soc.num_states) {
/*
* If mode is unsupported or there's still no p-state support then
* fall back to favoring voltage.
*
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
}
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
if (!pipe->stream)
continue;
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
&& memcmp(&mpo_pipe->plane_state->clip_rect,
&pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
}
pipe_idx++;
}
	/* initialize pipe_split_from to an invalid index */
for (i = 0; i < MAX_PIPES; i++)
pipe_split_from[i] = -1;
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
if (!pipe->stream || pipe_split_from[i] >= 0)
continue;
pipe_idx++;
if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!dcn20_split_stream_for_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
dcn20_build_mapped_resource(dc, context, pipe->stream);
}
if (!pipe->plane_state)
continue;
/* Skip 2nd half of already split pipe */
if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
continue;
if (split[i] == 2) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!hsplit_pipe) {
DC_FP_START();
dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true);
DC_FP_END();
continue;
}
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
dcn20_build_mapped_resource(dc, context, pipe->stream);
} else {
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe);
resource_build_scaling_params(pipe);
resource_build_scaling_params(hsplit_pipe);
}
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* merge should already have been done */
ASSERT(0);
}
}
	/* validate the actual DSC count per stream */
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
*vlevel_out = vlevel;
out = true;
goto validate_out;
validate_fail:
out = false;
validate_out:
return out;
}
/*
* Some of the functions further below use the FPU, so we need to wrap this
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
DC_FP_START();
voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
return voltage_supported;
}
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
dcn21_resource_destruct(dcn21_pool);
kfree(dcn21_pool);
*pool = NULL;
}
static struct clock_source *dcn21_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
static struct hubp *dcn21_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn21_hubp *hubp21 =
kzalloc(sizeof(struct dcn21_hubp), GFP_KERNEL);
if (!hubp21)
return NULL;
if (hubp21_construct(hubp21, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp21->base;
BREAK_TO_DEBUGGER();
kfree(hubp21);
return NULL;
}
static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
GFP_KERNEL);
if (!hubbub)
return NULL;
hubbub21_construct(hubbub, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask);
for (i = 0; i < res_cap_rn.num_vmid; i++) {
struct dcn20_vmid *vmid = &hubbub->vmid[i];
vmid->ctx = ctx;
vmid->regs = &vmid_regs[i];
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
hubbub->num_vmid = res_cap_rn.num_vmid;
return &hubbub->base;
}
static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &tg_regs[instance];
tgn10->tg_shift = &tg_shift;
tgn10->tg_mask = &tg_mask;
dcn20_timing_generator_init(tgn10);
return &tgn10->base;
}
static struct mpc *dcn21_mpc_create(struct dc_context *ctx)
{
struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
GFP_KERNEL);
if (!mpc20)
return NULL;
dcn20_mpc_construct(mpc20, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
6);
return &mpc20->base;
}
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
return NULL;
}
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
return &dsc->base;
}
static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
dm_pp_get_funcs(ctx, pp_smu);
if (pp_smu->ctx.ver != PP_SMU_VER_RN)
pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
return pp_smu;
}
static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
*pp_smu = NULL;
}
}
static struct audio *dcn21_create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
if (!enc1)
return NULL;
dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN21_REG_LIST()
};
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN21_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN21_MASK_SH_LIST(_MASK)
};
static struct dce_hwseq *dcn21_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN21 = true;
hws->wa.disallow_self_refresh_during_multi_plane_transition = true;
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn21_create_audio,
.create_stream_encoder = dcn21_stream_encoder_create,
.create_hwseq = dcn21_hwseq_create,
};
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
#define link_regs(id, phyid)\
[id] = {\
LE_DCN2_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
DPCS_DCN21_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
link_regs(2, C),
link_regs(3, D),
link_regs(4, E),
};
static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
{ DCN_PANEL_CNTL_REG_LIST() }
};
static const struct dce_panel_cntl_shift panel_cntl_shift = {
DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
};
static const struct dce_panel_cntl_mask panel_cntl_mask = {
DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
};
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4)
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
DPCS_DCN21_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
DPCS_DCN21_MASK_SH_LIST(_MASK)
};
static int map_transmitter_id_to_phy_instance(
enum transmitter transmitter)
{
	switch (transmitter) {
	case TRANSMITTER_UNIPHY_A:
		return 0;
	case TRANSMITTER_UNIPHY_B:
		return 1;
	case TRANSMITTER_UNIPHY_C:
		return 2;
	case TRANSMITTER_UNIPHY_D:
		return 3;
	case TRANSMITTER_UNIPHY_E:
		return 4;
	default:
		ASSERT(0);
		return 0;
	}
}
static struct link_encoder *dcn21_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn21_link_encoder *enc21 =
kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
int link_regs_id;
if (!enc21)
return NULL;
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
dcn21_link_encoder_construct(enc21,
enc_init_data,
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc21->enc10.base;
}
static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dce_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
if (!panel_cntl)
return NULL;
dce_panel_cntl_construct(panel_cntl,
init_data,
&panel_cntl_regs[init_data->inst],
&panel_cntl_shift,
&panel_cntl_mask);
return &panel_cntl->base;
}
static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
#define CTX ctx
#define REG(reg_name) \
(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
uint32_t value = REG_READ(CC_DC_PIPE_DIS);
	/* RV1 supports a max of 4 pipes */
value = value & 0xf;
return value;
}
static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
plane_state->dcc.enable = 1;
/* align to our worst case block width */
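		/*
		 * e.g. (illustrative numbers, not from the original source) a
		 * 1920-wide source rounds up to a meta_pitch of 2048:
		 * ((1920 + 1023) / 1024) * 1024
		 */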
plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
}
return dcn20_patch_unknown_plane_state(plane_state);
}
static const struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
.panel_cntl_create = dcn21_panel_cntl_create,
.validate_bandwidth = dcn21_validate_bandwidth,
.populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
.get_panel_config_defaults = dcn21_get_panel_config_defaults,
};
static bool dcn21_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn21_resource_pool *pool)
{
int i, j;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
uint32_t num_pipes;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_rn;
#ifdef DIAGS_BUILD
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
//pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc;
pool->base.res_cap = &res_cap_rn_FPGA_4pipe;
#endif
pool->base.funcs = &dcn21_res_pool_funcs;
/*************************************************
	 *  Resource + asic cap hardcoding               *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
	/* max pipe num for the ASIC before checking pipe fuses */
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 1;
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.dgam_rom_caps.pq = 0;
dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
dc->caps.color.dpp.post_csc = 0;
dc->caps.color.dpp.gamma_corr = 0;
dc->caps.color.dpp.dgam_rom_for_yuv = 1;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
// no OGAM ROM on DCN2
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 0;
dc->caps.color.mpc.gamut_remap = 0;
dc->caps.color.mpc.num_3dluts = 0;
dc->caps.color.mpc.shared_3d_lut = 0;
dc->caps.color.mpc.ogam_ram = 1;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
/*************************************************
* Create resources *
*************************************************/
pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
	/* TODO: do not reuse the phy_pll registers */
pool->base.dp_clock_source =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
pool->base.dccg = dccg21_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
if (pool->base.dccg == NULL) {
dm_error("DC: failed to create dccg!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
if (!dc->config.disable_dmcu) {
pool->base.dmcu = dcn21_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
dc->debug.dmub_command_table = false;
}
if (dc->config.disable_dmcu) {
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
dm_error("DC: failed to create psr obj!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
if (dc->config.disable_dmcu)
pool->base.abm = dmub_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
else
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
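	/*
	 * Each bit set in pipe_fuses marks a harvested pipe. As an
	 * illustrative example (assumed value, not from the original source),
	 * pipe_fuses = 0x8 would disable pipe 3 and reduce
	 * max_num_dpp/max_num_otg from the default of 4 down to 3.
	 */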
num_pipes = dcn2_1_ip.max_num_dpp;
for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
if (pipe_fuses & 1 << i)
num_pipes--;
dcn2_1_ip.max_num_dpp = num_pipes;
dcn2_1_ip.max_num_otg = num_pipes;
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn21_create(&init_data);
if (!pool->base.irqs)
goto create_fail;
j = 0;
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
		/* if the pipe is disabled, skip this instance of the HW pipe,
		 * i.e., skip the ASIC register instance
		 */
if ((pipe_fuses & (1 << i)) != 0)
continue;
pool->base.hubps[j] = dcn21_hubp_create(ctx, i);
if (pool->base.hubps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create memory input!\n");
goto create_fail;
}
pool->base.ipps[j] = dcn21_ipp_create(ctx, i);
if (pool->base.ipps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create input pixel processor!\n");
goto create_fail;
}
pool->base.dpps[j] = dcn21_dpp_create(ctx, i);
if (pool->base.dpps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
pool->base.opps[j] = dcn21_opp_create(ctx, i);
if (pool->base.opps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
pool->base.timing_generators[j] = dcn21_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
}
j++;
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn21_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto create_fail;
}
pool->base.hw_i2cs[i] = dcn21_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create hw i2c!!\n");
goto create_fail;
}
pool->base.sw_i2cs[i] = NULL;
}
pool->base.timing_generator_count = j;
pool->base.pipe_count = j;
pool->base.mpcc_count = j;
pool->base.mpc = dcn21_mpc_create(ctx);
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
goto create_fail;
}
pool->base.hubbub = dcn21_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create hubbub!\n");
goto create_fail;
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn21_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create display stream compressor %d!\n", i);
goto create_fail;
}
}
if (!dcn20_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create dwbc!\n");
goto create_fail;
}
if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mcif_wb!\n");
goto create_fail;
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto create_fail;
dcn21_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
return true;
create_fail:
dcn21_resource_destruct(pool);
return false;
}
struct resource_pool *dcn21_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn21_resource_pool *pool =
kzalloc(sizeof(struct dcn21_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
kfree(pool);
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn31/dcn31_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn315_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
#include "dcn31/dcn31_resource.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn31/dcn31_hubbub.h"
#include "dcn30/dcn30_mpc.h"
#include "dcn31/dcn31_hubp.h"
#include "irq/dcn315/irq_service_dcn315.h"
#include "dcn30/dcn30_dpp.h"
#include "dcn31/dcn31_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn30/dcn30_opp.h"
#include "dcn20/dcn20_dsc.h"
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
#include "dcn31/dcn31_afmt.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
#include "dcn/dcn_3_1_5_offset.h"
#include "dcn/dcn_3_1_5_sh_mask.h"
#include "dpcs/dpcs_4_2_2_offset.h"
#include "dpcs/dpcs_4_2_2_sh_mask.h"
#define NBIO_BASE__INST0_SEG0 0x00000000
#define NBIO_BASE__INST0_SEG1 0x00000014
#define NBIO_BASE__INST0_SEG2 0x00000D20
#define NBIO_BASE__INST0_SEG3 0x00010400
#define NBIO_BASE__INST0_SEG4 0x0241B000
#define NBIO_BASE__INST0_SEG5 0x04040000
#define DPCS_BASE__INST0_SEG0 0x00000012
#define DPCS_BASE__INST0_SEG1 0x000000C0
#define DPCS_BASE__INST0_SEG2 0x000034C0
#define DPCS_BASE__INST0_SEG3 0x00009000
#define DPCS_BASE__INST0_SEG4 0x02403C00
#define DPCS_BASE__INST0_SEG5 0
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
#define regBIF_BX_PF2_RSMU_DATA 0x0001
#define regBIF_BX_PF2_RSMU_DATA_BASE_IDX 1
#define regBIF_BX2_BIOS_SCRATCH_6 0x003e
#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1
#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0
#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL
#define regBIF_BX2_BIOS_SCRATCH_2 0x003a
#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 1
#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0
#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL
#define regBIF_BX2_BIOS_SCRATCH_3 0x003b
#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 1
#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0
#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL
#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6
#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "link_enc_cfg.h"
#define DCN3_15_MAX_DET_SIZE 384
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB)
/* A minimum of 2 extra segments must be in the compbuf and claimable to guarantee seamless MPO transitions */
#define MIN_RESERVED_DET_SEGS 2
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
DCN31_CLK_SRC_PLL2,
DCN31_CLK_SRC_PLL3,
DCN31_CLK_SRC_PLL4,
DCN30_CLK_SRC_TOTAL
};
/* begin *********************
 * macros to expand the register list macros defined in the HW object header files
 */
/* DCN */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
.field_name = reg_name ## __ ## field_name ## post_fix
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
reg ## reg_name ## _ ## block ## id
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
regBIF_BX2_ ## reg_name
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, C),
clk_src_regs(3, D),
clk_src_regs(4, E)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#define abm_regs(id)\
[id] = {\
ABM_DCN302_REG_LIST(id)\
}
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
abm_regs(1),
abm_regs(2),
abm_regs(3),
};
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN30(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
audio_regs(2),
audio_regs(3),
audio_regs(4),
audio_regs(5),
audio_regs(6)
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
#define vpg_regs(id)\
[id] = {\
VPG_DCN31_REG_LIST(id)\
}
static const struct dcn31_vpg_registers vpg_regs[] = {
vpg_regs(0),
vpg_regs(1),
vpg_regs(2),
vpg_regs(3),
vpg_regs(4),
vpg_regs(5),
vpg_regs(6),
vpg_regs(7),
vpg_regs(8),
vpg_regs(9),
};
static const struct dcn31_vpg_shift vpg_shift = {
DCN31_VPG_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_vpg_mask vpg_mask = {
DCN31_VPG_MASK_SH_LIST(_MASK)
};
#define afmt_regs(id)\
[id] = {\
AFMT_DCN31_REG_LIST(id)\
}
static const struct dcn31_afmt_registers afmt_regs[] = {
afmt_regs(0),
afmt_regs(1),
afmt_regs(2),
afmt_regs(3),
afmt_regs(4),
afmt_regs(5)
};
static const struct dcn31_afmt_shift afmt_shift = {
DCN31_AFMT_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_afmt_mask afmt_mask = {
DCN31_AFMT_MASK_SH_LIST(_MASK)
};
#define apg_regs(id)\
[id] = {\
APG_DCN31_REG_LIST(id)\
}
static const struct dcn31_apg_registers apg_regs[] = {
apg_regs(0),
apg_regs(1),
apg_regs(2),
apg_regs(3)
};
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN3_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
stream_enc_regs(4)
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4)
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4)
};
#define link_regs(id, phyid)\
[id] = {\
LE_DCN31_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
DPCS_DCN31_REG_LIST(id), \
}
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
link_regs(2, C),
link_regs(3, D),
link_regs(4, E)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
DPCS_DCN31_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \
DPCS_DCN31_MASK_SH_LIST(_MASK)
};
#define hpo_dp_stream_encoder_reg_list(id)\
[id] = {\
DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
}
static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
hpo_dp_stream_encoder_reg_list(3),
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
};
#define hpo_dp_link_encoder_reg_list(id)\
[id] = {\
DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
DCN3_1_RDPCSTX_REG_LIST(0),\
DCN3_1_RDPCSTX_REG_LIST(1),\
DCN3_1_RDPCSTX_REG_LIST(2),\
DCN3_1_RDPCSTX_REG_LIST(3),\
DCN3_1_RDPCSTX_REG_LIST(4)\
}
static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
hpo_dp_link_encoder_reg_list(0),
hpo_dp_link_encoder_reg_list(1),
};
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
#define dpp_regs(id)\
[id] = {\
DPP_REG_LIST_DCN30(id),\
}
static const struct dcn3_dpp_registers dpp_regs[] = {
dpp_regs(0),
dpp_regs(1),
dpp_regs(2),
dpp_regs(3)
};
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
};
static const struct dcn3_dpp_mask tf_mask = {
DPP_REG_LIST_SH_MASK_DCN30(_MASK)
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN30(id),\
}
static const struct dcn20_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
opp_regs(2),
opp_regs(3)
};
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST0(id), \
.AUXN_IMPCAL = 0, \
.AUXP_IMPCAL = 0, \
.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4)
};
#define dwbc_regs_dcn3(id)\
[id] = {\
DWBC_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_dwbc_registers dwbc30_regs[] = {
dwbc_regs_dcn3(0),
};
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define mcif_wb_regs_dcn3(id)\
[id] = {\
MCIF_WB_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
mcif_wb_regs_dcn3(0)
};
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
dsc_regsDCN20(0),
dsc_regsDCN20(1),
dsc_regsDCN20(2)
};
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
MPC_REG_LIST_DCN3_0(0),
MPC_REG_LIST_DCN3_0(1),
MPC_REG_LIST_DCN3_0(2),
MPC_REG_LIST_DCN3_0(3),
MPC_OUT_MUX_REG_LIST_DCN3_0(0),
MPC_OUT_MUX_REG_LIST_DCN3_0(1),
MPC_OUT_MUX_REG_LIST_DCN3_0(2),
MPC_OUT_MUX_REG_LIST_DCN3_0(3),
MPC_DWB_MUX_REG_LIST_DCN3_0(0),
};
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define optc_regs(id)\
[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)}
static const struct dcn_optc_registers optc_regs[] = {
optc_regs(0),
optc_regs(1),
optc_regs(2),
optc_regs(3)
};
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT)
};
static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK)
};
#define hubp_regs(id)\
[id] = {\
HUBP_REG_LIST_DCN30(id)\
}
static const struct dcn_hubp2_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
hubp_regs(2),
hubp_regs(3)
};
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN31(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN31(0)
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN31(_MASK)
};
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN31()
};
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dccg_mask dccg_mask = {
DCCG_MASK_SH_LIST_DCN31(_MASK)
};
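/*
 * Registers touched by the DCN3.1 HW sequencer: memory power control,
 * clock gating, per-OTG pixel rate, power-gating domain config/status,
 * VGA and Azalia control.
 */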
#define SRII2(reg_name_pre, reg_name_post, id)\
.reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
## id ## _ ## reg_name_post ## _BASE_IDX) + \
reg ## reg_name_pre ## id ## _ ## reg_name_post
#define HWSEQ_DCN31_REG_LIST()\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DIO_MEM_PWR_CTRL), \
SR(ODM_MEM_PWR_CTRL3), \
SR(DMU_MEM_PWR_CNTL), \
SR(MMHUBBUB_MEM_PWR_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SRII(PIXEL_RATE_CNTL, OTG, 0), \
SRII(PIXEL_RATE_CNTL, OTG, 1),\
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
SR(MICROSECOND_TIME_BASE_DIV), \
SR(MILLISECOND_TIME_BASE_DIV), \
SR(DISPCLK_FREQ_CHANGE_CNTL), \
SR(RBBMIF_TIMEOUT_DIS), \
SR(RBBMIF_TIMEOUT_DIS_2), \
SR(DCHUBBUB_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
SR(MPC_CRC_CTRL), \
SR(MPC_CRC_RESULT_GB), \
SR(MPC_CRC_RESULT_C), \
SR(MPC_CRC_RESULT_AR), \
SR(DOMAIN0_PG_CONFIG), \
SR(DOMAIN1_PG_CONFIG), \
SR(DOMAIN2_PG_CONFIG), \
SR(DOMAIN3_PG_CONFIG), \
SR(DOMAIN16_PG_CONFIG), \
SR(DOMAIN17_PG_CONFIG), \
SR(DOMAIN18_PG_CONFIG), \
SR(DOMAIN0_PG_STATUS), \
SR(DOMAIN1_PG_STATUS), \
SR(DOMAIN2_PG_STATUS), \
SR(DOMAIN3_PG_STATUS), \
SR(DOMAIN16_PG_STATUS), \
SR(DOMAIN17_PG_STATUS), \
SR(DOMAIN18_PG_STATUS), \
SR(D1VGA_CONTROL), \
SR(D2VGA_CONTROL), \
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING), \
SR(HPO_TOP_HW_CONTROL)
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN31_REG_LIST()
};
#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN31_MASK_SH_LIST(_MASK)
};
#define vmid_regs(id)\
[id] = {\
DCN20_VMID_REG_LIST(id)\
}
static const struct dcn_vmid_registers vmid_regs[] = {
vmid_regs(0),
vmid_regs(1),
vmid_regs(2),
vmid_regs(3),
vmid_regs(4),
vmid_regs(5),
vmid_regs(6),
vmid_regs(7),
vmid_regs(8),
vmid_regs(9),
vmid_regs(10),
vmid_regs(11),
vmid_regs(12),
vmid_regs(13),
vmid_regs(14),
vmid_regs(15)
};
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
};
static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
static const struct resource_caps res_cap_dcn31 = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
.num_audio = 5,
.num_stream_encoder = 5,
.num_dig_link_enc = 5,
.num_hpo_dp_stream_encoder = 4,
.num_hpo_dp_link_encoder = 2,
.num_pll = 5,
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
.num_dsc = 3,
};
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
.p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 16000
},
// 6:1 downscaling ratio: 1000/6 = 166.666
.max_downscale_factor = {
.argb8888 = 167,
.nv12 = 167,
.fp16 = 167
},
64,
64
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true, /* HW does not support it */
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.pstate_enabled = true,
.use_max_lb = true,
.enable_mem_low_power = {
.bits = {
.vga = true,
.i2c = true,
.dmcu = false, // Previously known to cause a hang on S3 cycles if enabled
.dscl = true,
.cm = true,
.mpc = true,
.optc = true,
.vpg = true,
.afmt = true,
}
},
.enable_legacy_fast_update = true,
.psr_power_use_phy_fsm = 0,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
.disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},
};
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
}
static struct dpp *dcn31_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn3_dpp *dpp =
kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
if (dpp3_construct(dpp, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp->base;
BREAK_TO_DEBUGGER();
kfree(dpp);
return NULL;
}
static struct output_pixel_processor *dcn31_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
static struct dce_aux *dcn31_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
i2c_inst_regs(3),
i2c_inst_regs(4),
i2c_inst_regs(5),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
static struct dce_i2c_hw *dcn31_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
static struct mpc *dcn31_mpc_create(
struct dc_context *ctx,
int num_mpcc,
int num_rmu)
{
struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
GFP_KERNEL);
if (!mpc30)
return NULL;
dcn30_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
num_mpcc,
num_rmu);
return &mpc30->base;
}
static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
GFP_KERNEL);
if (!hubbub3)
return NULL;
hubbub31_construct(hubbub3, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask,
dcn3_15_ip.det_buffer_size_kbytes,
dcn3_15_ip.pixel_chunk_size_kbytes,
dcn3_15_ip.config_return_buffer_size_in_kbytes);
for (i = 0; i < res_cap_dcn31.num_vmid; i++) {
struct dcn20_vmid *vmid = &hubbub3->vmid[i];
vmid->ctx = ctx;
vmid->regs = &vmid_regs[i];
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
return &hubbub3->base;
}
static struct timing_generator *dcn31_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &optc_regs[instance];
tgn10->tg_shift = &optc_shift;
tgn10->tg_mask = &optc_mask;
dcn31_timing_generator_init(tgn10);
return &tgn10->base;
}
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
static struct link_encoder *dcn31_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
if (!enc20)
return NULL;
dcn31_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc20->enc10.base;
}
/* Create a minimal link encoder object not associated with a particular
* physical connector.
* resource_funcs.link_enc_create_minimal
*/
static struct link_encoder *dcn31_link_enc_create_minimal(
struct dc_context *ctx, enum engine_id eng_id)
{
struct dcn20_link_encoder *enc20;
if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
return NULL;
enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
if (!enc20)
return NULL;
dcn31_link_encoder_construct_minimal(
enc20,
ctx,
&link_enc_feature,
&link_enc_regs[eng_id - ENGINE_ID_DIGA],
eng_id);
return &enc20->enc10.base;
}
static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dcn31_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
if (!panel_cntl)
return NULL;
dcn31_panel_cntl_construct(panel_cntl, init_data);
return &panel_cntl->base;
}
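/* Read the audio enable pin strap from the DC_PINSTRAPS register. */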
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct audio *dcn31_create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
static struct vpg *dcn31_vpg_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
if (!vpg31)
return NULL;
vpg31_construct(vpg31, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
&vpg_mask);
return &vpg31->base;
}
static struct afmt *dcn31_afmt_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
if (!afmt31)
return NULL;
afmt31_construct(afmt31, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
&afmt_mask);
// Light sleep by default, no need to power down here
return &afmt31->base;
}
static struct apg *dcn31_apg_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
if (!apg31)
return NULL;
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
&apg_mask);
return &apg31->base;
}
static struct stream_encoder *dcn315_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
struct afmt *afmt;
int vpg_inst;
int afmt_inst;
/* PHYB is wired off in HW; allow the front end to remap, otherwise more changes are needed */
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
if (eng_id <= ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
return NULL;
enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
vpg = dcn31_vpg_create(ctx, vpg_inst);
afmt = dcn31_afmt_create(ctx, afmt_inst);
if (!enc1 || !vpg || !afmt) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
return NULL;
}
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
struct vpg *vpg;
struct apg *apg;
uint32_t hpo_dp_inst;
uint32_t vpg_inst;
uint32_t apg_inst;
ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
/* Mapping of VPG register blocks to HPO DP block instance:
* VPG[6] -> HPO_DP[0]
* VPG[7] -> HPO_DP[1]
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
vpg_inst = hpo_dp_inst + 6;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
* APG[1] -> HPO_DP[1]
* APG[2] -> HPO_DP[2]
* APG[3] -> HPO_DP[3]
*/
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
if (!hpo_dp_enc31 || !vpg || !apg) {
kfree(hpo_dp_enc31);
kfree(vpg);
kfree(apg);
return NULL;
}
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
&hpo_dp_se_shift, &hpo_dp_se_mask);
return &hpo_dp_enc31->base;
}
static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
uint8_t inst,
struct dc_context *ctx)
{
struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
return &hpo_dp_enc31->base;
}
static struct dce_hwseq *dcn31_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn31_create_audio,
.create_stream_encoder = dcn315_stream_encoder_create,
.create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
.create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
.create_hwseq = dcn31_hwseq_create,
};
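/*
 * Free every HW block object owned by the pool; each pointer is checked
 * first, so this is safe to call on a partially constructed pool.
 */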
static void dcn315_resource_destruct(struct dcn315_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
if (pool->base.stream_enc[i]->vpg != NULL) {
kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
pool->base.stream_enc[i]->vpg = NULL;
}
if (pool->base.stream_enc[i]->afmt != NULL) {
kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
pool->base.stream_enc[i]->afmt = NULL;
}
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
if (pool->base.hpo_dp_stream_enc[i] != NULL) {
if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
}
if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
pool->base.hpo_dp_stream_enc[i]->apg = NULL;
}
kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
pool->base.hpo_dp_stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
if (pool->base.hpo_dp_link_enc[i] != NULL) {
kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
pool->base.hpo_dp_link_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
if (pool->base.hubbub != NULL) {
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.dpps[i] != NULL)
dcn31_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
}
if (pool->base.sw_i2cs[i] != NULL) {
kfree(pool->base.sw_i2cs[i]);
pool->base.sw_i2cs[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
if (pool->base.dwbc[i] != NULL) {
kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
pool->base.dwbc[i] = NULL;
}
if (pool->base.mcif_wb[i] != NULL) {
kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
pool->base.mcif_wb[i] = NULL;
}
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
if (pool->base.mpc_lut[i] != NULL) {
dc_3dlut_func_release(pool->base.mpc_lut[i]);
pool->base.mpc_lut[i] = NULL;
}
if (pool->base.mpc_shaper[i] != NULL) {
dc_transfer_func_release(pool->base.mpc_shaper[i]);
pool->base.mpc_shaper[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.multiple_abms[i] != NULL)
dce_abm_destroy(&pool->base.multiple_abms[i]);
}
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
static struct hubp *dcn31_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
if (hubp31_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
BREAK_TO_DEBUGGER();
kfree(hubp2);
return NULL;
}
static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
GFP_KERNEL);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
return false;
}
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
&dwbc30_mask,
i);
pool->dwbc[i] = &dwbc30->base;
}
return true;
}
static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
GFP_KERNEL);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
return false;
}
dcn30_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
&mcif_wb30_mask,
i);
pool->mcif_wb[i] = &mcif_wb30->base;
}
return true;
}
static struct display_stream_compressor *dcn31_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
return NULL;
}
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
return &dsc->base;
}
static void dcn315_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn315_resource_pool *dcn31_pool = TO_DCN315_RES_POOL(*pool);
dcn315_resource_destruct(dcn31_pool);
kfree(dcn31_pool);
*pool = NULL;
}
static struct clock_source *dcn31_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
static bool is_dual_plane(enum surface_pixel_format format)
{
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
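/*
 * Approximate bytes per pixel for a DML source format, used below to
 * estimate how many DET segments a pipe needs for p-state support.
 */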
static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
{
if (SourcePixelFormat == dm_444_64)
return 8;
else if (SourcePixelFormat == dm_444_16)
return 2;
else if (SourcePixelFormat == dm_444_8)
return 1;
else if (SourcePixelFormat == dm_rgbe_alpha)
return 5;
else if (SourcePixelFormat == dm_420_8)
return 3;
else if (SourcePixelFormat == dm_420_12)
return 6;
else
return 4;
}
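/*
 * Pixel-rate based CRB/DET allocation is only attempted when there are
 * multiple streams and every pipe is unscaled and non-MPO; otherwise the
 * default even DET split is used.
 */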
static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
{
int i;
struct resource_context *res_ctx = &context->res_ctx;
/* Don't apply for a single stream */
if (context->stream_count < 2)
return false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
continue;
/*Don't apply if scaling*/
if (res_ctx->pipe_ctx[i].stream->src.width != res_ctx->pipe_ctx[i].stream->dst.width ||
res_ctx->pipe_ctx[i].stream->src.height != res_ctx->pipe_ctx[i].stream->dst.height ||
(res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->src_rect.width
!= res_ctx->pipe_ctx[i].plane_state->dst_rect.width ||
res_ctx->pipe_ctx[i].plane_state->src_rect.height
!= res_ctx->pipe_ctx[i].plane_state->dst_rect.height)))
return false;
/*Don't apply if MPO to avoid transition issues*/
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state)
return false;
}
return true;
}
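/*
 * Two-pass DET assignment: the first pass reserves the segments each head
 * pipe needs for p-state support (padded to an even count when a split is
 * required), the second pass spreads the segments left above the
 * MIN_RESERVED_DET_SEGS floor evenly across those pipes and converts the
 * per-pipe segment counts into KB for DML.
 */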
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe = NULL;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
* intermittently experienced depending on peak b/w requirements.
*/
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
&context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB);
if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) {
bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS;
split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc);
split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
/* Minimum 2 segments to allow mpc/odm combine if it's used later */
if (approx_det_segs_required_for_pstate < 2)
approx_det_segs_required_for_pstate = 2;
if (split_required)
approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2;
pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate;
remaining_det_segs -= approx_det_segs_required_for_pstate;
} else
remaining_det_segs = -1;
crb_pipes++;
}
DC_FP_END();
if (pipes[pipe_cnt].dout.dsc_enable) {
switch (timing->display_color_depth) {
case COLOR_DEPTH_888:
pipes[pipe_cnt].dout.dsc_input_bpc = 8;
break;
case COLOR_DEPTH_101010:
pipes[pipe_cnt].dout.dsc_input_bpc = 10;
break;
case COLOR_DEPTH_121212:
pipes[pipe_cnt].dout.dsc_input_bpc = 12;
break;
default:
ASSERT(0);
break;
}
}
pipe_cnt++;
}
/* Spread remaining unreserved CRB evenly among all pipes */
if (pixel_rate_crb) {
for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &res_ctx->pipe_ctx[i];
if (!pipe->stream)
continue;
/* Do not use asymmetric CRB if there is not enough for p-state support */
if (remaining_det_segs < 0) {
pipes[pipe_cnt].pipe.src.det_size_override = 0;
pipe_cnt++;
continue;
}
if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
/* Clamp to 2 pipe split max det segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
}
if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
/* If we are splitting we must have an even number of segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
/* Convert segments into size for DML use */
pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
crb_idx++;
}
pipe_cnt++;
}
}
if (pipe_cnt)
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB;
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
dc->config.enable_4to1MPC = true;
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB;
} else if (!is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 5120
&& pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) {
/* Limit to 5K max to avoid a forced pipe split when there is not enough detile buffer for the swath */
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
}
}
return pipe_cnt;
}
static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
static struct resource_funcs dcn315_res_pool_funcs = {
.destroy = dcn315_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
.link_enc_create_minimal = dcn31_link_enc_create_minimal,
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
};
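/*
 * Build the DCN3.15 resource pool: hardcoded caps first, then clock
 * sources, DCCG, IRQ service, HUBBUB, per-pipe HUBP/DPP/OPP/TG, PSR, ABM,
 * MPC, DSC, writeback, AUX/I2C engines and finally the HW sequencer.
 * Any failure unwinds through dcn315_resource_destruct().
 */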
static bool dcn315_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn315_resource_pool *pool)
{
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn31;
pool->base.funcs = &dcn315_res_pool_funcs;
/*************************************************
* Resource + asic cap hardcoding                *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.max_slave_planes = 2;
dc->caps.max_slave_yuv_planes = 2;
dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->config.forceHBR2CP2520)
dc->caps.force_dp_tps4_for_cp2520 = false;
dc->caps.dp_hpo = true;
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
dc->caps.color.dpp.dgam_rom_caps.pq = 1;
dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
dc->caps.color.dpp.post_csc = 1;
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
// no OGAM ROM on DCN301
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 0;
dc->caps.color.mpc.gamut_remap = 1;
dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
dc->caps.color.mpc.ogam_ram = 1;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
enum bp_result bp_query_result;
uint8_t is_vbios_lttpr_enable = 0;
bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
}
/* interop bit is implicit */
{
dc->caps.vbios_lttpr_aware = true;
}
}
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
/*************************************************
* Create resources *
*************************************************/
/* Clock Sources for Pixel Clock*/
pool->base.clock_sources[DCN31_CLK_SRC_PLL0] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL1] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;
/* TODO: do not reuse the phy_pll registers */
pool->base.dp_clock_source =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
/* TODO: DCCG */
pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
if (pool->base.dccg == NULL) {
dm_error("DC: failed to create dccg!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* TODO: IRQ */
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn315_create(&init_data);
if (!pool->base.irqs)
goto create_fail;
/* HUBBUB */
pool->base.hubbub = dcn31_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create hubbub!\n");
goto create_fail;
}
/* HUBPs, DPPs, OPPs and TGs */
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.hubps[i] = dcn31_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create hubps!\n");
goto create_fail;
}
pool->base.dpps[i] = dcn31_dpp_create(ctx, i);
if (pool->base.dpps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool->base.opps[i] = dcn31_opp_create(ctx, i);
if (pool->base.opps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.timing_generators[i] = dcn31_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
}
}
pool->base.timing_generator_count = i;
/* PSR */
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
dm_error("DC: failed to create psr obj!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
&abm_regs[i],
&abm_shift,
&abm_mask);
if (pool->base.multiple_abms[i] == NULL) {
dm_error("DC: failed to create abm for pipe %d!\n", i);
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
/* MPC and DSC */
pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
goto create_fail;
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn31_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create display stream compressor %d!\n", i);
goto create_fail;
}
}
/* DWB and MMHUBBUB */
if (!dcn31_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create dwbc!\n");
goto create_fail;
}
if (!dcn31_mmhubbub_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mcif_wb!\n");
goto create_fail;
}
/* AUX and I2C */
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto create_fail;
}
pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create hw i2c!!\n");
goto create_fail;
}
pool->base.sw_i2cs[i] = NULL;
}
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
dcn31_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp;
return true;
create_fail:
dcn315_resource_destruct(pool);
return false;
}
struct resource_pool *dcn315_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn315_resource_pool *pool =
kzalloc(sizeof(struct dcn315_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
if (dcn315_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
kfree(pool);
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "dm_services.h"
#include "include/gpio_interface.h"
#include "include/gpio_service_interface.h"
#include "hw_gpio.h"
#include "hw_translate.h"
#include "hw_factory.h"
#include "gpio_service.h"
/*
* Post-requisites: headers required by this unit
*/
/*
* This unit
*/
/*
* @brief
* Public API
*/
enum gpio_result dal_gpio_open(
struct gpio *gpio,
enum gpio_mode mode)
{
return dal_gpio_open_ex(gpio, mode);
}
enum gpio_result dal_gpio_open_ex(
struct gpio *gpio,
enum gpio_mode mode)
{
if (gpio->pin) {
BREAK_TO_DEBUGGER();
return GPIO_RESULT_ALREADY_OPENED;
}
// No action if allocation failed during gpio construct
if (!gpio->hw_container.ddc) {
BREAK_TO_DEBUGGER();
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
gpio->mode = mode;
return dal_gpio_service_open(gpio);
}
enum gpio_result dal_gpio_get_value(
const struct gpio *gpio,
uint32_t *value)
{
if (!gpio->pin) {
BREAK_TO_DEBUGGER();
return GPIO_RESULT_NULL_HANDLE;
}
return gpio->pin->funcs->get_value(gpio->pin, value);
}
enum gpio_result dal_gpio_set_value(
const struct gpio *gpio,
uint32_t value)
{
if (!gpio->pin) {
BREAK_TO_DEBUGGER();
return GPIO_RESULT_NULL_HANDLE;
}
return gpio->pin->funcs->set_value(gpio->pin, value);
}
enum gpio_mode dal_gpio_get_mode(
const struct gpio *gpio)
{
return gpio->mode;
}
enum gpio_result dal_gpio_lock_pin(
struct gpio *gpio)
{
return dal_gpio_service_lock(gpio->service, gpio->id, gpio->en);
}
enum gpio_result dal_gpio_unlock_pin(
struct gpio *gpio)
{
return dal_gpio_service_unlock(gpio->service, gpio->id, gpio->en);
}
enum gpio_result dal_gpio_change_mode(
struct gpio *gpio,
enum gpio_mode mode)
{
if (!gpio->pin) {
BREAK_TO_DEBUGGER();
return GPIO_RESULT_NULL_HANDLE;
}
return gpio->pin->funcs->change_mode(gpio->pin, mode);
}
enum gpio_id dal_gpio_get_id(
const struct gpio *gpio)
{
return gpio->id;
}
uint32_t dal_gpio_get_enum(
const struct gpio *gpio)
{
return gpio->en;
}
enum gpio_result dal_gpio_set_config(
struct gpio *gpio,
const struct gpio_config_data *config_data)
{
if (!gpio->pin) {
BREAK_TO_DEBUGGER();
return GPIO_RESULT_NULL_HANDLE;
}
return gpio->pin->funcs->set_config(gpio->pin, config_data);
}
enum gpio_result dal_gpio_get_pin_info(
const struct gpio *gpio,
struct gpio_pin_info *pin_info)
{
return gpio->service->translate.funcs->id_to_offset(
gpio->id, gpio->en, pin_info) ?
GPIO_RESULT_OK : GPIO_RESULT_INVALID_DATA;
}
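/*
 * Map a GPIO id/enum pair to its sync source; anything unrecognized
 * yields SYNC_SOURCE_NONE.
 */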
enum sync_source dal_gpio_get_sync_source(
const struct gpio *gpio)
{
switch (gpio->id) {
case GPIO_ID_GENERIC:
switch (gpio->en) {
case GPIO_GENERIC_A:
return SYNC_SOURCE_IO_GENERIC_A;
case GPIO_GENERIC_B:
return SYNC_SOURCE_IO_GENERIC_B;
case GPIO_GENERIC_C:
return SYNC_SOURCE_IO_GENERIC_C;
case GPIO_GENERIC_D:
return SYNC_SOURCE_IO_GENERIC_D;
case GPIO_GENERIC_E:
return SYNC_SOURCE_IO_GENERIC_E;
case GPIO_GENERIC_F:
return SYNC_SOURCE_IO_GENERIC_F;
default:
return SYNC_SOURCE_NONE;
}
break;
case GPIO_ID_SYNC:
switch (gpio->en) {
case GPIO_SYNC_HSYNC_A:
return SYNC_SOURCE_IO_HSYNC_A;
case GPIO_SYNC_VSYNC_A:
return SYNC_SOURCE_IO_VSYNC_A;
case GPIO_SYNC_HSYNC_B:
return SYNC_SOURCE_IO_HSYNC_B;
case GPIO_SYNC_VSYNC_B:
return SYNC_SOURCE_IO_VSYNC_B;
default:
return SYNC_SOURCE_NONE;
}
break;
case GPIO_ID_HPD:
switch (gpio->en) {
case GPIO_HPD_1:
return SYNC_SOURCE_IO_HPD1;
case GPIO_HPD_2:
return SYNC_SOURCE_IO_HPD2;
default:
return SYNC_SOURCE_NONE;
}
break;
case GPIO_ID_GSL:
switch (gpio->en) {
case GPIO_GSL_GENLOCK_CLOCK:
return SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK;
case GPIO_GSL_GENLOCK_VSYNC:
return SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC;
case GPIO_GSL_SWAPLOCK_A:
return SYNC_SOURCE_GSL_IO_SWAPLOCK_A;
case GPIO_GSL_SWAPLOCK_B:
return SYNC_SOURCE_GSL_IO_SWAPLOCK_B;
default:
return SYNC_SOURCE_NONE;
}
break;
default:
return SYNC_SOURCE_NONE;
}
}
enum gpio_pin_output_state dal_gpio_get_output_state(
const struct gpio *gpio)
{
return gpio->output_state;
}
struct hw_ddc *dal_gpio_get_ddc(struct gpio *gpio)
{
return gpio->hw_container.ddc;
}
struct hw_hpd *dal_gpio_get_hpd(struct gpio *gpio)
{
return gpio->hw_container.hpd;
}
struct hw_generic *dal_gpio_get_generic(struct gpio *gpio)
{
return gpio->hw_container.generic;
}
void dal_gpio_close(
struct gpio *gpio)
{
if (!gpio)
return;
dal_gpio_service_close(gpio->service, &gpio->pin);
gpio->mode = GPIO_MODE_UNKNOWN;
}
/*
* @brief
* Creation and destruction
*/
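/*
 * The hw_container member (ddc/generic/hpd) is allocated by the factory
 * in dal_gpio_create() according to the GPIO id and released again in
 * dal_gpio_destroy().
 */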
struct gpio *dal_gpio_create(
struct gpio_service *service,
enum gpio_id id,
uint32_t en,
enum gpio_pin_output_state output_state)
{
struct gpio *gpio = kzalloc(sizeof(struct gpio), GFP_KERNEL);
if (!gpio) {
ASSERT_CRITICAL(false);
return NULL;
}
gpio->service = service;
gpio->pin = NULL;
gpio->id = id;
gpio->en = en;
gpio->mode = GPIO_MODE_UNKNOWN;
gpio->output_state = output_state;
//initialize hw_container union based on id
switch (gpio->id) {
case GPIO_ID_DDC_DATA:
gpio->service->factory.funcs->init_ddc_data(&gpio->hw_container.ddc, service->ctx, id, en);
break;
case GPIO_ID_DDC_CLOCK:
gpio->service->factory.funcs->init_ddc_data(&gpio->hw_container.ddc, service->ctx, id, en);
break;
case GPIO_ID_GENERIC:
gpio->service->factory.funcs->init_generic(&gpio->hw_container.generic, service->ctx, id, en);
break;
case GPIO_ID_HPD:
gpio->service->factory.funcs->init_hpd(&gpio->hw_container.hpd, service->ctx, id, en);
break;
// TODO: currently GPIOs for sync and gsl do not get created, might be needed later
case GPIO_ID_SYNC:
break;
case GPIO_ID_GSL:
break;
default:
ASSERT_CRITICAL(false);
gpio->pin = NULL;
}
return gpio;
}
void dal_gpio_destroy(
struct gpio **gpio)
{
if (!gpio || !*gpio) {
ASSERT_CRITICAL(false);
return;
}
switch ((*gpio)->id) {
case GPIO_ID_DDC_DATA:
kfree((*gpio)->hw_container.ddc);
(*gpio)->hw_container.ddc = NULL;
break;
case GPIO_ID_DDC_CLOCK:
//TODO: might want to change it to init_ddc_clock
kfree((*gpio)->hw_container.ddc);
(*gpio)->hw_container.ddc = NULL;
break;
case GPIO_ID_GENERIC:
kfree((*gpio)->hw_container.generic);
(*gpio)->hw_container.generic = NULL;
break;
case GPIO_ID_HPD:
kfree((*gpio)->hw_container.hpd);
(*gpio)->hw_container.hpd = NULL;
break;
// TODO: currently GPIOs for sync and gsl do not get created, might be needed later
case GPIO_ID_SYNC:
break;
case GPIO_ID_GSL:
break;
default:
break;
}
kfree(*gpio);
*gpio = NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "dm_services.h"
#include "include/gpio_interface.h"
#include "include/gpio_service_interface.h"
#include "hw_translate.h"
#include "hw_factory.h"
/*
* Header of this unit
*/
#include "gpio_service.h"
/*
* Post-requisites: headers required by this unit
*/
#include "hw_gpio.h"
/*
* @brief
* Public API.
*/
struct gpio_service *dal_gpio_service_create(
enum dce_version dce_version,
enum dce_environment dce_environment,
struct dc_context *ctx)
{
struct gpio_service *service;
uint32_t index_of_id;
service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
if (!service) {
BREAK_TO_DEBUGGER();
return NULL;
}
if (!dal_hw_translate_init(&service->translate, dce_version,
dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
if (!dal_hw_factory_init(&service->factory, dce_version,
dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
/* allocate and initialize busyness storage */
{
index_of_id = 0;
service->ctx = ctx;
do {
uint32_t number_of_bits =
service->factory.number_of_pins[index_of_id];
uint32_t i = 0;
if (number_of_bits) {
service->busyness[index_of_id] =
kcalloc(number_of_bits, sizeof(char),
GFP_KERNEL);
if (!service->busyness[index_of_id]) {
BREAK_TO_DEBUGGER();
goto failure_2;
}
do {
service->busyness[index_of_id][i] = 0;
++i;
} while (i < number_of_bits);
} else {
service->busyness[index_of_id] = NULL;
}
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
}
return service;
failure_2:
while (index_of_id) {
--index_of_id;
kfree(service->busyness[index_of_id]);
}
failure_1:
kfree(service);
return NULL;
}
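/*
* The busyness table allocated above keeps one byte per (gpio_id, en)
* pair; dal_gpio_service_open/lock/unlock below consult and update it to
* decide whether a pin is already claimed.
*/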
struct gpio *dal_gpio_service_create_irq(
struct gpio_service *service,
uint32_t offset,
uint32_t mask)
{
enum gpio_id id;
uint32_t en;
if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
ASSERT_CRITICAL(false);
return NULL;
}
return dal_gpio_create_irq(service, id, en);
}
struct gpio *dal_gpio_service_create_generic_mux(
struct gpio_service *service,
uint32_t offset,
uint32_t mask)
{
enum gpio_id id;
uint32_t en;
struct gpio *generic;
if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
ASSERT_CRITICAL(false);
return NULL;
}
generic = dal_gpio_create(
service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
return generic;
}
void dal_gpio_destroy_generic_mux(
struct gpio **mux)
{
if (!mux || !*mux) {
ASSERT_CRITICAL(false);
return;
}
dal_gpio_destroy(mux);
kfree(*mux);
*mux = NULL;
}
struct gpio_pin_info dal_gpio_get_generic_pin_info(
struct gpio_service *service,
enum gpio_id id,
uint32_t en)
{
struct gpio_pin_info pin;
if (service->translate.funcs->id_to_offset) {
service->translate.funcs->id_to_offset(id, en, &pin);
} else {
pin.mask = 0xFFFFFFFF;
pin.offset = 0xFFFFFFFF;
}
return pin;
}
void dal_gpio_service_destroy(
struct gpio_service **ptr)
{
if (!ptr || !*ptr) {
BREAK_TO_DEBUGGER();
return;
}
/* free busyness storage */
{
uint32_t index_of_id = 0;
do {
kfree((*ptr)->busyness[index_of_id]);
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
}
kfree(*ptr);
*ptr = NULL;
}
enum gpio_result dal_mux_setup_config(
struct gpio *mux,
struct gpio_generic_mux_config *config)
{
struct gpio_config_data config_data;
if (!config)
return GPIO_RESULT_INVALID_DATA;
config_data.config.generic_mux = *config;
config_data.type = GPIO_CONFIG_TYPE_GENERIC_MUX;
return dal_gpio_set_config(mux, &config_data);
}
/*
* @brief
* Private API.
*/
static bool is_pin_busy(
const struct gpio_service *service,
enum gpio_id id,
uint32_t en)
{
return service->busyness[id][en];
}
static void set_pin_busy(
struct gpio_service *service,
enum gpio_id id,
uint32_t en)
{
service->busyness[id][en] = true;
}
static void set_pin_free(
struct gpio_service *service,
enum gpio_id id,
uint32_t en)
{
service->busyness[id][en] = false;
}
enum gpio_result dal_gpio_service_lock(
struct gpio_service *service,
enum gpio_id id,
uint32_t en)
{
if (!service->busyness[id]) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_OPEN_FAILED;
}
set_pin_busy(service, id, en);
return GPIO_RESULT_OK;
}
enum gpio_result dal_gpio_service_unlock(
struct gpio_service *service,
enum gpio_id id,
uint32_t en)
{
if (!service->busyness[id]) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_OPEN_FAILED;
}
set_pin_free(service, id, en);
return GPIO_RESULT_OK;
}
enum gpio_result dal_gpio_service_open(
struct gpio *gpio)
{
struct gpio_service *service = gpio->service;
enum gpio_id id = gpio->id;
uint32_t en = gpio->en;
enum gpio_mode mode = gpio->mode;
struct hw_gpio_pin **pin = &gpio->pin;
if (!service->busyness[id]) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_OPEN_FAILED;
}
if (is_pin_busy(service, id, en)) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_DEVICE_BUSY;
}
switch (id) {
case GPIO_ID_DDC_DATA:
*pin = service->factory.funcs->get_ddc_pin(gpio);
service->factory.funcs->define_ddc_registers(*pin, en);
break;
case GPIO_ID_DDC_CLOCK:
*pin = service->factory.funcs->get_ddc_pin(gpio);
service->factory.funcs->define_ddc_registers(*pin, en);
break;
case GPIO_ID_GENERIC:
*pin = service->factory.funcs->get_generic_pin(gpio);
service->factory.funcs->define_generic_registers(*pin, en);
break;
case GPIO_ID_HPD:
*pin = service->factory.funcs->get_hpd_pin(gpio);
service->factory.funcs->define_hpd_registers(*pin, en);
break;
//TODO: gsl and sync support? create_sync and create_gsl are NULL
case GPIO_ID_SYNC:
case GPIO_ID_GSL:
break;
default:
ASSERT_CRITICAL(false);
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
if (!*pin) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
if (!(*pin)->funcs->open(*pin, mode)) {
ASSERT_CRITICAL(false);
dal_gpio_service_close(service, pin);
return GPIO_RESULT_OPEN_FAILED;
}
set_pin_busy(service, id, en);
return GPIO_RESULT_OK;
}
void dal_gpio_service_close(
struct gpio_service *service,
struct hw_gpio_pin **ptr)
{
struct hw_gpio_pin *pin;
if (!ptr) {
ASSERT_CRITICAL(false);
return;
}
pin = *ptr;
if (pin) {
set_pin_free(service, pin->id, pin->en);
pin->funcs->close(pin);
*ptr = NULL;
}
}
enum dc_irq_source dal_irq_get_source(
const struct gpio *irq)
{
enum gpio_id id = dal_gpio_get_id(irq);
switch (id) {
case GPIO_ID_HPD:
return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1 +
dal_gpio_get_enum(irq));
case GPIO_ID_GPIO_PAD:
return (enum dc_irq_source)(DC_IRQ_SOURCE_GPIOPAD0 +
dal_gpio_get_enum(irq));
default:
return DC_IRQ_SOURCE_INVALID;
}
}
enum dc_irq_source dal_irq_get_rx_source(
const struct gpio *irq)
{
enum gpio_id id = dal_gpio_get_id(irq);
switch (id) {
case GPIO_ID_HPD:
return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1RX +
dal_gpio_get_enum(irq));
default:
return DC_IRQ_SOURCE_INVALID;
}
}
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config)
{
struct gpio_config_data config_data;
if (!config)
return GPIO_RESULT_INVALID_DATA;
config_data.type = GPIO_CONFIG_TYPE_HPD;
config_data.config.hpd = *config;
return dal_gpio_set_config(irq, &config_data);
}
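/*
* Example use (variable and delay values are hypothetical; the field
* names are those of struct gpio_hpd_config consumed above):
*
*   struct gpio_hpd_config cfg = {
*       .delay_on_connect = 500,
*       .delay_on_disconnect = 100,
*   };
*   dal_irq_setup_hpd_filter(hpd_gpio, &cfg);
*/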
/*
* @brief
* Creation and destruction
*/
struct gpio *dal_gpio_create_irq(
struct gpio_service *service,
enum gpio_id id,
uint32_t en)
{
struct gpio *irq;
switch (id) {
case GPIO_ID_HPD:
case GPIO_ID_GPIO_PAD:
break;
default:
id = GPIO_ID_HPD;
ASSERT_CRITICAL(false);
return NULL;
}
irq = dal_gpio_create(
service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
if (irq)
return irq;
ASSERT_CRITICAL(false);
return NULL;
}
void dal_gpio_destroy_irq(
struct gpio **irq)
{
if (!irq || !*irq) {
ASSERT_CRITICAL(false);
return;
}
dal_gpio_destroy(irq);
kfree(*irq);
*irq = NULL;
}
struct ddc *dal_gpio_create_ddc(
struct gpio_service *service,
uint32_t offset,
uint32_t mask,
struct gpio_ddc_hw_info *info)
{
enum gpio_id id;
uint32_t en;
struct ddc *ddc;
if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en))
return NULL;
ddc = kzalloc(sizeof(struct ddc), GFP_KERNEL);
if (!ddc) {
BREAK_TO_DEBUGGER();
return NULL;
}
ddc->pin_data = dal_gpio_create(
service, GPIO_ID_DDC_DATA, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
if (!ddc->pin_data) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
ddc->pin_clock = dal_gpio_create(
service, GPIO_ID_DDC_CLOCK, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
if (!ddc->pin_clock) {
BREAK_TO_DEBUGGER();
goto failure_2;
}
ddc->hw_info = *info;
ddc->ctx = service->ctx;
return ddc;
failure_2:
dal_gpio_destroy(&ddc->pin_data);
failure_1:
kfree(ddc);
return NULL;
}
void dal_gpio_destroy_ddc(
struct ddc **ddc)
{
if (!ddc || !*ddc) {
BREAK_TO_DEBUGGER();
return;
}
dal_ddc_close(*ddc);
dal_gpio_destroy(&(*ddc)->pin_data);
dal_gpio_destroy(&(*ddc)->pin_clock);
kfree(*ddc);
*ddc = NULL;
}
enum gpio_result dal_ddc_open(
struct ddc *ddc,
enum gpio_mode mode,
enum gpio_ddc_config_type config_type)
{
enum gpio_result result;
struct gpio_config_data config_data;
struct hw_gpio *hw_data;
struct hw_gpio *hw_clock;
result = dal_gpio_open_ex(ddc->pin_data, mode);
if (result != GPIO_RESULT_OK) {
BREAK_TO_DEBUGGER();
return result;
}
result = dal_gpio_open_ex(ddc->pin_clock, mode);
if (result != GPIO_RESULT_OK) {
BREAK_TO_DEBUGGER();
goto failure;
}
/* DDC clock and data pins should belong
* to the same DDC block id,
* we use the data pin to set the pad mode. */
if (mode == GPIO_MODE_INPUT)
/* this is from detect_sink_type,
* we need extra delay there */
config_data.type = GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE;
else
config_data.type = GPIO_CONFIG_TYPE_DDC;
config_data.config.ddc.type = config_type;
hw_data = FROM_HW_GPIO_PIN(ddc->pin_data->pin);
hw_clock = FROM_HW_GPIO_PIN(ddc->pin_clock->pin);
config_data.config.ddc.data_en_bit_present = hw_data->store.en != 0;
config_data.config.ddc.clock_en_bit_present = hw_clock->store.en != 0;
result = dal_gpio_set_config(ddc->pin_data, &config_data);
if (result == GPIO_RESULT_OK)
return result;
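/* set_config on the data pin failed: unwind in reverse order
* (close the clock pin here, the data pin via the failure label) */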
BREAK_TO_DEBUGGER();
dal_gpio_close(ddc->pin_clock);
failure:
dal_gpio_close(ddc->pin_data);
return result;
}
enum gpio_result dal_ddc_change_mode(
struct ddc *ddc,
enum gpio_mode mode)
{
enum gpio_result result;
enum gpio_mode original_mode =
dal_gpio_get_mode(ddc->pin_data);
result = dal_gpio_change_mode(ddc->pin_data, mode);
/* [anaumov] DAL2 code returns GPIO_RESULT_NON_SPECIFIC_ERROR
* in case of failures;
* set_mode() behaves the same way, so in case of failure
* we must explicitly restore the original mode */
if (result != GPIO_RESULT_OK)
goto failure;
result = dal_gpio_change_mode(ddc->pin_clock, mode);
if (result == GPIO_RESULT_OK)
return result;
dal_gpio_change_mode(ddc->pin_clock, original_mode);
failure:
dal_gpio_change_mode(ddc->pin_data, original_mode);
return result;
}
enum gpio_ddc_line dal_ddc_get_line(
const struct ddc *ddc)
{
return (enum gpio_ddc_line)dal_gpio_get_enum(ddc->pin_data);
}
enum gpio_result dal_ddc_set_config(
struct ddc *ddc,
enum gpio_ddc_config_type config_type)
{
struct gpio_config_data config_data;
config_data.type = GPIO_CONFIG_TYPE_DDC;
config_data.config.ddc.type = config_type;
config_data.config.ddc.data_en_bit_present = false;
config_data.config.ddc.clock_en_bit_present = false;
return dal_gpio_set_config(ddc->pin_data, &config_data);
}
void dal_ddc_close(
struct ddc *ddc)
{
if (ddc != NULL) {
dal_gpio_close(ddc->pin_clock);
dal_gpio_close(ddc->pin_data);
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
/*
* Pre-requisites: headers required by header of this unit
*/
#include "include/gpio_types.h"
/*
* Header of this unit
*/
#include "hw_factory.h"
/*
* Post-requisites: headers required by this unit
*/
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/hw_factory_dce60.h"
#endif
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
#include "dcn10/hw_factory_dcn10.h"
#include "dcn20/hw_factory_dcn20.h"
#include "dcn21/hw_factory_dcn21.h"
#include "dcn30/hw_factory_dcn30.h"
#include "dcn315/hw_factory_dcn315.h"
#include "dcn32/hw_factory_dcn32.h"
bool dal_hw_factory_init(
struct hw_factory *factory,
enum dce_version dce_version,
enum dce_environment dce_environment)
{
switch (dce_version) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case DCE_VERSION_6_0:
case DCE_VERSION_6_1:
case DCE_VERSION_6_4:
dal_hw_factory_dce60_init(factory);
return true;
#endif
case DCE_VERSION_8_0:
case DCE_VERSION_8_1:
case DCE_VERSION_8_3:
dal_hw_factory_dce80_init(factory);
return true;
case DCE_VERSION_10_0:
dal_hw_factory_dce110_init(factory);
return true;
case DCE_VERSION_11_0:
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
dal_hw_factory_dce110_init(factory);
return true;
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
dal_hw_factory_dce120_init(factory);
return true;
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_factory_dcn10_init(factory);
return true;
case DCN_VERSION_2_0:
dal_hw_factory_dcn20_init(factory);
return true;
case DCN_VERSION_2_01:
case DCN_VERSION_2_1:
dal_hw_factory_dcn21_init(factory);
return true;
case DCN_VERSION_3_0:
case DCN_VERSION_3_01:
case DCN_VERSION_3_02:
case DCN_VERSION_3_03:
case DCN_VERSION_3_1:
case DCN_VERSION_3_14:
case DCN_VERSION_3_16:
dal_hw_factory_dcn30_init(factory);
return true;
case DCN_VERSION_3_15:
dal_hw_factory_dcn315_init(factory);
return true;
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
dal_hw_factory_dcn32_init(factory);
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/slab.h>
#include "dm_services.h"
#include "include/gpio_interface.h"
#include "include/gpio_types.h"
#include "hw_gpio.h"
#include "hw_generic.h"
#include "reg_helper.h"
#include "generic_regs.h"
#undef FN
#define FN(reg_name, field_name) \
generic->shifts->field_name, generic->masks->field_name
#define CTX \
generic->base.base.ctx
#define REG(reg)\
(generic->regs->reg)
struct gpio;
static void dal_hw_generic_destruct(
struct hw_generic *pin)
{
dal_hw_gpio_destruct(&pin->base);
}
static void dal_hw_generic_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr);
dal_hw_generic_destruct(generic);
kfree(generic);
*ptr = NULL;
}
static enum gpio_result set_config(
struct hw_gpio_pin *ptr,
const struct gpio_config_data *config_data)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(ptr);
if (!config_data)
return GPIO_RESULT_INVALID_DATA;
REG_UPDATE_2(mux,
GENERIC_EN, config_data->config.generic_mux.enable_output_from_mux,
GENERIC_SEL, config_data->config.generic_mux.mux_select);
return GPIO_RESULT_OK;
}
static const struct hw_gpio_pin_funcs funcs = {
.destroy = dal_hw_generic_destroy,
.open = dal_hw_gpio_open,
.get_value = dal_hw_gpio_get_value,
.set_value = dal_hw_gpio_set_value,
.set_config = set_config,
.change_mode = dal_hw_gpio_change_mode,
.close = dal_hw_gpio_close,
};
static void dal_hw_generic_construct(
struct hw_generic *pin,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
dal_hw_gpio_construct(&pin->base, id, en, ctx);
pin->base.base.funcs = &funcs;
}
void dal_hw_generic_init(
struct hw_generic **hw_generic,
struct dc_context *ctx,
enum gpio_id id,
uint32_t en)
{
if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
ASSERT_CRITICAL(false);
*hw_generic = NULL;
return;
}
*hw_generic = kzalloc(sizeof(struct hw_generic), GFP_KERNEL);
if (!*hw_generic) {
ASSERT_CRITICAL(false);
return;
}
dal_hw_generic_construct(*hw_generic, id, en, ctx);
}
struct hw_gpio_pin *dal_hw_generic_get_pin(struct gpio *gpio)
{
struct hw_generic *hw_generic = dal_gpio_get_generic(gpio);
return &hw_generic->base.base;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_interface.h"
#include "include/gpio_types.h"
#include "hw_gpio.h"
#include "hw_hpd.h"
#include "reg_helper.h"
#include "hpd_regs.h"
#undef FN
#define FN(reg_name, field_name) \
hpd->shifts->field_name, hpd->masks->field_name
#define CTX \
hpd->base.base.ctx
#define REG(reg)\
(hpd->regs->reg)
struct gpio;
static void dal_hw_hpd_destruct(
struct hw_hpd *pin)
{
dal_hw_gpio_destruct(&pin->base);
}
static void dal_hw_hpd_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
dal_hw_hpd_destruct(hpd);
kfree(hpd);
*ptr = NULL;
}
static enum gpio_result get_value(
const struct hw_gpio_pin *ptr,
uint32_t *value)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
uint32_t hpd_delayed = 0;
/* in Interrupt mode we ask for SENSE bit */
if (ptr->mode == GPIO_MODE_INTERRUPT) {
REG_GET(int_status,
DC_HPD_SENSE_DELAYED, &hpd_delayed);
*value = hpd_delayed;
return GPIO_RESULT_OK;
}
/* in any other modes, operate as normal GPIO */
return dal_hw_gpio_get_value(ptr, value);
}
static enum gpio_result set_config(
struct hw_gpio_pin *ptr,
const struct gpio_config_data *config_data)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
if (!config_data)
return GPIO_RESULT_INVALID_DATA;
REG_UPDATE_2(toggle_filt_cntl,
DC_HPD_CONNECT_INT_DELAY, config_data->config.hpd.delay_on_connect / 10,
DC_HPD_DISCONNECT_INT_DELAY, config_data->config.hpd.delay_on_disconnect / 10);
return GPIO_RESULT_OK;
}
static const struct hw_gpio_pin_funcs funcs = {
.destroy = dal_hw_hpd_destroy,
.open = dal_hw_gpio_open,
.get_value = get_value,
.set_value = dal_hw_gpio_set_value,
.set_config = set_config,
.change_mode = dal_hw_gpio_change_mode,
.close = dal_hw_gpio_close,
};
static void dal_hw_hpd_construct(
struct hw_hpd *pin,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
dal_hw_gpio_construct(&pin->base, id, en, ctx);
pin->base.base.funcs = &funcs;
}
void dal_hw_hpd_init(
struct hw_hpd **hw_hpd,
struct dc_context *ctx,
enum gpio_id id,
uint32_t en)
{
if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
ASSERT_CRITICAL(false);
*hw_hpd = NULL;
return;
}
*hw_hpd = kzalloc(sizeof(struct hw_hpd), GFP_KERNEL);
if (!*hw_hpd) {
ASSERT_CRITICAL(false);
return;
}
dal_hw_hpd_construct(*hw_hpd, id, en, ctx);
}
struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio)
{
struct hw_hpd *hw_hpd = dal_gpio_get_hpd(gpio);
return &hw_hpd->base.base;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_interface.h"
#include "include/gpio_types.h"
#include "hw_gpio.h"
#include "hw_ddc.h"
#include "reg_helper.h"
#include "gpio_regs.h"
#undef FN
#define FN(reg_name, field_name) \
ddc->shifts->field_name, ddc->masks->field_name
#define CTX \
ddc->base.base.ctx
#define REG(reg)\
(ddc->regs->reg)
struct gpio;
static void dal_hw_ddc_destruct(
struct hw_ddc *pin)
{
dal_hw_gpio_destruct(&pin->base);
}
static void dal_hw_ddc_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
dal_hw_ddc_destruct(pin);
kfree(pin);
*ptr = NULL;
}
static enum gpio_result set_config(
struct hw_gpio_pin *ptr,
const struct gpio_config_data *config_data)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(ptr);
struct hw_gpio *hw_gpio = NULL;
uint32_t regval;
uint32_t ddc_data_pd_en = 0;
uint32_t ddc_clk_pd_en = 0;
uint32_t aux_pad_mode = 0;
hw_gpio = &ddc->base;
if (hw_gpio == NULL) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_NULL_HANDLE;
}
regval = REG_GET_3(gpio.MASK_reg,
DC_GPIO_DDC1DATA_PD_EN, &ddc_data_pd_en,
DC_GPIO_DDC1CLK_PD_EN, &ddc_clk_pd_en,
AUX_PAD1_MODE, &aux_pad_mode);
switch (config_data->config.ddc.type) {
case GPIO_DDC_CONFIG_TYPE_MODE_I2C:
/* On plug-in, there is a transient level on the pad
* which must be discharged through the internal pull-down.
* Enable internal pull-down, 2.5msec discharge time
* is required for detection of AUX mode */
if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) {
if (!ddc_data_pd_en || !ddc_clk_pd_en) {
if (hw_gpio->base.en == GPIO_DDC_LINE_DDC_VGA) {
// bit 4 of mask has different usage in some cases
REG_SET(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1);
} else {
REG_SET_2(gpio.MASK_reg, regval,
DC_GPIO_DDC1DATA_PD_EN, 1,
DC_GPIO_DDC1CLK_PD_EN, 1);
}
if (config_data->type ==
GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
msleep(3);
}
} else {
uint32_t sda_pd_dis = 0;
uint32_t scl_pd_dis = 0;
REG_GET_2(gpio.MASK_reg,
DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
if (sda_pd_dis) {
REG_SET(gpio.MASK_reg, regval,
DC_GPIO_SDA_PD_DIS, 0);
if (config_data->type ==
GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
msleep(3);
}
if (!scl_pd_dis) {
REG_SET(gpio.MASK_reg, regval,
DC_GPIO_SCL_PD_DIS, 1);
if (config_data->type ==
GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
msleep(3);
}
}
if (aux_pad_mode) {
/* let the pins get de-asserted
* before setting the pad to I2C mode */
if (config_data->config.ddc.data_en_bit_present ||
config_data->config.ddc.clock_en_bit_present)
/* [anaumov] in DAL2, there was
* dc_service_delay_in_microseconds(2000); */
msleep(2);
/* set the I2C pad mode */
/* read the register again,
* some bits may have been changed */
REG_UPDATE(gpio.MASK_reg,
AUX_PAD1_MODE, 0);
}
if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1);
}
//set DC_IO_aux_rxsel = 2'b01
if (ddc->regs->phy_aux_cntl != 0) {
REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1);
}
return GPIO_RESULT_OK;
case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
/* set the AUX pad mode */
if (!aux_pad_mode) {
REG_SET(gpio.MASK_reg, regval,
AUX_PAD1_MODE, 1);
}
if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
REG_UPDATE(dc_gpio_aux_ctrl_5,
DDC_PAD_I2CMODE, 0);
}
return GPIO_RESULT_OK;
case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
(hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
REG_UPDATE_3(ddc_setup,
DC_I2C_DDC1_ENABLE, 1,
DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
DC_I2C_DDC1_EDID_DETECT_MODE, 0);
return GPIO_RESULT_OK;
}
break;
case GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT:
if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
(hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
REG_UPDATE_3(ddc_setup,
DC_I2C_DDC1_ENABLE, 1,
DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
DC_I2C_DDC1_EDID_DETECT_MODE, 1);
return GPIO_RESULT_OK;
}
break;
case GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING:
if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
(hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
REG_UPDATE_2(ddc_setup,
DC_I2C_DDC1_ENABLE, 0,
DC_I2C_DDC1_EDID_DETECT_ENABLE, 0);
return GPIO_RESULT_OK;
}
break;
}
BREAK_TO_DEBUGGER();
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
static const struct hw_gpio_pin_funcs funcs = {
.destroy = dal_hw_ddc_destroy,
.open = dal_hw_gpio_open,
.get_value = dal_hw_gpio_get_value,
.set_value = dal_hw_gpio_set_value,
.set_config = set_config,
.change_mode = dal_hw_gpio_change_mode,
.close = dal_hw_gpio_close,
};
static void dal_hw_ddc_construct(
struct hw_ddc *ddc,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
dal_hw_gpio_construct(&ddc->base, id, en, ctx);
ddc->base.base.funcs = &funcs;
}
void dal_hw_ddc_init(
struct hw_ddc **hw_ddc,
struct dc_context *ctx,
enum gpio_id id,
uint32_t en)
{
if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
ASSERT_CRITICAL(false);
*hw_ddc = NULL;
return;
}
*hw_ddc = kzalloc(sizeof(struct hw_ddc), GFP_KERNEL);
if (!*hw_ddc) {
ASSERT_CRITICAL(false);
return;
}
dal_hw_ddc_construct(*hw_ddc, id, en, ctx);
}
struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio)
{
struct hw_ddc *hw_ddc = dal_gpio_get_ddc(gpio);
return &hw_ddc->base.base;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "hw_gpio.h"
#include "reg_helper.h"
#include "gpio_regs.h"
#undef FN
#define FN(reg_name, field_name) \
gpio->regs->field_name ## _shift, gpio->regs->field_name ## _mask
#define CTX \
gpio->base.ctx
#define REG(reg)\
(gpio->regs->reg)
static void store_registers(
struct hw_gpio *gpio)
{
REG_GET(MASK_reg, MASK, &gpio->store.mask);
REG_GET(A_reg, A, &gpio->store.a);
REG_GET(EN_reg, EN, &gpio->store.en);
/* TODO store GPIO_MUX_CONTROL if we ever use it */
}
static void restore_registers(
struct hw_gpio *gpio)
{
REG_UPDATE(MASK_reg, MASK, gpio->store.mask);
REG_UPDATE(A_reg, A, gpio->store.a);
REG_UPDATE(EN_reg, EN, gpio->store.en);
/* TODO restore GPIO_MUX_CONTROL if we ever use it */
}
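/*
* open() snapshots the pin's MASK/A/EN state via store_registers() and
* close() puts it back via restore_registers(), so a pad borrowed for
* GPIO use is returned to whatever state it had before open().
*/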
bool dal_hw_gpio_open(
struct hw_gpio_pin *ptr,
enum gpio_mode mode)
{
struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
store_registers(pin);
ptr->opened = (dal_hw_gpio_config_mode(pin, mode) == GPIO_RESULT_OK);
return ptr->opened;
}
enum gpio_result dal_hw_gpio_get_value(
const struct hw_gpio_pin *ptr,
uint32_t *value)
{
const struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
enum gpio_result result = GPIO_RESULT_OK;
switch (ptr->mode) {
case GPIO_MODE_INPUT:
case GPIO_MODE_OUTPUT:
case GPIO_MODE_HARDWARE:
case GPIO_MODE_FAST_OUTPUT:
REG_GET(Y_reg, Y, value);
break;
default:
result = GPIO_RESULT_NON_SPECIFIC_ERROR;
}
return result;
}
enum gpio_result dal_hw_gpio_set_value(
const struct hw_gpio_pin *ptr,
uint32_t value)
{
struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
/* This is the public interface
* where the input comes from client, not shifted yet
* (because client does not know the shifts). */
switch (ptr->mode) {
case GPIO_MODE_OUTPUT:
REG_UPDATE(A_reg, A, value);
return GPIO_RESULT_OK;
case GPIO_MODE_FAST_OUTPUT:
/* We use (EN) for faster switching (used in DDC GPIO).
* So (A) is grounded, output is driven by (EN = 0)
* to pull the line down (output == 0) and (EN=1)
* then output is tri-state */
REG_UPDATE(EN_reg, EN, ~value);
return GPIO_RESULT_OK;
default:
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
}
enum gpio_result dal_hw_gpio_change_mode(
struct hw_gpio_pin *ptr,
enum gpio_mode mode)
{
struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
return dal_hw_gpio_config_mode(pin, mode);
}
void dal_hw_gpio_close(
struct hw_gpio_pin *ptr)
{
struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
restore_registers(pin);
ptr->mode = GPIO_MODE_UNKNOWN;
ptr->opened = false;
}
enum gpio_result dal_hw_gpio_config_mode(
struct hw_gpio *gpio,
enum gpio_mode mode)
{
gpio->base.mode = mode;
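/* Register programming per mode (summary of the cases below):
* INPUT:       EN=0, MASK=1 - pad acts as a GPIO input
* OUTPUT:      A=0,  MASK=1 - level driven through the A register
* FAST_OUTPUT: A=0,  MASK=1 - driven through EN, see dal_hw_gpio_set_value()
* HARDWARE:    MASK=0       - pad driven by the HW block
* INTERRUPT:   MASK=0       - HPD (IrqGpio) pins only
*/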
switch (mode) {
case GPIO_MODE_INPUT:
/* turn off output enable, act as input pin;
* program the pin as GPIO, mask out signal driven by HW */
REG_UPDATE(EN_reg, EN, 0);
REG_UPDATE(MASK_reg, MASK, 1);
return GPIO_RESULT_OK;
case GPIO_MODE_OUTPUT:
/* turn on output enable, act as output pin;
* program the pin as GPIO, mask out signal driven by HW */
REG_UPDATE(A_reg, A, 0);
REG_UPDATE(MASK_reg, MASK, 1);
return GPIO_RESULT_OK;
case GPIO_MODE_FAST_OUTPUT:
/* grounding the A register and then using the EN register bit
* has a faster effect on the rise time */
REG_UPDATE(A_reg, A, 0);
REG_UPDATE(MASK_reg, MASK, 1);
return GPIO_RESULT_OK;
case GPIO_MODE_HARDWARE:
/* program the pin as tri-state, pin is driven by HW */
REG_UPDATE(MASK_reg, MASK, 0);
return GPIO_RESULT_OK;
case GPIO_MODE_INTERRUPT:
/* Interrupt mode supported only by HPD (IrqGpio) pins. */
REG_UPDATE(MASK_reg, MASK, 0);
return GPIO_RESULT_OK;
default:
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
}
void dal_hw_gpio_construct(
struct hw_gpio *pin,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
pin->base.ctx = ctx;
pin->base.id = id;
pin->base.en = en;
pin->base.mode = GPIO_MODE_UNKNOWN;
pin->base.opened = false;
pin->store.mask = 0;
pin->store.a = 0;
pin->store.en = 0;
pin->store.mux = 0;
pin->mux_supported = false;
}
void dal_hw_gpio_destruct(
struct hw_gpio *pin)
{
ASSERT(!pin->base.opened);
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
/*
* Pre-requisites: headers required by header of this unit
*/
#include "include/gpio_types.h"
/*
* Header of this unit
*/
#include "hw_translate.h"
/*
* Post-requisites: headers required by this unit
*/
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/hw_translate_dce60.h"
#endif
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
#include "dcn10/hw_translate_dcn10.h"
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
#include "dcn30/hw_translate_dcn30.h"
#include "dcn315/hw_translate_dcn315.h"
#include "dcn32/hw_translate_dcn32.h"
/*
* This unit
*/
bool dal_hw_translate_init(
struct hw_translate *translate,
enum dce_version dce_version,
enum dce_environment dce_environment)
{
switch (dce_version) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case DCE_VERSION_6_0:
case DCE_VERSION_6_1:
case DCE_VERSION_6_4:
dal_hw_translate_dce60_init(translate);
return true;
#endif
case DCE_VERSION_8_0:
case DCE_VERSION_8_1:
case DCE_VERSION_8_3:
dal_hw_translate_dce80_init(translate);
return true;
case DCE_VERSION_10_0:
case DCE_VERSION_11_0:
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
dal_hw_translate_dce110_init(translate);
return true;
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
dal_hw_translate_dce120_init(translate);
return true;
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_translate_dcn10_init(translate);
return true;
case DCN_VERSION_2_0:
dal_hw_translate_dcn20_init(translate);
return true;
case DCN_VERSION_2_01:
case DCN_VERSION_2_1:
dal_hw_translate_dcn21_init(translate);
return true;
case DCN_VERSION_3_0:
case DCN_VERSION_3_01:
case DCN_VERSION_3_02:
case DCN_VERSION_3_03:
case DCN_VERSION_3_1:
case DCN_VERSION_3_14:
case DCN_VERSION_3_16:
dal_hw_translate_dcn30_init(translate);
return true;
case DCN_VERSION_3_15:
dal_hw_translate_dcn315_init(translate);
return true;
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
dal_hw_translate_dcn32_init(translate);
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
/*
* Pre-requisites: headers required by header of this unit
*/
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "hw_translate_dce80.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
/*
* @brief
* Returns index of first bit (starting with LSB) which is set
*/
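/* For a single-bit vector this is equivalent to ffs(vector) - 1; zero or
* multi-bit vectors fall through to GPIO_ENUM_UNKNOWN. */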
static uint32_t index_from_vector(
uint32_t vector)
{
uint32_t result = 0;
uint32_t mask = 1;
do {
if (vector == mask)
return result;
++result;
mask <<= 1;
} while (mask);
BREAK_TO_DEBUGGER();
return GPIO_ENUM_UNKNOWN;
}
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case mmDC_GPIO_GENERIC_A:
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* HPD */
case mmDC_GPIO_HPD_A:
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* SYNCA */
case mmDC_GPIO_SYNCA_A:
*id = GPIO_ID_SYNC;
switch (mask) {
case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
*en = GPIO_SYNC_HSYNC_A;
return true;
case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
*en = GPIO_SYNC_VSYNC_A;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* mmDC_GPIO_GENLK_MASK */
case mmDC_GPIO_GENLK_A:
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* GPIOPAD */
case mmGPIOPAD_A:
*id = GPIO_ID_GPIO_PAD;
*en = index_from_vector(mask);
return (*en <= GPIO_GPIO_PAD_MAX);
/* DDC */
/* we don't care about the GPIO_ID for DDC
* in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
* directly in the create method */
case mmDC_GPIO_DDC1_A:
*en = GPIO_DDC_LINE_DDC1;
return true;
case mmDC_GPIO_DDC2_A:
*en = GPIO_DDC_LINE_DDC2;
return true;
case mmDC_GPIO_DDC3_A:
*en = GPIO_DDC_LINE_DDC3;
return true;
case mmDC_GPIO_DDC4_A:
*en = GPIO_DDC_LINE_DDC4;
return true;
case mmDC_GPIO_DDC5_A:
*en = GPIO_DDC_LINE_DDC5;
return true;
case mmDC_GPIO_DDC6_A:
*en = GPIO_DDC_LINE_DDC6;
return true;
case mmDC_GPIO_DDCVGA_A:
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/* GPIO_I2CPAD */
case mmDC_GPIO_I2CPAD_A:
*en = GPIO_DDC_LINE_I2C_PAD;
return true;
/* Not implemented */
case mmDC_GPIO_PWRSEQ_A:
case mmDC_GPIO_PAD_STRENGTH_1:
case mmDC_GPIO_PAD_STRENGTH_2:
case mmDC_GPIO_DEBUG:
return false;
/* UNEXPECTED */
default:
BREAK_TO_DEBUGGER();
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = mmDC_GPIO_DDC1_A;
break;
case GPIO_DDC_LINE_DDC2:
info->offset = mmDC_GPIO_DDC2_A;
break;
case GPIO_DDC_LINE_DDC3:
info->offset = mmDC_GPIO_DDC3_A;
break;
case GPIO_DDC_LINE_DDC4:
info->offset = mmDC_GPIO_DDC4_A;
break;
case GPIO_DDC_LINE_DDC5:
info->offset = mmDC_GPIO_DDC5_A;
break;
case GPIO_DDC_LINE_DDC6:
info->offset = mmDC_GPIO_DDC6_A;
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = mmDC_GPIO_DDCVGA_A;
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = mmDC_GPIO_I2CPAD_A;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = mmDC_GPIO_DDC1_A;
break;
case GPIO_DDC_LINE_DDC2:
info->offset = mmDC_GPIO_DDC2_A;
break;
case GPIO_DDC_LINE_DDC3:
info->offset = mmDC_GPIO_DDC3_A;
break;
case GPIO_DDC_LINE_DDC4:
info->offset = mmDC_GPIO_DDC4_A;
break;
case GPIO_DDC_LINE_DDC5:
info->offset = mmDC_GPIO_DDC5_A;
break;
case GPIO_DDC_LINE_DDC6:
info->offset = mmDC_GPIO_DDC6_A;
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = mmDC_GPIO_DDCVGA_A;
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = mmDC_GPIO_I2CPAD_A;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = mmDC_GPIO_GENERIC_A;
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = mmDC_GPIO_HPD_A;
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_SYNC:
switch (en) {
case GPIO_SYNC_HSYNC_A:
info->offset = mmDC_GPIO_SYNCA_A;
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
break;
case GPIO_SYNC_VSYNC_A:
info->offset = mmDC_GPIO_SYNCA_A;
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
break;
case GPIO_SYNC_HSYNC_B:
case GPIO_SYNC_VSYNC_B:
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
break;
case GPIO_GSL_GENLOCK_VSYNC:
info->offset = mmDC_GPIO_GENLK_A;
info->mask =
DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_A:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_B:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_GPIO_PAD:
info->offset = mmGPIOPAD_A;
info->mask = (1 << en);
result = (info->mask <= GPIO_GPIO_PAD_MAX);
break;
case GPIO_ID_VIP_PAD:
default:
BREAK_TO_DEBUGGER();
result = false;
}
if (result) {
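/* The DCE8 per-pin register banks referenced above are laid out as
* MASK, A, EN, Y at consecutive dword offsets around the *_A register,
* which is what the fixed -1/+1/+2 arithmetic below relies on. */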
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
void dal_hw_translate_dce80_init(
struct hw_translate *translate)
{
translate->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "hw_factory_dce80.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#define REG(reg_name)\
mm ## reg_name
#include "reg_helper.h"
#include "../hpd_regs.h"
#define HPD_REG_LIST_DCE8(id) \
HPD_GPIO_REG_LIST(id), \
.int_status = mmDC_HPD ## id ## _INT_STATUS,\
.toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL
#define HPD_MASK_SH_LIST_DCE8(mask_sh) \
.DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\
.DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\
.DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\
.DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh
#define hpd_regs(id) \
{\
HPD_REG_LIST_DCE8(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5),
hpd_regs(6)
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST_DCE8(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST_DCE8(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs[] = {
ddc_data_regs(1),
ddc_data_regs(2),
ddc_data_regs(3),
ddc_data_regs(4),
ddc_data_regs(5),
ddc_data_regs(6),
ddc_vga_data_regs,
ddc_i2c_data_regs
};
static const struct ddc_registers ddc_clk_regs[] = {
ddc_clk_regs(1),
ddc_clk_regs(2),
ddc_clk_regs(3),
ddc_clk_regs(4),
ddc_clk_regs(5),
ddc_clk_regs(6),
ddc_vga_clk_regs,
ddc_i2c_clk_regs
};
static const struct ddc_sh_mask ddc_shift = {
DDC_MASK_SH_LIST(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask = {
DDC_MASK_SH_LIST(_MASK)
};
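/* define_ddc_registers() below indexes these tables directly with the
* pin's en value (the GPIO_DDC_LINE_* enum), so the entries must stay in
* that enum's order: DDC1..DDC6, DDC_VGA, I2C_PAD. */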
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs[en];
ddc->base.regs = &ddc_data_regs[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs[en];
ddc->base.regs = &ddc_clk_regs[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift;
ddc->masks = &ddc_mask;
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = NULL,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = NULL,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers
};
void dal_hw_factory_dce80_init(
struct hw_factory *factory)
{
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 7;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 2;
factory->number_of_pins[GPIO_ID_GSL] = 4;
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dcn30.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "dpcs/dpcs_3_0_0_offset.h"
#include "dpcs/dpcs_3_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "reg_helper.h"
#include "../hpd_regs.h"
/* begin *********************
* macros to expand register list macros defined in HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
#define REGI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand register list macros defined in HW object header file
* end *********************/
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5),
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(1),
ddc_data_regs_dcn2(2),
ddc_data_regs_dcn2(3),
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
ddc_data_regs_dcn2(6),
{
DDC_GPIO_VGA_REG_LIST(DATA),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(1),
ddc_clk_regs_dcn2(2),
ddc_clk_regs_dcn2(3),
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
ddc_clk_regs_dcn2(6),
{
DDC_GPIO_VGA_REG_LIST(CLK),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 1),
DDC_MASK_SH_LIST_DCN2(_MASK, 2),
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
DDC_MASK_SH_LIST_DCN2(_MASK, 6),
DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
/* set field name */
#define SF_GENERIC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define generic_regs(id) \
{\
GENERIC_REG_LIST(id)\
}
static const struct generic_registers generic_regs[] = {
generic_regs(A),
generic_regs(B),
};
static const struct generic_sh_mask generic_shift[] = {
GENERIC_MASK_SH_LIST(__SHIFT, A),
GENERIC_MASK_SH_LIST(__SHIFT, B),
};
static const struct generic_sh_mask generic_mask[] = {
GENERIC_MASK_SH_LIST(_MASK, A),
GENERIC_MASK_SH_LIST(_MASK, B),
};
static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
generic->regs = &generic_regs[en];
generic->shifts = &generic_shift[en];
generic->masks = &generic_mask[en];
generic->base.regs = &generic_regs[en].gpio;
}
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs_dcn[en];
ddc->base.regs = &ddc_data_regs_dcn[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs_dcn[en];
ddc->base.regs = &ddc_clk_regs_dcn[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift[en];
ddc->masks = &ddc_mask[en];
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers,
.define_generic_registers = define_generic_registers
};
/*
* dal_hw_factory_dcn30_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dcn30_init(struct hw_factory *factory)
{
/* TODO: check ASIC CAPs */
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 4;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 0;
factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "hw_translate_dcn30.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "dpcs/dpcs_3_0_0_offset.h"
#include "dpcs/dpcs_3_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
/* begin *********************
* macros to expand register list macro defined in HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#undef REG
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand register list macro defined in HW object header file
* end *********************/
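/* Map a raw GPIO register offset plus pin bit mask back onto the
 * corresponding (gpio_id, enum) pair; returns false (and asserts) for
 * offsets or masks this ASIC does not expose.
 */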
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* HPD */
case REG(DC_GPIO_HPD_A):
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* REG(DC_GPIO_GENLK_MASK) */
case REG(DC_GPIO_GENLK_A):
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
/* we don't care about the GPIO_ID for DDC
* in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
* directly in the create method
*/
case REG(DC_GPIO_DDC1_A):
*en = GPIO_DDC_LINE_DDC1;
return true;
case REG(DC_GPIO_DDC2_A):
*en = GPIO_DDC_LINE_DDC2;
return true;
case REG(DC_GPIO_DDC3_A):
*en = GPIO_DDC_LINE_DDC3;
return true;
case REG(DC_GPIO_DDC4_A):
*en = GPIO_DDC_LINE_DDC4;
return true;
case REG(DC_GPIO_DDC5_A):
*en = GPIO_DDC_LINE_DDC5;
return true;
case REG(DC_GPIO_DDC6_A):
*en = GPIO_DDC_LINE_DDC6;
return true;
case REG(DC_GPIO_DDCVGA_A):
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/*
* case REG(DC_GPIO_I2CPAD_A): not exist
* case REG(DC_GPIO_PWRSEQ_A):
* case REG(DC_GPIO_PAD_STRENGTH_1):
* case REG(DC_GPIO_PAD_STRENGTH_2):
* case REG(DC_GPIO_DEBUG):
*/
/* UNEXPECTED */
default:
/* case REG(DC_GPIO_SYNCA_A): not exist */
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = REG(DC_GPIO_GENERIC_A);
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = REG(DC_GPIO_HPD_A);
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_GENLOCK_VSYNC:
/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_A:
/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_B:
/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
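/* The tables above only record the 'A' (level) register and pin mask;
 * the EN, Y and MASK register offsets for the same pin are derived here
 * from their fixed distance to the A register, reusing the same bit mask.
 */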
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
* dal_hw_translate_dcn30_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dcn30_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c |
/*
* Copyright 2020 Mauro Rossi <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
/*
* Pre-requisites: headers required by header of this unit
*/
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "hw_translate_dce60.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "smu/smu_6_0_d.h"
/*
* @brief
* Returns index of first bit (starting with LSB) which is set
*/
static uint32_t index_from_vector(
uint32_t vector)
{
uint32_t result = 0;
uint32_t mask = 1;
do {
if (vector == mask)
return result;
++result;
mask <<= 1;
} while (mask);
BREAK_TO_DEBUGGER();
return GPIO_ENUM_UNKNOWN;
}
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case mmDC_GPIO_GENERIC_A:
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* HPD */
case mmDC_GPIO_HPD_A:
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* SYNCA */
case mmDC_GPIO_SYNCA_A:
*id = GPIO_ID_SYNC;
switch (mask) {
case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
*en = GPIO_SYNC_HSYNC_A;
return true;
case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
*en = GPIO_SYNC_VSYNC_A;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* mmDC_GPIO_GENLK_MASK */
case mmDC_GPIO_GENLK_A:
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
BREAK_TO_DEBUGGER();
return false;
}
break;
/* GPIOPAD */
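/* pad pins map 1:1 onto bit positions, so derive the enum from the
 * index of the set bit rather than from a named mask
 */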
case mmGPIOPAD_A:
*id = GPIO_ID_GPIO_PAD;
*en = index_from_vector(mask);
return (*en <= GPIO_GPIO_PAD_MAX);
/* DDC */
/* we don't care about the GPIO_ID for DDC
* in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
* directly in the create method */
case mmDC_GPIO_DDC1_A:
*en = GPIO_DDC_LINE_DDC1;
return true;
case mmDC_GPIO_DDC2_A:
*en = GPIO_DDC_LINE_DDC2;
return true;
case mmDC_GPIO_DDC3_A:
*en = GPIO_DDC_LINE_DDC3;
return true;
case mmDC_GPIO_DDC4_A:
*en = GPIO_DDC_LINE_DDC4;
return true;
case mmDC_GPIO_DDC5_A:
*en = GPIO_DDC_LINE_DDC5;
return true;
case mmDC_GPIO_DDC6_A:
*en = GPIO_DDC_LINE_DDC6;
return true;
case mmDC_GPIO_DDCVGA_A:
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/* GPIO_I2CPAD */
case mmDC_GPIO_I2CPAD_A:
*en = GPIO_DDC_LINE_I2C_PAD;
return true;
/* Not implemented */
case mmDC_GPIO_PWRSEQ_A:
case mmDC_GPIO_PAD_STRENGTH_1:
case mmDC_GPIO_PAD_STRENGTH_2:
case mmDC_GPIO_DEBUG:
return false;
/* UNEXPECTED */
default:
BREAK_TO_DEBUGGER();
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = mmDC_GPIO_DDC1_A;
break;
case GPIO_DDC_LINE_DDC2:
info->offset = mmDC_GPIO_DDC2_A;
break;
case GPIO_DDC_LINE_DDC3:
info->offset = mmDC_GPIO_DDC3_A;
break;
case GPIO_DDC_LINE_DDC4:
info->offset = mmDC_GPIO_DDC4_A;
break;
case GPIO_DDC_LINE_DDC5:
info->offset = mmDC_GPIO_DDC5_A;
break;
case GPIO_DDC_LINE_DDC6:
info->offset = mmDC_GPIO_DDC6_A;
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = mmDC_GPIO_DDCVGA_A;
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = mmDC_GPIO_I2CPAD_A;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = mmDC_GPIO_DDC1_A;
break;
case GPIO_DDC_LINE_DDC2:
info->offset = mmDC_GPIO_DDC2_A;
break;
case GPIO_DDC_LINE_DDC3:
info->offset = mmDC_GPIO_DDC3_A;
break;
case GPIO_DDC_LINE_DDC4:
info->offset = mmDC_GPIO_DDC4_A;
break;
case GPIO_DDC_LINE_DDC5:
info->offset = mmDC_GPIO_DDC5_A;
break;
case GPIO_DDC_LINE_DDC6:
info->offset = mmDC_GPIO_DDC6_A;
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = mmDC_GPIO_DDCVGA_A;
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = mmDC_GPIO_I2CPAD_A;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = mmDC_GPIO_GENERIC_A;
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = mmDC_GPIO_HPD_A;
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_SYNC:
switch (en) {
case GPIO_SYNC_HSYNC_A:
info->offset = mmDC_GPIO_SYNCA_A;
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
break;
case GPIO_SYNC_VSYNC_A:
info->offset = mmDC_GPIO_SYNCA_A;
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
break;
case GPIO_SYNC_HSYNC_B:
case GPIO_SYNC_VSYNC_B:
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
break;
case GPIO_GSL_GENLOCK_VSYNC:
info->offset = mmDC_GPIO_GENLK_A;
info->mask =
DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_A:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_B:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
break;
default:
BREAK_TO_DEBUGGER();
result = false;
}
break;
case GPIO_ID_GPIO_PAD:
info->offset = mmGPIOPAD_A;
info->mask = (1 << en);
result = (info->mask <= GPIO_GPIO_PAD_MAX);
break;
case GPIO_ID_VIP_PAD:
default:
BREAK_TO_DEBUGGER();
result = false;
}
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
void dal_hw_translate_dce60_init(
struct hw_translate *translate)
{
translate->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c |
/*
* Copyright 2020 Mauro Rossi <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "hw_factory_dce60.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#define REG(reg_name)\
mm ## reg_name
#include "reg_helper.h"
#include "../hpd_regs.h"
#define HPD_REG_LIST_DCE6(id) \
HPD_GPIO_REG_LIST(id), \
.int_status = mmDC_HPD ## id ## _INT_STATUS,\
.toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL
#define HPD_MASK_SH_LIST_DCE6(mask_sh) \
.DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\
.DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\
.DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\
.DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh
#define hpd_regs(id) \
{\
HPD_REG_LIST_DCE6(id)\
}
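/* One register set per HPD pin (HPD1..HPD6), using the DCE6 list that also
 * carries the interrupt status and toggle filter registers; a single
 * shift/mask set built from the DC_HPD1 field layout is shared by all pins.
 */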
static const struct hpd_registers hpd_regs[] = {
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5),
hpd_regs(6)
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST_DCE6(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST_DCE6(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs[] = {
ddc_data_regs(1),
ddc_data_regs(2),
ddc_data_regs(3),
ddc_data_regs(4),
ddc_data_regs(5),
ddc_data_regs(6),
ddc_vga_data_regs,
ddc_i2c_data_regs
};
static const struct ddc_registers ddc_clk_regs[] = {
ddc_clk_regs(1),
ddc_clk_regs(2),
ddc_clk_regs(3),
ddc_clk_regs(4),
ddc_clk_regs(5),
ddc_clk_regs(6),
ddc_vga_clk_regs,
ddc_i2c_clk_regs
};
static const struct ddc_sh_mask ddc_shift = {
DDC_MASK_SH_LIST(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask = {
DDC_MASK_SH_LIST(_MASK)
};
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs[en];
ddc->base.regs = &ddc_data_regs[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs[en];
ddc->base.regs = &ddc_clk_regs[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift;
ddc->masks = &ddc_mask;
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = NULL,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = NULL,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers
};
void dal_hw_factory_dce60_init(
struct hw_factory *factory)
{
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 7;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 2;
factory->number_of_pins[GPIO_ID_GSL] = 4;
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "hw_translate_dce110.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case mmDC_GPIO_GENERIC_A:
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* HPD */
case mmDC_GPIO_HPD_A:
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* SYNCA */
case mmDC_GPIO_SYNCA_A:
*id = GPIO_ID_SYNC;
switch (mask) {
case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
*en = GPIO_SYNC_HSYNC_A;
return true;
case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
*en = GPIO_SYNC_VSYNC_A;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* mmDC_GPIO_GENLK_MASK */
case mmDC_GPIO_GENLK_A:
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
/* we don't care about the GPIO_ID for DDC
* in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
* directly in the create method */
case mmDC_GPIO_DDC1_A:
*en = GPIO_DDC_LINE_DDC1;
return true;
case mmDC_GPIO_DDC2_A:
*en = GPIO_DDC_LINE_DDC2;
return true;
case mmDC_GPIO_DDC3_A:
*en = GPIO_DDC_LINE_DDC3;
return true;
case mmDC_GPIO_DDC4_A:
*en = GPIO_DDC_LINE_DDC4;
return true;
case mmDC_GPIO_DDC5_A:
*en = GPIO_DDC_LINE_DDC5;
return true;
case mmDC_GPIO_DDC6_A:
*en = GPIO_DDC_LINE_DDC6;
return true;
case mmDC_GPIO_DDCVGA_A:
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/* GPIO_I2CPAD */
case mmDC_GPIO_I2CPAD_A:
*en = GPIO_DDC_LINE_I2C_PAD;
return true;
/* Not implemented */
case mmDC_GPIO_PWRSEQ_A:
case mmDC_GPIO_PAD_STRENGTH_1:
case mmDC_GPIO_PAD_STRENGTH_2:
case mmDC_GPIO_DEBUG:
return false;
/* UNEXPECTED */
default:
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = mmDC_GPIO_DDC1_A;
break;
case GPIO_DDC_LINE_DDC2:
info->offset = mmDC_GPIO_DDC2_A;
break;
case GPIO_DDC_LINE_DDC3:
info->offset = mmDC_GPIO_DDC3_A;
break;
case GPIO_DDC_LINE_DDC4:
info->offset = mmDC_GPIO_DDC4_A;
break;
case GPIO_DDC_LINE_DDC5:
info->offset = mmDC_GPIO_DDC5_A;
break;
case GPIO_DDC_LINE_DDC6:
info->offset = mmDC_GPIO_DDC6_A;
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = mmDC_GPIO_DDCVGA_A;
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = mmDC_GPIO_I2CPAD_A;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = mmDC_GPIO_DDC1_A;
break;
case GPIO_DDC_LINE_DDC2:
info->offset = mmDC_GPIO_DDC2_A;
break;
case GPIO_DDC_LINE_DDC3:
info->offset = mmDC_GPIO_DDC3_A;
break;
case GPIO_DDC_LINE_DDC4:
info->offset = mmDC_GPIO_DDC4_A;
break;
case GPIO_DDC_LINE_DDC5:
info->offset = mmDC_GPIO_DDC5_A;
break;
case GPIO_DDC_LINE_DDC6:
info->offset = mmDC_GPIO_DDC6_A;
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = mmDC_GPIO_DDCVGA_A;
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = mmDC_GPIO_I2CPAD_A;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = mmDC_GPIO_GENERIC_A;
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = mmDC_GPIO_HPD_A;
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
switch (en) {
case GPIO_SYNC_HSYNC_A:
info->offset = mmDC_GPIO_SYNCA_A;
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
break;
case GPIO_SYNC_VSYNC_A:
info->offset = mmDC_GPIO_SYNCA_A;
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
break;
case GPIO_SYNC_HSYNC_B:
case GPIO_SYNC_VSYNC_B:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
break;
case GPIO_GSL_GENLOCK_VSYNC:
info->offset = mmDC_GPIO_GENLK_A;
info->mask =
DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_A:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_B:
info->offset = mmDC_GPIO_GENLK_A;
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
* dal_hw_translate_dce110_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dce110_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dce110.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
/* set field name */
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define REG(reg_name)\
mm ## reg_name
#define REGI(reg_name, block, id)\
mm ## block ## id ## _ ## reg_name
#include "reg_helper.h"
#include "../hpd_regs.h"
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5)
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs[] = {
ddc_data_regs(1),
ddc_data_regs(2),
ddc_data_regs(3),
ddc_data_regs(4),
ddc_data_regs(5),
ddc_data_regs(6),
ddc_vga_data_regs,
ddc_i2c_data_regs
};
static const struct ddc_registers ddc_clk_regs[] = {
ddc_clk_regs(1),
ddc_clk_regs(2),
ddc_clk_regs(3),
ddc_clk_regs(4),
ddc_clk_regs(5),
ddc_clk_regs(6),
ddc_vga_clk_regs,
ddc_i2c_clk_regs
};
static const struct ddc_sh_mask ddc_shift = {
DDC_MASK_SH_LIST(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask = {
DDC_MASK_SH_LIST(_MASK)
};
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs[en];
ddc->base.regs = &ddc_data_regs[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs[en];
ddc->base.regs = &ddc_clk_regs[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift;
ddc->masks = &ddc_mask;
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = NULL,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = NULL,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers
};
/*
* dal_hw_factory_dce110_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dce110_init(struct hw_factory *factory)
{
/*TODO check ASIC CAPs*/
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 7;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 2;
factory->number_of_pins[GPIO_ID_GSL] = 4;
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dce120.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#define block HPD
#define reg_num 0
/* set field name */
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define REGI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#include "reg_helper.h"
#include "../hpd_regs.h"
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5)
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs[] = {
ddc_data_regs(1),
ddc_data_regs(2),
ddc_data_regs(3),
ddc_data_regs(4),
ddc_data_regs(5),
ddc_data_regs(6),
ddc_vga_data_regs,
ddc_i2c_data_regs
};
static const struct ddc_registers ddc_clk_regs[] = {
ddc_clk_regs(1),
ddc_clk_regs(2),
ddc_clk_regs(3),
ddc_clk_regs(4),
ddc_clk_regs(5),
ddc_clk_regs(6),
ddc_vga_clk_regs,
ddc_i2c_clk_regs
};
static const struct ddc_sh_mask ddc_shift = {
DDC_MASK_SH_LIST(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask = {
DDC_MASK_SH_LIST(_MASK)
};
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs[en];
ddc->base.regs = &ddc_data_regs[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs[en];
ddc->base.regs = &ddc_clk_regs[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift;
ddc->masks = &ddc_mask;
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = NULL,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = NULL,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers
};
/*
* dal_hw_factory_dce120_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dce120_init(struct hw_factory *factory)
{
/*TODO check ASIC CAPs*/
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 7;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 2;
factory->number_of_pins[GPIO_ID_GSL] = 4;
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "hw_translate_dce120.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
/* begin *********************
* macros to expand register list macro defined in HW object header file */
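/* REG() folds the instance-0 segment base address into the register offset
 * at compile time; REGI() does the same for block-instanced registers
 * (e.g. HPD0_, HPD1_).
 */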
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define REGI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
/* macros to expand register list macro defined in HW object header file
* end *********************/
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* HPD */
case REG(DC_GPIO_HPD_A):
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* SYNCA */
case REG(DC_GPIO_SYNCA_A):
*id = GPIO_ID_SYNC;
switch (mask) {
case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
*en = GPIO_SYNC_HSYNC_A;
return true;
case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
*en = GPIO_SYNC_VSYNC_A;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* REG(DC_GPIO_GENLK_MASK) */
case REG(DC_GPIO_GENLK_A):
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
/* we don't care about the GPIO_ID for DDC
* in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
* directly in the create method */
case REG(DC_GPIO_DDC1_A):
*en = GPIO_DDC_LINE_DDC1;
return true;
case REG(DC_GPIO_DDC2_A):
*en = GPIO_DDC_LINE_DDC2;
return true;
case REG(DC_GPIO_DDC3_A):
*en = GPIO_DDC_LINE_DDC3;
return true;
case REG(DC_GPIO_DDC4_A):
*en = GPIO_DDC_LINE_DDC4;
return true;
case REG(DC_GPIO_DDC5_A):
*en = GPIO_DDC_LINE_DDC5;
return true;
case REG(DC_GPIO_DDC6_A):
*en = GPIO_DDC_LINE_DDC6;
return true;
case REG(DC_GPIO_DDCVGA_A):
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/* GPIO_I2CPAD */
case REG(DC_GPIO_I2CPAD_A):
*en = GPIO_DDC_LINE_I2C_PAD;
return true;
/* Not implemented */
case REG(DC_GPIO_PWRSEQ_A):
case REG(DC_GPIO_PAD_STRENGTH_1):
case REG(DC_GPIO_PAD_STRENGTH_2):
case REG(DC_GPIO_DEBUG):
return false;
/* UNEXPECTED */
default:
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = REG(DC_GPIO_I2CPAD_A);
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = REG(DC_GPIO_I2CPAD_A);
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = REG(DC_GPIO_GENERIC_A);
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = REG(DC_GPIO_HPD_A);
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
switch (en) {
case GPIO_SYNC_HSYNC_A:
info->offset = REG(DC_GPIO_SYNCA_A);
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
break;
case GPIO_SYNC_VSYNC_A:
info->offset = REG(DC_GPIO_SYNCA_A);
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
break;
case GPIO_SYNC_HSYNC_B:
case GPIO_SYNC_VSYNC_B:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
break;
case GPIO_GSL_GENLOCK_VSYNC:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask =
DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_A:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_B:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
* dal_hw_translate_dce120_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dce120_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "hw_translate_dcn32.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#define DCN_BASE__INST0_SEG2 0x000034C0
/* begin *********************
* macros to expand register list macro defined in HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#undef REG
#define REG(reg_name)\
BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand register list macro defined in HW object header file
* end *********************/
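/* Compared with the earlier DCE/DCN translate tables above, DCN3.2 only
 * exposes generic pins A-F and HPD1-5.
 */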
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* HPD */
case REG(DC_GPIO_HPD_A):
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* REG(DC_GPIO_GENLK_MASK) */
case REG(DC_GPIO_GENLK_A):
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
/* we don't care about the GPIO_ID for DDC
* in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
* directly in the create method */
case REG(DC_GPIO_DDC1_A):
*en = GPIO_DDC_LINE_DDC1;
return true;
case REG(DC_GPIO_DDC2_A):
*en = GPIO_DDC_LINE_DDC2;
return true;
case REG(DC_GPIO_DDC3_A):
*en = GPIO_DDC_LINE_DDC3;
return true;
case REG(DC_GPIO_DDC4_A):
*en = GPIO_DDC_LINE_DDC4;
return true;
case REG(DC_GPIO_DDC5_A):
*en = GPIO_DDC_LINE_DDC5;
return true;
case REG(DC_GPIO_DDCVGA_A):
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = REG(DC_GPIO_GENERIC_A);
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = REG(DC_GPIO_HPD_A);
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_GENLOCK_VSYNC:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_A:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_B:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
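	/* The remaining pin registers sit at fixed offsets from the "A"
	 * register resolved above: MASK immediately precedes it, EN and Y
	 * follow it, and all of them share the same bit mask.
	 */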
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
* dal_hw_translate_dcn32_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dcn32_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
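/*
 * Illustrative usage sketch (not part of the driver): a caller that has
 * already picked the DCN3.2 translate table is expected to go through the
 * funcs pointers, e.g.
 *
 *	struct hw_translate tr;
 *	enum gpio_id id;
 *	uint32_t en;
 *
 *	dal_hw_translate_dcn32_init(&tr);
 *	if (tr.funcs->offset_to_id(offset, mask, &id, &en)) {
 *		... id/en now identify the pin behind (offset, mask) ...
 *	}
 *
 * The per-ASIC dispatch that selects this init is assumed to live in the
 * common gpio/hw_translate code.
 */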
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_translate_dcn32.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dcn32.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "reg_helper.h"
#include "../hpd_regs.h"
#define DCN_BASE__INST0_SEG2 0x000034C0
/* begin *********************
 * macros to expand the register list macros defined in the HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define REG(reg_name)\
BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
#define REGI(reg_name, block, id)\
BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
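/*
 * For illustration: with the definitions above, REG(DC_GPIO_HPD_A) pastes
 * together DCN_BASE__INST0_SEG<n> (where <n> comes from
 * regDC_GPIO_HPD_A_BASE_IDX in the dcn_3_2_0 headers) and regDC_GPIO_HPD_A,
 * i.e. segment base address plus register offset. Note the "reg" prefix used
 * by the DCN3.2 headers, where the DCN1.x/2.x files below use "mm".
 */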
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(1),
ddc_data_regs_dcn2(2),
ddc_data_regs_dcn2(3),
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
{
		// add a dummy entry for cases where there is no such port
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
},
{
DDC_GPIO_VGA_REG_LIST(DATA),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(1),
ddc_clk_regs_dcn2(2),
ddc_clk_regs_dcn2(3),
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
{
		// add a dummy entry for cases where there is no such port
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
},
{
DDC_GPIO_VGA_REG_LIST(CLK),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
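/*
 * The all-zero placeholder in each table above appears to exist so that the
 * tables stay directly indexable by the GPIO_DDC_LINE_* value passed to
 * define_ddc_registers() as "en": DCN3.2 has no DDC6 pad, so that slot only
 * needs a dummy entry ahead of the VGA line.
 */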
static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 1),
DDC_MASK_SH_LIST_DCN2(_MASK, 2),
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
DDC_MASK_SH_LIST_DCN2(_MASK, 6),
DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
/* set field name */
#define SF_GENERIC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define generic_regs(id) \
{\
GENERIC_REG_LIST(id)\
}
static const struct generic_registers generic_regs[] = {
generic_regs(A),
generic_regs(B),
};
static const struct generic_sh_mask generic_shift[] = {
GENERIC_MASK_SH_LIST(__SHIFT, A),
GENERIC_MASK_SH_LIST(__SHIFT, B),
};
static const struct generic_sh_mask generic_mask[] = {
GENERIC_MASK_SH_LIST(_MASK, A),
GENERIC_MASK_SH_LIST(_MASK, B),
};
static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
generic->regs = &generic_regs[en];
generic->shifts = &generic_shift[en];
generic->masks = &generic_mask[en];
generic->base.regs = &generic_regs[en].gpio;
}
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs_dcn[en];
ddc->base.regs = &ddc_data_regs_dcn[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs_dcn[en];
ddc->base.regs = &ddc_clk_regs_dcn[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift[en];
ddc->masks = &ddc_mask[en];
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers,
.define_generic_registers = define_generic_registers
};
/*
* dal_hw_factory_dcn32_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dcn32_init(struct hw_factory *factory)
{
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 4;
factory->number_of_pins[GPIO_ID_HPD] = 5;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 0;
factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/
factory->funcs = &funcs;
}
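/*
 * Illustrative usage sketch (not part of the driver): after the init call a
 * caller is expected to go through the funcs table when it wires up a pin,
 * e.g. for an HPD pin "en" (with "pin" assumed to be the hw_gpio_pin base of
 * an hw_hpd object obtained through funcs->init_hpd/get_hpd_pin):
 *
 *	struct hw_factory factory;
 *
 *	dal_hw_factory_dcn32_init(&factory);
 *	if (en < factory.number_of_pins[GPIO_ID_HPD])
 *		factory.funcs->define_hpd_registers(pin, en);
 *
 * which points the hw_hpd wrapper at the hpd_regs/hpd_shift/hpd_mask tables
 * defined above.
 */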
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dcn20.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "navi10_ip_offset.h"
#include "reg_helper.h"
#include "../hpd_regs.h"
/* begin *********************
 * macros to expand the register list macros defined in the HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
#define REGI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5),
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(1),
ddc_data_regs_dcn2(2),
ddc_data_regs_dcn2(3),
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
ddc_data_regs_dcn2(6),
{
DDC_GPIO_VGA_REG_LIST(DATA),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(1),
ddc_clk_regs_dcn2(2),
ddc_clk_regs_dcn2(3),
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
ddc_clk_regs_dcn2(6),
{
DDC_GPIO_VGA_REG_LIST(CLK),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 1),
DDC_MASK_SH_LIST_DCN2(_MASK, 2),
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
DDC_MASK_SH_LIST_DCN2(_MASK, 6),
DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
/* set field name */
#define SF_GENERIC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define generic_regs(id) \
{\
GENERIC_REG_LIST(id)\
}
static const struct generic_registers generic_regs[] = {
generic_regs(A),
generic_regs(B),
};
static const struct generic_sh_mask generic_shift[] = {
GENERIC_MASK_SH_LIST(__SHIFT, A),
GENERIC_MASK_SH_LIST(__SHIFT, B),
};
static const struct generic_sh_mask generic_mask[] = {
GENERIC_MASK_SH_LIST(_MASK, A),
GENERIC_MASK_SH_LIST(_MASK, B),
};
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs_dcn[en];
ddc->base.regs = &ddc_data_regs_dcn[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs_dcn[en];
ddc->base.regs = &ddc_clk_regs_dcn[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift[en];
ddc->masks = &ddc_mask[en];
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
generic->regs = &generic_regs[en];
generic->shifts = &generic_shift[en];
generic->masks = &generic_mask[en];
generic->base.regs = &generic_regs[en].gpio;
}
/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers,
.define_generic_registers = define_generic_registers,
};
/*
 * dal_hw_factory_dcn20_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dcn20_init(struct hw_factory *factory)
{
/*TODO check ASIC CAPs*/
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 4;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 0;
factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "hw_translate_dcn20.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
/* begin *********************
 * macros to expand the register list macros defined in the HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#undef REG
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* HPD */
case REG(DC_GPIO_HPD_A):
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
	/* REG(DC_GPIO_GENLK_MASK) */
case REG(DC_GPIO_GENLK_A):
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
	/* We don't care about the GPIO_ID for DDC here:
	 * the DDC handle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
	 * directly in its create method.
	 */
case REG(DC_GPIO_DDC1_A):
*en = GPIO_DDC_LINE_DDC1;
return true;
case REG(DC_GPIO_DDC2_A):
*en = GPIO_DDC_LINE_DDC2;
return true;
case REG(DC_GPIO_DDC3_A):
*en = GPIO_DDC_LINE_DDC3;
return true;
case REG(DC_GPIO_DDC4_A):
*en = GPIO_DDC_LINE_DDC4;
return true;
case REG(DC_GPIO_DDC5_A):
*en = GPIO_DDC_LINE_DDC5;
return true;
case REG(DC_GPIO_DDC6_A):
*en = GPIO_DDC_LINE_DDC6;
return true;
case REG(DC_GPIO_DDCVGA_A):
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/*
	 * case REG(DC_GPIO_I2CPAD_A): does not exist
* case REG(DC_GPIO_PWRSEQ_A):
* case REG(DC_GPIO_PAD_STRENGTH_1):
* case REG(DC_GPIO_PAD_STRENGTH_2):
* case REG(DC_GPIO_DEBUG):
*/
/* UNEXPECTED */
default:
/* case REG(DC_GPIO_SYNCA_A): not exist */
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = REG(DC_GPIO_GENERIC_A);
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = REG(DC_GPIO_HPD_A);
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_GENLOCK_VSYNC:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_A:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_B:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
 * dal_hw_translate_dcn20_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dcn20_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "hw_translate_dcn10.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
/* begin *********************
 * macros to expand the register list macros defined in the HW object header file */
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define REGI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* HPD */
case REG(DC_GPIO_HPD_A):
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* SYNCA */
case REG(DC_GPIO_SYNCA_A):
*id = GPIO_ID_SYNC;
switch (mask) {
case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
*en = GPIO_SYNC_HSYNC_A;
return true;
case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
*en = GPIO_SYNC_VSYNC_A;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
	/* REG(DC_GPIO_GENLK_MASK) */
case REG(DC_GPIO_GENLK_A):
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
	/* We don't care about the GPIO_ID for DDC here:
	 * the DDC handle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
	 * directly in its create method. */
case REG(DC_GPIO_DDC1_A):
*en = GPIO_DDC_LINE_DDC1;
return true;
case REG(DC_GPIO_DDC2_A):
*en = GPIO_DDC_LINE_DDC2;
return true;
case REG(DC_GPIO_DDC3_A):
*en = GPIO_DDC_LINE_DDC3;
return true;
case REG(DC_GPIO_DDC4_A):
*en = GPIO_DDC_LINE_DDC4;
return true;
case REG(DC_GPIO_DDC5_A):
*en = GPIO_DDC_LINE_DDC5;
return true;
case REG(DC_GPIO_DDC6_A):
*en = GPIO_DDC_LINE_DDC6;
return true;
case REG(DC_GPIO_DDCVGA_A):
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/* GPIO_I2CPAD */
case REG(DC_GPIO_I2CPAD_A):
*en = GPIO_DDC_LINE_I2C_PAD;
return true;
/* Not implemented */
case REG(DC_GPIO_PWRSEQ_A):
case REG(DC_GPIO_PAD_STRENGTH_1):
case REG(DC_GPIO_PAD_STRENGTH_2):
case REG(DC_GPIO_DEBUG):
return false;
/* UNEXPECTED */
default:
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = REG(DC_GPIO_I2CPAD_A);
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC6:
info->offset = REG(DC_GPIO_DDC6_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
info->offset = REG(DC_GPIO_I2CPAD_A);
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = REG(DC_GPIO_GENERIC_A);
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = REG(DC_GPIO_HPD_A);
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
switch (en) {
case GPIO_SYNC_HSYNC_A:
info->offset = REG(DC_GPIO_SYNCA_A);
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
break;
case GPIO_SYNC_VSYNC_A:
info->offset = REG(DC_GPIO_SYNCA_A);
info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
break;
case GPIO_SYNC_HSYNC_B:
case GPIO_SYNC_VSYNC_B:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
break;
case GPIO_GSL_GENLOCK_VSYNC:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask =
DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_A:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
break;
case GPIO_GSL_SWAPLOCK_B:
info->offset = REG(DC_GPIO_GENLK_A);
info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
* dal_hw_translate_dcn10_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dcn10_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dcn10.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#define block HPD
#define reg_num 0
/* set field name */
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
/* compile time expand base address. */
#define BASE(seg) \
BASE_INNER(seg)
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define REGI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#include "reg_helper.h"
#include "../hpd_regs.h"
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5)
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs[] = {
ddc_data_regs(1),
ddc_data_regs(2),
ddc_data_regs(3),
ddc_data_regs(4),
ddc_data_regs(5),
ddc_data_regs(6),
ddc_vga_data_regs,
ddc_i2c_data_regs
};
static const struct ddc_registers ddc_clk_regs[] = {
ddc_clk_regs(1),
ddc_clk_regs(2),
ddc_clk_regs(3),
ddc_clk_regs(4),
ddc_clk_regs(5),
ddc_clk_regs(6),
ddc_vga_clk_regs,
ddc_i2c_clk_regs
};
static const struct ddc_sh_mask ddc_shift = {
DDC_MASK_SH_LIST(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask = {
DDC_MASK_SH_LIST(_MASK)
};
#include "../generic_regs.h"
/* set field name */
#define SF_GENERIC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define generic_regs(id) \
{\
GENERIC_REG_LIST(id)\
}
static const struct generic_registers generic_regs[] = {
generic_regs(A),
generic_regs(B),
};
static const struct generic_sh_mask generic_shift[] = {
GENERIC_MASK_SH_LIST(__SHIFT, A),
GENERIC_MASK_SH_LIST(__SHIFT, B),
};
static const struct generic_sh_mask generic_mask[] = {
GENERIC_MASK_SH_LIST(_MASK, A),
GENERIC_MASK_SH_LIST(_MASK, B),
};
static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
generic->regs = &generic_regs[en];
generic->shifts = &generic_shift[en];
generic->masks = &generic_mask[en];
generic->base.regs = &generic_regs[en].gpio;
}
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs[en];
ddc->base.regs = &ddc_data_regs[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs[en];
ddc->base.regs = &ddc_clk_regs[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift;
ddc->masks = &ddc_mask;
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers,
.define_generic_registers = define_generic_registers
};
/*
* dal_hw_factory_dcn10_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dcn10_init(struct hw_factory *factory)
{
/*TODO check ASIC CAPs*/
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 7;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 2;
factory->number_of_pins[GPIO_ID_GSL] = 4;
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/*
* Pre-requisites: headers required by header of this unit
*/
#include "hw_translate_dcn21.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "dcn/dcn_2_1_0_offset.h"
#include "dcn/dcn_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
/* begin *********************
 * macros to expand the register list macros defined in the HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#undef REG
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
ASSERT_CRITICAL(false);
#ifdef PALLADIUM_SUPPORTED
*en = GPIO_DDC_LINE_DDC1;
return true;
#endif
return false;
}
break;
/* HPD */
case REG(DC_GPIO_HPD_A):
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
	/* REG(DC_GPIO_GENLK_MASK) */
case REG(DC_GPIO_GENLK_A):
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
	/* We don't care about the GPIO_ID for DDC here:
	 * the DDC handle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
	 * directly in its create method.
	 */
case REG(DC_GPIO_DDC1_A):
*en = GPIO_DDC_LINE_DDC1;
return true;
case REG(DC_GPIO_DDC2_A):
*en = GPIO_DDC_LINE_DDC2;
return true;
case REG(DC_GPIO_DDC3_A):
*en = GPIO_DDC_LINE_DDC3;
return true;
case REG(DC_GPIO_DDC4_A):
*en = GPIO_DDC_LINE_DDC4;
return true;
case REG(DC_GPIO_DDC5_A):
*en = GPIO_DDC_LINE_DDC5;
return true;
case REG(DC_GPIO_DDCVGA_A):
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/*
	 * case REG(DC_GPIO_I2CPAD_A): does not exist
* case REG(DC_GPIO_PWRSEQ_A):
* case REG(DC_GPIO_PAD_STRENGTH_1):
* case REG(DC_GPIO_PAD_STRENGTH_2):
* case REG(DC_GPIO_DEBUG):
*/
/* UNEXPECTED */
default:
	/* case REG(DC_GPIO_SYNCA_A): does not exist */
#ifdef PALLADIUM_SUPPORTED
*id = GPIO_ID_HPD;
*en = GPIO_DDC_LINE_DDC1;
return true;
#endif
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = REG(DC_GPIO_GENERIC_A);
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = REG(DC_GPIO_HPD_A);
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
#ifdef PALLADIUM_SUPPORTED
			info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
			result = true;
#else
			result = false;
#endif
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_GENLOCK_VSYNC:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_A:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_B:
			/* not implemented */
ASSERT_CRITICAL(false);
result = false;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
 * dal_hw_translate_dcn21_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dcn21_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c |
/*
* Copyright 2013-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dcn21.h"
#include "dcn/dcn_2_1_0_offset.h"
#include "dcn/dcn_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
#include "reg_helper.h"
#include "../hpd_regs.h"
/* begin *********************
 * macros to expand the register list macros defined in the HW object header file */
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define REG(reg_name)\
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
#define REGI(reg_name, block, id)\
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(1),
ddc_data_regs_dcn2(2),
ddc_data_regs_dcn2(3),
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(1),
ddc_clk_regs_dcn2(2),
ddc_clk_regs_dcn2(3),
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
};
static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
};
static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 1),
DDC_MASK_SH_LIST_DCN2(_MASK, 2),
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
DDC_MASK_SH_LIST_DCN2(_MASK, 6)
};
#include "../generic_regs.h"
/* set field name */
#define SF_GENERIC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define generic_regs(id) \
{\
GENERIC_REG_LIST(id)\
}
static const struct generic_registers generic_regs[] = {
generic_regs(A),
};
static const struct generic_sh_mask generic_shift[] = {
GENERIC_MASK_SH_LIST(__SHIFT, A),
};
static const struct generic_sh_mask generic_mask[] = {
GENERIC_MASK_SH_LIST(_MASK, A),
};
static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
generic->regs = &generic_regs[en];
generic->shifts = &generic_shift[en];
generic->masks = &generic_mask[en];
generic->base.regs = &generic_regs[en].gpio;
}
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs_dcn[en];
ddc->base.regs = &ddc_data_regs_dcn[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs_dcn[en];
ddc->base.regs = &ddc_clk_regs_dcn[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift[en];
ddc->masks = &ddc_mask[en];
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers,
.define_generic_registers = define_generic_registers
};
/*
 * dal_hw_factory_dcn21_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dcn21_init(struct hw_factory *factory)
{
/*TODO check ASIC CAPs*/
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 4;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 0;
factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
#include "../hw_gpio.h"
#include "../hw_ddc.h"
#include "../hw_hpd.h"
#include "../hw_generic.h"
#include "hw_factory_dcn315.h"
#include "dcn/dcn_3_1_5_offset.h"
#include "dcn/dcn_3_1_5_sh_mask.h"
#include "reg_helper.h"
#include "../hpd_regs.h"
/* begin *********************
 * macros to expand the register list macros defined in the HW object header file */
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
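/*
 * The segment base addresses above are hard-coded for DCN3.1.5; the
 * DCN1.x/2.x factories earlier in this set pull them from the corresponding
 * *_ip_offset.h header (vega10/navi10/renoir) instead.
 */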
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define REG(reg_name)\
BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
#define REGI(reg_name, block, id)\
BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
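/*
* Illustrative note (not from the original source): with the segment table and
* macros above, REG(DC_GPIO_HPD_A) expands to
* BASE(regDC_GPIO_HPD_A_BASE_IDX) + regDC_GPIO_HPD_A, i.e. the per-segment DCN
* base selected by the register's _BASE_IDX plus its register offset from
* dcn_3_1_5_offset.h.
*/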
#define hpd_regs(id) \
{\
HPD_REG_LIST(id)\
}
static const struct hpd_registers hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
};
static const struct hpd_sh_mask hpd_shift = {
HPD_MASK_SH_LIST(__SHIFT)
};
static const struct hpd_sh_mask hpd_mask = {
HPD_MASK_SH_LIST(_MASK)
};
#include "../ddc_regs.h"
/* set field name */
#define SF_DDC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(1),
ddc_data_regs_dcn2(2),
ddc_data_regs_dcn2(3),
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
{
DDC_GPIO_VGA_REG_LIST(DATA),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(1),
ddc_clk_regs_dcn2(2),
ddc_clk_regs_dcn2(3),
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
{
DDC_GPIO_VGA_REG_LIST(CLK),
.ddc_setup = 0,
.phy_aux_cntl = 0,
.dc_gpio_aux_ctrl_5 = 0
}
};
static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
};
static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 1),
DDC_MASK_SH_LIST_DCN2(_MASK, 2),
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
DDC_MASK_SH_LIST_DCN2(_MASK, 6)
};
#include "../generic_regs.h"
/* set field name */
#define SF_GENERIC(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define generic_regs(id) \
{\
GENERIC_REG_LIST(id)\
}
static const struct generic_registers generic_regs[] = {
generic_regs(A),
generic_regs(B),
};
static const struct generic_sh_mask generic_shift[] = {
GENERIC_MASK_SH_LIST(__SHIFT, A),
GENERIC_MASK_SH_LIST(__SHIFT, B),
};
static const struct generic_sh_mask generic_mask[] = {
GENERIC_MASK_SH_LIST(_MASK, A),
GENERIC_MASK_SH_LIST(_MASK, B),
};
static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
generic->regs = &generic_regs[en];
generic->shifts = &generic_shift[en];
generic->masks = &generic_mask[en];
generic->base.regs = &generic_regs[en].gpio;
}
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
{
struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
switch (pin->id) {
case GPIO_ID_DDC_DATA:
ddc->regs = &ddc_data_regs_dcn[en];
ddc->base.regs = &ddc_data_regs_dcn[en].gpio;
break;
case GPIO_ID_DDC_CLOCK:
ddc->regs = &ddc_clk_regs_dcn[en];
ddc->base.regs = &ddc_clk_regs_dcn[en].gpio;
break;
default:
ASSERT_CRITICAL(false);
return;
}
ddc->shifts = &ddc_shift[en];
ddc->masks = &ddc_mask[en];
}
static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
hpd->regs = &hpd_regs[en];
hpd->shifts = &hpd_shift;
hpd->masks = &hpd_mask;
hpd->base.regs = &hpd_regs[en].gpio;
}
/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
.init_hpd = dal_hw_hpd_init,
.get_ddc_pin = dal_hw_ddc_get_pin,
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
.define_ddc_registers = define_ddc_registers,
.define_generic_registers = define_generic_registers
};
/*
* dal_hw_factory_dcn315_init
*
* @brief
* Initialize HW factory function pointers and pin info
*
* @param
* struct hw_factory *factory - [out] struct of function pointers
*/
void dal_hw_factory_dcn315_init(struct hw_factory *factory)
{
/*TODO check ASIC CAPs*/
factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
factory->number_of_pins[GPIO_ID_GENERIC] = 4;
factory->number_of_pins[GPIO_ID_HPD] = 6;
factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;
factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
factory->number_of_pins[GPIO_ID_SYNC] = 0;
factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/
factory->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hw_translate_dcn315.h"
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
#include "dcn/dcn_3_1_5_offset.h"
#include "dcn/dcn_3_1_5_sh_mask.h"
/* begin *********************
* macros to expand the register list macros defined in the HW object header file */
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
/* DCN */
#define block HPD
#define reg_num 0
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#undef REG
#define REG(reg_name)\
BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
/* macros to expand the register list macros defined in the HW object header file
* end *********************/
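/*
* offset_to_id: reverse-map a raw GPIO register offset and pin mask back to a
* (gpio_id, enable index) pair; for the DDC lines only the line index is
* filled in, as noted in the body. Returns false (and asserts) for any
* register or mask this ASIC's translate table does not describe.
*/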
static bool offset_to_id(
uint32_t offset,
uint32_t mask,
enum gpio_id *id,
uint32_t *en)
{
switch (offset) {
/* GENERIC */
case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
*en = GPIO_GENERIC_A;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
*en = GPIO_GENERIC_B;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
*en = GPIO_GENERIC_C;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
*en = GPIO_GENERIC_D;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
*en = GPIO_GENERIC_E;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
*en = GPIO_GENERIC_F;
return true;
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
*en = GPIO_GENERIC_G;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* HPD */
case REG(DC_GPIO_HPD_A):
*id = GPIO_ID_HPD;
switch (mask) {
case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
*en = GPIO_HPD_1;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
*en = GPIO_HPD_2;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
*en = GPIO_HPD_3;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
*en = GPIO_HPD_4;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
*en = GPIO_HPD_5;
return true;
case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
*en = GPIO_HPD_6;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* REG(DC_GPIO_GENLK_MASK) */
case REG(DC_GPIO_GENLK_A):
*id = GPIO_ID_GSL;
switch (mask) {
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
*en = GPIO_GSL_GENLOCK_CLOCK;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
*en = GPIO_GSL_GENLOCK_VSYNC;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
*en = GPIO_GSL_SWAPLOCK_A;
return true;
case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
*en = GPIO_GSL_SWAPLOCK_B;
return true;
default:
ASSERT_CRITICAL(false);
return false;
}
break;
/* DDC */
/* we don't care about the GPIO_ID for DDC;
* DdcHandle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
* directly in the create method
*/
case REG(DC_GPIO_DDC1_A):
*en = GPIO_DDC_LINE_DDC1;
return true;
case REG(DC_GPIO_DDC2_A):
*en = GPIO_DDC_LINE_DDC2;
return true;
case REG(DC_GPIO_DDC3_A):
*en = GPIO_DDC_LINE_DDC3;
return true;
case REG(DC_GPIO_DDC4_A):
*en = GPIO_DDC_LINE_DDC4;
return true;
case REG(DC_GPIO_DDC5_A):
*en = GPIO_DDC_LINE_DDC5;
return true;
case REG(DC_GPIO_DDCVGA_A):
*en = GPIO_DDC_LINE_DDC_VGA;
return true;
/*
* case REG(DC_GPIO_I2CPAD_A): not exist
* case REG(DC_GPIO_PWRSEQ_A):
* case REG(DC_GPIO_PAD_STRENGTH_1):
* case REG(DC_GPIO_PAD_STRENGTH_2):
* case REG(DC_GPIO_DEBUG):
*/
/* UNEXPECTED */
default:
/* case REG(DC_GPIO_SYNCA_A): not exist */
ASSERT_CRITICAL(false);
return false;
}
}
static bool id_to_offset(
enum gpio_id id,
uint32_t en,
struct gpio_pin_info *info)
{
bool result = true;
switch (id) {
case GPIO_ID_DDC_DATA:
info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_DDC_CLOCK:
info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK;
switch (en) {
case GPIO_DDC_LINE_DDC1:
info->offset = REG(DC_GPIO_DDC1_A);
break;
case GPIO_DDC_LINE_DDC2:
info->offset = REG(DC_GPIO_DDC2_A);
break;
case GPIO_DDC_LINE_DDC3:
info->offset = REG(DC_GPIO_DDC3_A);
break;
case GPIO_DDC_LINE_DDC4:
info->offset = REG(DC_GPIO_DDC4_A);
break;
case GPIO_DDC_LINE_DDC5:
info->offset = REG(DC_GPIO_DDC5_A);
break;
case GPIO_DDC_LINE_DDC_VGA:
info->offset = REG(DC_GPIO_DDCVGA_A);
break;
case GPIO_DDC_LINE_I2C_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GENERIC:
info->offset = REG(DC_GPIO_GENERIC_A);
switch (en) {
case GPIO_GENERIC_A:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
break;
case GPIO_GENERIC_B:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
break;
case GPIO_GENERIC_C:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
break;
case GPIO_GENERIC_D:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
break;
case GPIO_GENERIC_E:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
break;
case GPIO_GENERIC_F:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
break;
case GPIO_GENERIC_G:
info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_HPD:
info->offset = REG(DC_GPIO_HPD_A);
switch (en) {
case GPIO_HPD_1:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
break;
case GPIO_HPD_2:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
break;
case GPIO_HPD_3:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
break;
case GPIO_HPD_4:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
break;
case GPIO_HPD_5:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
break;
case GPIO_HPD_6:
info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_GSL:
switch (en) {
case GPIO_GSL_GENLOCK_CLOCK:
/*not implemented*/
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_GENLOCK_VSYNC:
/*not implemented*/
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_A:
/*not implemented*/
ASSERT_CRITICAL(false);
result = false;
break;
case GPIO_GSL_SWAPLOCK_B:
/*not implemented*/
ASSERT_CRITICAL(false);
result = false;
break;
default:
ASSERT_CRITICAL(false);
result = false;
}
break;
case GPIO_ID_SYNC:
case GPIO_ID_VIP_PAD:
default:
ASSERT_CRITICAL(false);
result = false;
}
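/* Derive the companion register offsets from the *_A register resolved above:
* the *_MASK register immediately precedes it, while *_EN and *_Y immediately
* follow; the same field mask is reused for all of them.
*/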
if (result) {
info->offset_y = info->offset + 2;
info->offset_en = info->offset + 1;
info->offset_mask = info->offset - 1;
info->mask_y = info->mask;
info->mask_en = info->mask;
info->mask_mask = info->mask;
}
return result;
}
/* function table */
static const struct hw_translate_funcs funcs = {
.offset_to_id = offset_to_id,
.id_to_offset = id_to_offset,
};
/*
* dal_hw_translate_dcn315_init
*
* @brief
* Initialize Hw translate function pointers.
*
* @param
* struct hw_translate *tr - [out] struct of function pointers
*
*/
void dal_hw_translate_dcn315_init(struct hw_translate *tr)
{
tr->funcs = &funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn31/dcn31_init.h"
#include "dcn314/dcn314_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn314_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
#include "dcn31/dcn31_resource.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn31/dcn31_hubbub.h"
#include "dcn30/dcn30_mpc.h"
#include "dcn31/dcn31_hubp.h"
#include "irq/dcn31/irq_service_dcn31.h"
#include "irq/dcn314/irq_service_dcn314.h"
#include "dcn30/dcn30_dpp.h"
#include "dcn314/dcn314_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn30/dcn30_opp.h"
#include "dcn20/dcn20_dsc.h"
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn314/dcn314_dio_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_vpg.h"
#include "dcn31/dcn31_afmt.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/dcn314/dcn314_fpu.h"
#include "dcn314/dcn314_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn314/dcn314_hwseq.h"
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
#include "dcn/dcn_3_1_4_offset.h"
#include "dcn/dcn_3_1_4_sh_mask.h"
#include "dpcs/dpcs_3_1_4_offset.h"
#include "dpcs/dpcs_3_1_4_sh_mask.h"
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
#include "dce/dmub_replay.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn314/display_mode_vba_314.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "link_enc_cfg.h"
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define NBIO_BASE__INST0_SEG1 0x00000014
#define MAX_INSTANCE 7
#define MAX_SEGMENT 8
#define regBIF_BX2_BIOS_SCRATCH_2 0x003a
#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 1
#define regBIF_BX2_BIOS_SCRATCH_3 0x003b
#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 1
#define regBIF_BX2_BIOS_SCRATCH_6 0x003e
#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1
#define DC_LOGGER_INIT(logger)
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
DCN31_CLK_SRC_PLL2,
DCN31_CLK_SRC_PLL3,
DCN31_CLK_SRC_PLL4,
DCN30_CLK_SRC_TOTAL
};
/* begin *********************
* macros to expand the register list macros defined in the HW object header file
*/
/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
.field_name = reg_name ## __ ## field_name ## post_fix
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
reg ## reg_name ## _ ## block ## id
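/* Informational note (not from the original source): the SR/SRI/SRII macro
* family above fills a register struct field with an absolute address built
* from the per-segment DCN base selected by the register's _BASE_IDX plus the
* register offset; the SRII variants index array-valued fields by block
* instance id.
*/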
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
regBIF_BX2_ ## reg_name
/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
MMHUB_BASE__INST0_SEG ## seg
#define MMHUB_BASE(seg) \
MMHUB_BASE_INNER(seg)
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
/* CLOCK */
#define CLK_BASE_INNER(seg) \
CLK_BASE__INST0_SEG ## seg
#define CLK_BASE(seg) \
CLK_BASE_INNER(seg)
#define CLK_SRI(reg_name, block, inst)\
.reg_name = CLK_BASE(reg ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## _ ## inst ## _ ## reg_name
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
}
static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, C),
clk_src_regs(3, D),
clk_src_regs(4, E)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT)
};
static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK)
};
#define abm_regs(id)\
[id] = {\
ABM_DCN302_REG_LIST(id)\
}
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
abm_regs(1),
abm_regs(2),
abm_regs(3),
};
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN30(_MASK)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
}
static const struct dce_audio_registers audio_regs[] = {
audio_regs(0),
audio_regs(1),
audio_regs(2),
audio_regs(3),
audio_regs(4),
audio_regs(5),
audio_regs(6)
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
#define vpg_regs(id)\
[id] = {\
VPG_DCN31_REG_LIST(id)\
}
static const struct dcn31_vpg_registers vpg_regs[] = {
vpg_regs(0),
vpg_regs(1),
vpg_regs(2),
vpg_regs(3),
vpg_regs(4),
vpg_regs(5),
vpg_regs(6),
vpg_regs(7),
vpg_regs(8),
vpg_regs(9),
};
static const struct dcn31_vpg_shift vpg_shift = {
DCN31_VPG_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_vpg_mask vpg_mask = {
DCN31_VPG_MASK_SH_LIST(_MASK)
};
#define afmt_regs(id)\
[id] = {\
AFMT_DCN31_REG_LIST(id)\
}
static const struct dcn31_afmt_registers afmt_regs[] = {
afmt_regs(0),
afmt_regs(1),
afmt_regs(2),
afmt_regs(3),
afmt_regs(4),
afmt_regs(5)
};
static const struct dcn31_afmt_shift afmt_shift = {
DCN31_AFMT_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_afmt_mask afmt_mask = {
DCN31_AFMT_MASK_SH_LIST(_MASK)
};
#define apg_regs(id)\
[id] = {\
APG_DCN31_REG_LIST(id)\
}
static const struct dcn31_apg_registers apg_regs[] = {
apg_regs(0),
apg_regs(1),
apg_regs(2),
apg_regs(3)
};
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
[id] = {\
SE_DCN314_REG_LIST(id)\
}
static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
stream_enc_regs(4)
};
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN314(__SHIFT)
};
static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN314(_MASK)
};
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4)
};
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4)
};
#define link_regs(id, phyid)\
[id] = {\
LE_DCN31_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
}
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};
static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
link_regs(2, C),
link_regs(3, D),
link_regs(4, E)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT),
DPCS_DCN31_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK),
DPCS_DCN31_MASK_SH_LIST(_MASK)
};
#define hpo_dp_stream_encoder_reg_list(id)\
[id] = {\
DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
}
static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
hpo_dp_stream_encoder_reg_list(3)
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
};
#define hpo_dp_link_encoder_reg_list(id)\
[id] = {\
DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
DCN3_1_RDPCSTX_REG_LIST(0),\
DCN3_1_RDPCSTX_REG_LIST(1),\
DCN3_1_RDPCSTX_REG_LIST(2),\
}
static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
hpo_dp_link_encoder_reg_list(0),
hpo_dp_link_encoder_reg_list(1),
};
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
};
static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
#define dpp_regs(id)\
[id] = {\
DPP_REG_LIST_DCN30(id),\
}
static const struct dcn3_dpp_registers dpp_regs[] = {
dpp_regs(0),
dpp_regs(1),
dpp_regs(2),
dpp_regs(3)
};
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
};
static const struct dcn3_dpp_mask tf_mask = {
DPP_REG_LIST_SH_MASK_DCN30(_MASK)
};
#define opp_regs(id)\
[id] = {\
OPP_REG_LIST_DCN30(id),\
}
static const struct dcn20_opp_registers opp_regs[] = {
opp_regs(0),
opp_regs(1),
opp_regs(2),
opp_regs(3)
};
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
};
static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST0(id), \
.AUXN_IMPCAL = 0, \
.AUXP_IMPCAL = 0, \
.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4)
};
#define dwbc_regs_dcn3(id)\
[id] = {\
DWBC_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_dwbc_registers dwbc30_regs[] = {
dwbc_regs_dcn3(0),
};
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define mcif_wb_regs_dcn3(id)\
[id] = {\
MCIF_WB_COMMON_REG_LIST_DCN30(id),\
}
static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
mcif_wb_regs_dcn3(0)
};
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define dsc_regsDCN314(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
dsc_regsDCN314(0),
dsc_regsDCN314(1),
dsc_regsDCN314(2),
dsc_regsDCN314(3)
};
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
MPC_REG_LIST_DCN3_0(0),
MPC_REG_LIST_DCN3_0(1),
MPC_REG_LIST_DCN3_0(2),
MPC_REG_LIST_DCN3_0(3),
MPC_OUT_MUX_REG_LIST_DCN3_0(0),
MPC_OUT_MUX_REG_LIST_DCN3_0(1),
MPC_OUT_MUX_REG_LIST_DCN3_0(2),
MPC_OUT_MUX_REG_LIST_DCN3_0(3),
MPC_RMU_GLOBAL_REG_LIST_DCN3AG,
MPC_RMU_REG_LIST_DCN3AG(0),
MPC_RMU_REG_LIST_DCN3AG(1),
//MPC_RMU_REG_LIST_DCN3AG(2),
MPC_DWB_MUX_REG_LIST_DCN3_0(0),
};
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
#define optc_regs(id)\
[id] = {OPTC_COMMON_REG_LIST_DCN3_14(id)}
static const struct dcn_optc_registers optc_regs[] = {
optc_regs(0),
optc_regs(1),
optc_regs(2),
optc_regs(3)
};
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_14(__SHIFT)
};
static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_14(_MASK)
};
#define hubp_regs(id)\
[id] = {\
HUBP_REG_LIST_DCN30(id)\
}
static const struct dcn_hubp2_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
hubp_regs(2),
hubp_regs(3)
};
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN31(_MASK)
};
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN31(0)
};
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN31(__SHIFT)
};
static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN31(_MASK)
};
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN314()
};
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN314(__SHIFT)
};
static const struct dccg_mask dccg_mask = {
DCCG_MASK_SH_LIST_DCN314(_MASK)
};
#define SRII2(reg_name_pre, reg_name_post, id)\
.reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
## id ## _ ## reg_name_post ## _BASE_IDX) + \
reg ## reg_name_pre ## id ## _ ## reg_name_post
#define HWSEQ_DCN31_REG_LIST()\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DIO_MEM_PWR_CTRL), \
SR(ODM_MEM_PWR_CTRL3), \
SR(DMU_MEM_PWR_CNTL), \
SR(MMHUBBUB_MEM_PWR_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SRII(PIXEL_RATE_CNTL, OTG, 0), \
SRII(PIXEL_RATE_CNTL, OTG, 1),\
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
SR(MICROSECOND_TIME_BASE_DIV), \
SR(MILLISECOND_TIME_BASE_DIV), \
SR(DISPCLK_FREQ_CHANGE_CNTL), \
SR(RBBMIF_TIMEOUT_DIS), \
SR(RBBMIF_TIMEOUT_DIS_2), \
SR(DCHUBBUB_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
SR(MPC_CRC_CTRL), \
SR(MPC_CRC_RESULT_GB), \
SR(MPC_CRC_RESULT_C), \
SR(MPC_CRC_RESULT_AR), \
SR(DOMAIN0_PG_CONFIG), \
SR(DOMAIN1_PG_CONFIG), \
SR(DOMAIN2_PG_CONFIG), \
SR(DOMAIN3_PG_CONFIG), \
SR(DOMAIN16_PG_CONFIG), \
SR(DOMAIN17_PG_CONFIG), \
SR(DOMAIN18_PG_CONFIG), \
SR(DOMAIN19_PG_CONFIG), \
SR(DOMAIN0_PG_STATUS), \
SR(DOMAIN1_PG_STATUS), \
SR(DOMAIN2_PG_STATUS), \
SR(DOMAIN3_PG_STATUS), \
SR(DOMAIN16_PG_STATUS), \
SR(DOMAIN17_PG_STATUS), \
SR(DOMAIN18_PG_STATUS), \
SR(DOMAIN19_PG_STATUS), \
SR(D1VGA_CONTROL), \
SR(D2VGA_CONTROL), \
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING), \
SR(HPO_TOP_HW_CONTROL)
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN31_REG_LIST()
};
#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
};
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN31_MASK_SH_LIST(_MASK)
};
#define vmid_regs(id)\
[id] = {\
DCN20_VMID_REG_LIST(id)\
}
static const struct dcn_vmid_registers vmid_regs[] = {
vmid_regs(0),
vmid_regs(1),
vmid_regs(2),
vmid_regs(3),
vmid_regs(4),
vmid_regs(5),
vmid_regs(6),
vmid_regs(7),
vmid_regs(8),
vmid_regs(9),
vmid_regs(10),
vmid_regs(11),
vmid_regs(12),
vmid_regs(13),
vmid_regs(14),
vmid_regs(15)
};
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
};
static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
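/* Static per-ASIC resource counts for DCN 3.1.4; the pool construct/destruct
* code below sizes its loops from these fields.
*/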
static const struct resource_caps res_cap_dcn314 = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
.num_audio = 5,
.num_stream_encoder = 5,
.num_dig_link_enc = 5,
.num_hpo_dp_stream_encoder = 4,
.num_hpo_dp_link_encoder = 2,
.num_pll = 5,
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
.num_dsc = 4,
};
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
.p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 16000
},
// 6:1 downscaling ratio: 1000/6 = 166.666
// 4:1 downscaling ratio for ARGB8888 to prevent underflow during P010 playback: 1000/4 = 250
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
64,
64
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
.minimum_z8_residency_time = 2000,
.psr_skip_crtc_disable = true,
.replay_skip_crtc_disabled = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_dpp_power_gate = false,
.disable_hubp_power_gate = false,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.pstate_enabled = true,
.use_max_lb = true,
.enable_mem_low_power = {
.bits = {
.vga = true,
.i2c = true,
.dmcu = false, // Previously known to cause a hang on S3 cycles if enabled
.dscl = true,
.cm = true,
.mpc = true,
.optc = true,
.vpg = true,
.afmt = true,
}
},
.root_clock_optimization = {
.bits = {
.dpp = true,
.dsc = true,
.hdmistream = true,
.hdmichar = true,
.dpstream = true,
.symclk32_se = true,
.symclk32_le = true,
.symclk_fe = true,
.physymclk = true,
.dpiasymclk = true,
}
},
.seamless_boot_odm_combine = true
};
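/* Diagnostics-environment defaults; selected in place of debug_defaults_drv
* when dce_environment is not DCE_ENV_PRODUCTION_DRV (see
* dcn314_resource_construct below).
*/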
static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
.disable_stutter = false,
.scl_reset_length10 = true,
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.enable_tri_buf = true,
.use_max_lb = true
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
.disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},
};
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
}
static struct dpp *dcn31_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn3_dpp *dpp =
kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
if (dpp3_construct(dpp, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp->base;
BREAK_TO_DEBUGGER();
kfree(dpp);
return NULL;
}
static struct output_pixel_processor *dcn31_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
return NULL;
}
dcn20_opp_construct(opp, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
static struct dce_aux *dcn31_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
i2c_inst_regs(1),
i2c_inst_regs(2),
i2c_inst_regs(3),
i2c_inst_regs(4),
i2c_inst_regs(5),
};
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};
static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
/* ========================================================== */
/*
* DPIA index | Preferred Encoder | Host Router
* 0 | C | 0
* 1 | First Available | 0
* 2 | D | 1
* 3 | First Available | 1
*/
/* ========================================================== */
static const enum engine_id dpia_to_preferred_enc_id_table[] = {
ENGINE_ID_DIGC,
ENGINE_ID_DIGC,
ENGINE_ID_DIGD,
ENGINE_ID_DIGD
};
static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index)
{
return dpia_to_preferred_enc_id_table[dpia_index];
}
static struct dce_i2c_hw *dcn31_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
return dce_i2c_hw;
}
static struct mpc *dcn31_mpc_create(
struct dc_context *ctx,
int num_mpcc,
int num_rmu)
{
struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
GFP_KERNEL);
if (!mpc30)
return NULL;
dcn30_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
&mpc_mask,
num_mpcc,
num_rmu);
return &mpc30->base;
}
static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
GFP_KERNEL);
if (!hubbub3)
return NULL;
hubbub31_construct(hubbub3, ctx,
&hubbub_reg,
&hubbub_shift,
&hubbub_mask,
dcn3_14_ip.det_buffer_size_kbytes,
dcn3_14_ip.pixel_chunk_size_kbytes,
dcn3_14_ip.config_return_buffer_size_in_kbytes);
for (i = 0; i < res_cap_dcn314.num_vmid; i++) {
struct dcn20_vmid *vmid = &hubbub3->vmid[i];
vmid->ctx = ctx;
vmid->regs = &vmid_regs[i];
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
return &hubbub3->base;
}
static struct timing_generator *dcn31_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
tgn10->tg_regs = &optc_regs[instance];
tgn10->tg_shift = &optc_shift;
tgn10->tg_mask = &optc_mask;
dcn314_timing_generator_init(tgn10);
return &tgn10->base;
}
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};
static struct link_encoder *dcn31_link_encoder_create(
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
if (!enc20)
return NULL;
dcn31_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc20->enc10.base;
}
/* Create a minimal link encoder object not associated with a particular
* physical connector.
* resource_funcs.link_enc_create_minimal
*/
static struct link_encoder *dcn31_link_enc_create_minimal(
struct dc_context *ctx, enum engine_id eng_id)
{
struct dcn20_link_encoder *enc20;
if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
return NULL;
enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
if (!enc20)
return NULL;
dcn31_link_encoder_construct_minimal(
enc20,
ctx,
&link_enc_feature,
&link_enc_regs[eng_id - ENGINE_ID_DIGA],
eng_id);
return &enc20->enc10.base;
}
static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dcn31_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
if (!panel_cntl)
return NULL;
dcn31_panel_cntl_construct(panel_cntl, init_data);
return &panel_cntl->base;
}
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
static struct audio *dcn31_create_audio(
struct dc_context *ctx, unsigned int inst)
{
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
static struct vpg *dcn31_vpg_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
if (!vpg31)
return NULL;
vpg31_construct(vpg31, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
&vpg_mask);
return &vpg31->base;
}
static struct afmt *dcn31_afmt_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
if (!afmt31)
return NULL;
afmt31_construct(afmt31, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
&afmt_mask);
// Light sleep by default, no need to power down here
return &afmt31->base;
}
static struct apg *dcn31_apg_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
if (!apg31)
return NULL;
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
&apg_mask);
return &apg31->base;
}
static struct stream_encoder *dcn314_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
struct afmt *afmt;
int vpg_inst;
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
if (eng_id < ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
return NULL;
enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
vpg = dcn31_vpg_create(ctx, vpg_inst);
afmt = dcn31_afmt_create(ctx, afmt_inst);
if (!enc1 || !vpg || !afmt) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
return NULL;
}
dcn314_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
return &enc1->base;
}
static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
struct vpg *vpg;
struct apg *apg;
uint32_t hpo_dp_inst;
uint32_t vpg_inst;
uint32_t apg_inst;
ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
/* Mapping of VPG register blocks to HPO DP block instance:
* VPG[6] -> HPO_DP[0]
* VPG[7] -> HPO_DP[1]
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
//Uses offset index 5-8, but actually maps to vpg_inst 6-9
vpg_inst = hpo_dp_inst + 5;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
* APG[1] -> HPO_DP[1]
* APG[2] -> HPO_DP[2]
* APG[3] -> HPO_DP[3]
*/
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG/APG sub-blocks */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
if (!hpo_dp_enc31 || !vpg || !apg) {
kfree(hpo_dp_enc31);
kfree(vpg);
kfree(apg);
return NULL;
}
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
&hpo_dp_se_shift, &hpo_dp_se_mask);
return &hpo_dp_enc31->base;
}
static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
uint8_t inst,
struct dc_context *ctx)
{
struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
return &hpo_dp_enc31->base;
}
static struct dce_hwseq *dcn314_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
}
return hws;
}
static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn31_create_audio,
.create_stream_encoder = dcn314_stream_encoder_create,
.create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
.create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
.create_hwseq = dcn314_hwseq_create,
};
static void dcn314_resource_destruct(struct dcn314_resource_pool *pool)
{
unsigned int i;
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
if (pool->base.stream_enc[i]->vpg != NULL) {
kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
pool->base.stream_enc[i]->vpg = NULL;
}
if (pool->base.stream_enc[i]->afmt != NULL) {
kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
pool->base.stream_enc[i]->afmt = NULL;
}
kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
if (pool->base.hpo_dp_stream_enc[i] != NULL) {
if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
}
if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
pool->base.hpo_dp_stream_enc[i]->apg = NULL;
}
kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
pool->base.hpo_dp_stream_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
if (pool->base.hpo_dp_link_enc[i] != NULL) {
kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
pool->base.hpo_dp_link_enc[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
if (pool->base.hubbub != NULL) {
kfree(pool->base.hubbub);
pool->base.hubbub = NULL;
}
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.dpps[i] != NULL)
dcn31_dpp_destroy(&pool->base.dpps[i]);
if (pool->base.ipps[i] != NULL)
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
if (pool->base.hubps[i] != NULL) {
kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
pool->base.hubps[i] = NULL;
}
if (pool->base.irqs != NULL)
dal_irq_service_destroy(&pool->base.irqs);
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
}
if (pool->base.sw_i2cs[i] != NULL) {
kfree(pool->base.sw_i2cs[i]);
pool->base.sw_i2cs[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.timing_generators[i] != NULL) {
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
if (pool->base.dwbc[i] != NULL) {
kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
pool->base.dwbc[i] = NULL;
}
if (pool->base.mcif_wb[i] != NULL) {
kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
pool->base.mcif_wb[i] = NULL;
}
}
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] != NULL) {
dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
pool->base.clock_sources[i] = NULL;
}
}
for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
if (pool->base.mpc_lut[i] != NULL) {
dc_3dlut_func_release(pool->base.mpc_lut[i]);
pool->base.mpc_lut[i] = NULL;
}
if (pool->base.mpc_shaper[i] != NULL) {
dc_transfer_func_release(pool->base.mpc_shaper[i]);
pool->base.mpc_shaper[i] = NULL;
}
}
if (pool->base.dp_clock_source != NULL) {
dcn20_clock_source_destroy(&pool->base.dp_clock_source);
pool->base.dp_clock_source = NULL;
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
if (pool->base.multiple_abms[i] != NULL)
dce_abm_destroy(&pool->base.multiple_abms[i]);
}
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
if (pool->base.replay != NULL)
dmub_replay_destroy(&pool->base.replay);
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
static struct hubp *dcn31_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
if (hubp31_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
BREAK_TO_DEBUGGER();
kfree(hubp2);
return NULL;
}
static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
GFP_KERNEL);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
return false;
}
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
&dwbc30_mask,
i);
pool->dwbc[i] = &dwbc30->base;
}
return true;
}
static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
GFP_KERNEL);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
return false;
}
dcn30_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
&mcif_wb30_mask,
i);
pool->mcif_wb[i] = &mcif_wb30->base;
}
return true;
}
static struct display_stream_compressor *dcn314_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
return NULL;
}
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
return &dsc->base;
}
static void dcn314_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn314_resource_pool *dcn314_pool = TO_DCN314_RES_POOL(*pool);
dcn314_resource_destruct(dcn314_pool);
kfree(dcn314_pool);
*pool = NULL;
}
static struct clock_source *dcn31_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
BREAK_TO_DEBUGGER();
kfree(clk_src);
return NULL;
}
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int pipe_cnt;
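/* DML pipe population is floating-point code; DC_FP_START/DC_FP_END save and
* restore the FPU state so FP can be used safely in kernel context.
*/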
DC_FP_START();
pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
DC_FP_END();
return pipe_cnt;
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
DC_FP_START();
dcn314_update_bw_bounding_box_fpu(dc, bw_params);
DC_FP_END();
}
static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
static bool filter_modes_for_single_channel_workaround(struct dc *dc,
struct dc_state *context)
{
// Filter out timing combinations at or above 2K@240Hz + 8K@24fps when memory is a single-DIMM LPDDR configuration
if (dc->clk_mgr->bw_params->vram_type == 34 &&
dc->clk_mgr->bw_params->num_channels < 2 &&
context->stream_count > 1) {
int total_phy_pix_clk = 0;
for (int i = 0; i < context->stream_count; i++)
if (context->res_ctx.pipe_ctx[i].stream)
total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps
return true;
}
return false;
}
bool dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
bool out = false;
BW_VAL_TRACE_SETUP();
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
if (filter_modes_for_single_channel_workaround(dc, context))
goto validate_fail;
DC_FP_START();
// do not support self refresh only
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
DC_FP_END();
// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
fast_validate = false;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
if (dc->res_pool->funcs->calculate_wm_and_dlg)
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
validate_fail:
DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
BW_VAL_TRACE_SKIP(fail);
out = false;
validate_out:
kfree(pipes);
BW_VAL_TRACE_FINISH();
return out;
}
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
.link_enc_create_minimal = dcn31_link_enc_create_minimal,
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn314_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
};
static struct clock_source *dcn30_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
BREAK_TO_DEBUGGER();
kfree(clk_src);
return NULL;
}
static bool dcn314_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn314_resource_pool *pool)
{
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn314;
pool->base.funcs = &dcn314_res_pool_funcs;
/*************************************************
 * Resource + asic cap hardcoding                *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.max_slave_planes = 2;
dc->caps.max_slave_yuv_planes = 2;
dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->config.forceHBR2CP2520)
dc->caps.force_dp_tps4_for_cp2520 = false;
dc->caps.dp_hpo = true;
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
dc->caps.seamless_odm = true;
dc->caps.zstate_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
dc->caps.color.dpp.input_lut_shared = 0;
dc->caps.color.dpp.icsc = 1;
dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
dc->caps.color.dpp.dgam_rom_caps.pq = 1;
dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
dc->caps.color.dpp.post_csc = 1;
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
// no OGAM ROM on DCN314
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.dpp.ogam_rom_caps.pq = 0;
dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
dc->caps.color.dpp.ocsc = 0;
dc->caps.color.mpc.gamut_remap = 1;
dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
dc->caps.color.mpc.ogam_ram = 1;
dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
enum bp_result bp_query_result;
uint8_t is_vbios_lttpr_enable = 0;
bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
}
/* interop bit is implicit */
{
dc->caps.vbios_lttpr_aware = true;
}
}
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
dc->debug = debug_defaults_diags;
/* Disable pipe power gating */
dc->debug.disable_dpp_power_gate = true;
dc->debug.disable_hubp_power_gate = true;
/* Disable root clock optimization */
dc->debug.root_clock_optimization.u32All = 0;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
/*************************************************
* Create resources *
*************************************************/
/* Clock Sources for Pixel Clock*/
pool->base.clock_sources[DCN31_CLK_SRC_PLL0] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL1] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;
/* TODO: do not reuse phy_pll registers */
pool->base.dp_clock_source =
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_ID_DP_DTO,
&clk_src_regs[0], true);
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
pool->base.dccg = dccg314_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
if (pool->base.dccg == NULL) {
dm_error("DC: failed to create dccg!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn314_create(&init_data);
if (!pool->base.irqs)
goto create_fail;
/* HUBBUB */
pool->base.hubbub = dcn31_hubbub_create(ctx);
if (pool->base.hubbub == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create hubbub!\n");
goto create_fail;
}
/* HUBPs, DPPs, OPPs and TGs */
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.hubps[i] = dcn31_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create hubps!\n");
goto create_fail;
}
pool->base.dpps[i] = dcn31_dpp_create(ctx, i);
if (pool->base.dpps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_opp; i++) {
pool->base.opps[i] = dcn31_opp_create(ctx, i);
if (pool->base.opps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.timing_generators[i] = dcn31_timing_generator_create(
ctx, i);
if (pool->base.timing_generators[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
}
}
pool->base.timing_generator_count = i;
/* PSR */
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
dm_error("DC: failed to create psr obj!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* Replay */
pool->base.replay = dmub_replay_create(ctx);
if (pool->base.replay == NULL) {
dm_error("DC: failed to create replay obj!\n");
BREAK_TO_DEBUGGER();
goto create_fail;
}
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
&abm_regs[i],
&abm_shift,
&abm_mask);
if (pool->base.multiple_abms[i] == NULL) {
dm_error("DC: failed to create abm for pipe %d!\n", i);
BREAK_TO_DEBUGGER();
goto create_fail;
}
}
/* MPC and DSC */
pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
goto create_fail;
}
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn314_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create display stream compressor %d!\n", i);
goto create_fail;
}
}
/* DWB and MMHUBBUB */
if (!dcn31_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create dwbc!\n");
goto create_fail;
}
if (!dcn31_mmhubbub_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mcif_wb!\n");
goto create_fail;
}
/* AUX and I2C */
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto create_fail;
}
pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create hw i2c!!\n");
goto create_fail;
}
pool->base.sw_i2cs[i] = NULL;
}
/* DCN314 has 4 DPIA */
pool->base.usb4_dpia_count = 4;
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
dcn314_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->cap_funcs = cap_funcs;
dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp;
return true;
create_fail:
dcn314_resource_destruct(pool);
return false;
}
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn314_resource_pool *pool =
kzalloc(sizeof(struct dcn314_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
if (dcn314_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
kfree(pool);
return NULL;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc_bios_types.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
#include "link.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
enc1->base.ctx->logger
#define REG(reg)\
(enc1->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
enc1->se_shift->field_name, enc1->se_mask->field_name
#define VBI_LINE_0 0
#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
#define CTX \
enc1->base.ctx
static void enc314_reset_fifo(struct stream_encoder *enc, bool reset)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t reset_val = reset ? 1 : 0;
uint32_t is_symclk_on;
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);
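	/* Only poll DIG_FIFO_RESET_DONE while SYMCLK_FE is running; when the
	 * clock is gated the status bit cannot update, so fall back to a fixed
	 * delay instead. (Rationale inferred from the code below, not from HW docs.)
	 */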
if (is_symclk_on)
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
else
udelay(10);
}
static void enc314_enable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
enc314_reset_fifo(enc, true);
enc314_reset_fifo(enc, false);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
}
static void enc314_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
static void enc314_dp_set_odm_combine(
struct stream_encoder *enc,
bool odm_combine)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, odm_combine);
}
/* setup stream encoder in dvi mode */
static void enc314_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
struct bp_encoder_control cntl = {0};
cntl.action = ENCODER_CONTROL_SETUP;
cntl.engine_id = enc1->base.id;
cntl.signal = is_dual_link ?
SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
cntl.enable_dp_audio = false;
cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
if (enc1->base.bp->funcs->encoder_control(
enc1->base.bp, &cntl) != BP_RESULT_OK)
return;
} else {
//Set pattern for clock channel; default value 0x63 does not work
REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
//DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup
//DIG_SOURCE_SELECT is already set in dig_connect_to_otg
enc314_enable_fifo(enc);
}
ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
}
/* setup stream encoder in hdmi mode */
static void enc314_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
int actual_pix_clk_khz,
bool enable_audio)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
struct bp_encoder_control cntl = {0};
cntl.action = ENCODER_CONTROL_SETUP;
cntl.engine_id = enc1->base.id;
cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
cntl.enable_dp_audio = enable_audio;
cntl.pixel_clock = actual_pix_clk_khz;
cntl.lanes_number = LANE_COUNT_FOUR;
if (enc1->base.bp->funcs->encoder_control(
enc1->base.bp, &cntl) != BP_RESULT_OK)
return;
} else {
//Set pattern for clock channel; default value 0x63 does not work
REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
//DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup
//DIG_SOURCE_SELECT is already set in dig_connect_to_otg
enc314_enable_fifo(enc);
}
/* Configure pixel encoding */
enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
/* setup HDMI engine */
REG_UPDATE_6(HDMI_CONTROL,
HDMI_PACKET_GEN_VERSION, 1,
HDMI_KEEPOUT_MODE, 1,
HDMI_DEEP_COLOR_ENABLE, 0,
HDMI_DATA_SCRAMBLE_EN, 0,
HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1,
HDMI_CLOCK_CHANNEL_RATE, 0);
/* Configure color depth */
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_888:
REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
break;
case COLOR_DEPTH_101010:
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 0);
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 1);
}
break;
case COLOR_DEPTH_121212:
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 0);
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 1);
}
break;
case COLOR_DEPTH_161616:
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 3,
HDMI_DEEP_COLOR_ENABLE, 1);
break;
default:
break;
}
if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
/* enable HDMI data scrambler
* HDMI_CLOCK_CHANNEL_RATE_MORE_340M
* Clock channel frequency is 1/4 of character rate.
*/
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DATA_SCRAMBLE_EN, 1,
HDMI_CLOCK_CHANNEL_RATE, 1);
} else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
/* TODO: New feature for DCE11, still need to implement */
/* enable HDMI data scrambler
* HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
* Clock channel frequency is the same
* as character rate
*/
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DATA_SCRAMBLE_EN, 1,
HDMI_CLOCK_CHANNEL_RATE, 0);
}
/* Enable transmission of General Control packet on every frame */
REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
HDMI_GC_CONT, 1,
HDMI_GC_SEND, 1,
HDMI_NULL_SEND, 1);
/* Disable Audio Content Protection packet transmission */
REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);
/* following belongs to audio */
/* Enable Audio InfoFrame packet transmission. */
REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
/* update double-buffered AUDIO_INFO registers immediately */
ASSERT(enc->afmt);
enc->afmt->funcs->audio_info_immediate_update(enc->afmt);
/* Select line number on which to send Audio InfoFrame packets */
REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
VBI_LINE_0 + 2);
/* set HDMI GC AVMUTE */
REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
}
static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
return two_pix;
}
static void enc314_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
enc1_stream_encoder_dp_blank(link, enc);
/* Disable FIFO after the DP vid stream is disabled to avoid corruption. */
if (enc->ctx->dc->debug.dig_fifo_off_in_blank)
enc314_disable_fifo(enc);
}
static void enc314_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
uint32_t n_vid = 0x8000;
uint32_t m_vid;
uint32_t n_multiply = 0;
uint32_t pix_per_cycle = 0;
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(¶m->timing) || param->opp_cnt > 1) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
pix_per_cycle = 1;
}
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
*/
m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
m_vid = (uint32_t) m_vid_l;
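		/* Illustrative example with hypothetical values, assuming
		 * LINK_RATE_REF_FREQ_IN_KHZ is 27000: for a 594000 kHz pixel clock
		 * on an HBR2 link (link_rate = 20, i.e. 20 x 270 MHz),
		 * m_vid = 0x8000 * 594000 / (20 * 27000) = 36044 while n_vid stays
		 * 0x8000, giving M/N ~ 1.1 = Fstream / Flink.
		 */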
/* enable auto measurement */
REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
		/* auto measurement needs one full 0x8000 symbol cycle to kick in,
		 * therefore program initial values for Mvid and Nvid
		 */
REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
REG_UPDATE_2(DP_VID_TIMING,
DP_VID_M_N_GEN_EN, 1,
DP_VID_N_MUL, n_multiply);
REG_UPDATE(DP_PIXEL_FORMAT,
DP_PIXEL_PER_CYCLE_PROCESSING_MODE,
pix_per_cycle);
}
/* make sure stream is disabled before resetting steer fifo */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
/* DIG_START is removed from the register spec */
	/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
	 * that the fifo overflows during a mode transition and sometimes does not recover.
	 */
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
udelay(100);
/* the hardware would start sending video at the start of the next DP
* frame (i.e. rising edge of the vblank).
* NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
* register has no effect on enable transition! HW always guarantees
* VID_STREAM enable at start of next frame, and this is not
* programmable
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
/*
* DIG Resync FIFO now needs to be explicitly enabled.
* This should come after DP_VID_STREAM_ENABLE per HW docs.
*/
enc314_enable_fifo(enc);
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
 * dsc_mode: 0 disables DSC, other values enable DSC in the specified format
 * dsc_bytes_per_pixel: DP_DSC_BYTES_PER_PIXEL removed in DCN32
 * dsc_slice_width: DP_DSC_SLICE_WIDTH removed in DCN32
 */
static void enc314_dp_set_dsc_config(struct stream_encoder *enc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}
/* This function reads DSC-related register fields, to be logged later in
 * dcn10_log_hw_state, into an enc_state struct.
 */
static void enc314_read_state(struct stream_encoder *enc, struct enc_state *s)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
//if dsc is enabled, continue to read
REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
if (s->dsc_mode) {
REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num);
REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);
REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable);
REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
}
}
static void enc314_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	// The naming of this field is confusing: it actually selects the output
	// pixel mode of the OTG, which is the input mode of the DIG.
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0);
}
static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.dp_set_odm_combine =
enc314_dp_set_odm_combine,
.dp_set_stream_attribute =
enc2_stream_encoder_dp_set_stream_attribute,
.hdmi_set_stream_attribute =
enc314_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
enc314_stream_encoder_dvi_set_stream_attribute,
.set_throttled_vcp_size =
enc1_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc3_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets_sdp_line_num =
enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
enc314_stream_encoder_dp_blank,
.dp_unblank =
enc314_stream_encoder_dp_unblank,
.audio_mute_control = enc3_audio_mute_control,
.dp_audio_setup = enc3_se_dp_audio_setup,
.dp_audio_enable = enc3_se_dp_audio_enable,
.dp_audio_disable = enc1_se_dp_audio_disable,
.hdmi_audio_setup = enc3_se_hdmi_audio_setup,
.hdmi_audio_disable = enc1_se_hdmi_audio_disable,
.setup_stereo_sync = enc1_setup_stereo_sync,
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.dig_source_otg = enc1_dig_source_otg,
.dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
.enc_read_state = enc314_read_state,
.dp_set_dsc_config = enc314_dp_set_dsc_config,
.dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet,
.set_dynamic_metadata = enc2_set_dynamic_metadata,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.enable_fifo = enc314_enable_fifo,
.disable_fifo = enc314_disable_fifo,
.set_input_mode = enc314_set_dig_input_mode,
};
void dcn314_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
struct dc_context *ctx,
struct dc_bios *bp,
enum engine_id eng_id,
struct vpg *vpg,
struct afmt *afmt,
const struct dcn10_stream_enc_registers *regs,
const struct dcn10_stream_encoder_shift *se_shift,
const struct dcn10_stream_encoder_mask *se_mask)
{
enc1->base.funcs = &dcn314_str_enc_funcs;
enc1->base.ctx = ctx;
enc1->base.id = eng_id;
enc1->base.bp = bp;
enc1->base.vpg = vpg;
enc1->base.afmt = afmt;
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
enc1->base.stream_enc_inst = vpg->inst;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dcn301/dcn301_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn314/dcn314_hwseq.h"
#include "dcn314_init.h"
static const struct hw_sequencer_funcs dcn314_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn31_init_hw,
.power_down_on_boot = dcn10_power_down_on_boot,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
.program_output_csc = dcn20_program_output_csc,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
.update_info_frame = dcn31_update_info_frame,
.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
.enable_stream = dcn20_enable_stream,
.disable_stream = dce110_disable_stream,
.unblank_stream = dcn20_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_T12 = dce110_edp_wait_for_T12,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.program_triplebuffer = dcn20_program_triple_buffer,
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
.mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
.init_sys_ctx = dcn31_init_sys_ctx,
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dcn314_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
.init_pipes = dcn10_init_pipes,
.update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
.set_output_transfer_func = dcn30_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
.reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn20_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
.hubp_pg_control = dcn31_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn30_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
.resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
{
dc->hwss = dcn314_funcs;
dc->hwseq->funcs = dcn314_private_funcs;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dccg.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dc_dmub_srv.h"
#include "dcn314_hwseq.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
#include "link.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
#include "dce/dce_i2c_hw.h"
#include "dsc.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#define DC_LOGGER_INIT(logger)
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
#define DC_LOGGER \
dc->ctx->logger
#undef FN
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
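/*
 * Illustrative example with hypothetical numbers (not taken from the source):
 * for a 3840x2160 timing with h_total = 4400 and no borders, the horizontal
 * blank is 4400 - 3840 = 560 pixels; with two pixels per container (or two or
 * more OPPs) the count is halved to 280, and halved again to 140 in the 4:1
 * ODM combine case.
 */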
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
int opp_cnt)
{
bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
int flow_ctrl_cnt;
if (opp_cnt >= 2)
hblank_halved = true;
flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
stream->timing.h_border_left -
stream->timing.h_border_right;
if (hblank_halved)
flow_ctrl_cnt /= 2;
/* ODM combine 4:1 case */
if (opp_cnt == 4)
flow_ctrl_cnt /= 2;
return flow_ctrl_cnt;
}
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
ASSERT(dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
if (enable) {
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg;
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
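		/* Illustrative example with hypothetical numbers: with 2:1 ODM
		 * combine (opp_cnt = 2), a 7680-wide picture with 8 horizontal
		 * slices is programmed into each DSC instance as pic_width = 3840
		 * and num_slices_h = 4; the full-width values are restored further
		 * below before configuring the OPTC.
		 */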
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
/* Enable DSC in OPTC */
DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
optc_dsc_mode,
dsc_optc_cfg.bytes_per_pixel,
dsc_optc_cfg.slice_width);
} else {
/* disable DSC in OPTC */
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
pipe_ctx->stream_res.tg,
OPTC_DSC_DISABLED, 0, 0);
/* disable DSC block */
dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
ASSERT(odm_pipe->stream_res.dsc);
odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}
}
// Given any pipe_ctx, return the total ODM combine factor, and optionally return
// the OPPids which are used
static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances)
{
unsigned int opp_count = 1;
struct pipe_ctx *odm_pipe;
// First get to the top pipe
for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
;
// First pipe is always used
if (opp_instances)
opp_instances[0] = odm_pipe->stream_res.opp->inst;
// Find and count odm pipes, if any
for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
if (opp_instances)
opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
opp_count++;
}
return opp_count;
}
void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
struct mpc_dwb_flow_control flow_control;
struct mpc *mpc = dc->res_pool->mpc;
int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
if (opp_cnt > 1)
pipe_ctx->stream_res.tg->funcs->set_odm_combine(
pipe_ctx->stream_res.tg,
opp_inst, opp_cnt,
&pipe_ctx->stream->timing);
else
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
flow_control.flow_ctrl_mode = 0;
flow_control.flow_ctrl_cnt0 = 0x80;
flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
if (mpc->funcs->set_out_rate_control) {
for (i = 0; i < opp_cnt; ++i) {
mpc->funcs->set_out_rate_control(
mpc, opp_inst[i],
true,
rate_control_2x_pclk,
&flow_control);
}
}
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
true);
}
if (pipe_ctx->stream_res.dsc) {
struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
/* disconnect DSC block from stream */
dsc->funcs->dsc_disconnect(dsc);
}
}
}
void dcn314_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on)
{
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl = 0;
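	/* The values assigned above imply the PGFSM reports 0 when the domain is
	 * powered on and 2 when it is gated; this note is inferred from the code,
	 * not from hardware documentation.
	 */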
if (hws->ctx->dc->debug.disable_dsc_power_gate)
return;
if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc &&
hws->ctx->dc->res_pool->dccg->funcs->enable_dsc &&
power_on)
hws->ctx->dc->res_pool->dccg->funcs->enable_dsc(
hws->ctx->dc->res_pool->dccg, dsc_inst);
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
switch (dsc_inst) {
case 0: /* DSC0 */
REG_UPDATE(DOMAIN16_PG_CONFIG,
DOMAIN_POWER_GATE, power_gate);
REG_WAIT(DOMAIN16_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 1: /* DSC1 */
REG_UPDATE(DOMAIN17_PG_CONFIG,
DOMAIN_POWER_GATE, power_gate);
REG_WAIT(DOMAIN17_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 2: /* DSC2 */
REG_UPDATE(DOMAIN18_PG_CONFIG,
DOMAIN_POWER_GATE, power_gate);
REG_WAIT(DOMAIN18_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
case 3: /* DSC3 */
REG_UPDATE(DOMAIN19_PG_CONFIG,
DOMAIN_POWER_GATE, power_gate);
REG_WAIT(DOMAIN19_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
1, 1000);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) {
if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on)
hws->ctx->dc->res_pool->dccg->funcs->disable_dsc(
hws->ctx->dc->res_pool->dccg, dsc_inst);
}
}
void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
{
bool force_on = true; /* disable power gating */
uint32_t org_ip_request_cntl = 0;
if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
force_on = false;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
force_on = true; /* disable power gating */
if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
force_on = false;
/* DCS0/1/2/3/4 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
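/*
 * Sketch of the divider mapping implemented below (derived from the code, not
 * an authoritative table): 128b/132b DP uses K1 = K2 = divide-by-1; HDMI TMDS
 * and DVI use K1 = /1 with K2 = /2 for YCbCr 4:2:0 or /4 otherwise; 8b/10b DP
 * and virtual signals use K1 = /1 with K2 = /2 when two pixels share a
 * container or with 2:1 ODM combine, else K2 = /4.
 */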
unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
*k2_div = PIXEL_RATE_DIV_BY_2;
else
*k2_div = PIXEL_RATE_DIV_BY_4;
} else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
if (odm_combine_factor == 2)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
if ((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA))
ASSERT(false);
return odm_combine_factor;
}
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
{
uint32_t pix_per_cycle = 1;
uint32_t odm_combine_factor = 1;
if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
return;
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
pix_per_cycle = 2;
if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
pix_per_cycle);
}
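/*
 * Readability summary of the sequence below: temporarily disable the OTG for
 * any stream that is dpms-off or virtual, trigger the DCCG DIO FIFO resync,
 * then re-enable the OTGs that were disabled.
 */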
void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
{
unsigned int i;
struct pipe_ctx *pipe = NULL;
bool otg_disabled[MAX_PIPES] = {false};
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
}
}
hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (otg_disabled[i])
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
{
if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
return;
if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control)
hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
}
static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
	/* There are use cases where SYMCLK is referenced by the OTG. For
	 * instance, for a TMDS signal the OTG relies on SYMCLK even if the TX
	 * video output is off. However, the current link interface powers off
	 * the PHY when disabling link output, which also turns off the SYMCLK
	 * generated by the PHY. The workaround is to identify such a case, where
	 * SYMCLK is still in use by the OTG when we power off the PHY. When this
	 * is detected, we temporarily power the PHY back on and move the PHY's
	 * SYMCLK state to SYMCLK_ON_TX_OFF by calling the program_pix_clk
	 * interface. When the OTG is disabled, we then power off the PHY by
	 * calling disable link output again.
*
* In future dcn generations, we plan to rework transmitter control
* interface so that we could have an option to set SYMCLK ON TX OFF
* state in one step without this workaround
*/
struct dc *dc = link->ctx->dc;
struct pipe_ctx *pipe_ctx = NULL;
uint8_t i;
if (link->phy_state.symclk_ref_cnts.otg > 0) {
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
}
}
}
}
void dcn314_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
struct dmcu *dmcu = dc->res_pool->dmcu;
if (signal == SIGNAL_TYPE_EDP &&
link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
/*
* Add the logic to extract BOTH power up and power down sequences
* from enable/disable link output and only call edp panel control
* in enable_link_dp and disable_link_dp once.
*/
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
apply_symclk_on_tx_off_wa(link);
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "core_types.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn314_dccg.h"
#define TO_DCN_DCCG(dccg)\
container_of(dccg, struct dcn_dccg, base)
#define REG(reg) \
(dccg_dcn->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
#define CTX \
dccg_dcn->base.ctx
#define DC_LOGGER \
dccg->ctx->logger
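/*
 * Writing the currently latched DISPCLK read-divider value back into the
 * write-divider re-latches the DENTIST divider without changing the effective
 * DISPCLK, which triggers the DIO FIFO resync. This description is an
 * interpretation of the register sequence below, not taken from HW docs.
 */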
static void dccg314_trigger_dio_fifo_resync(
struct dccg *dccg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t dispclk_rdivider_value = 0;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
static void dccg314_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
enum pixel_rate_div *k1,
enum pixel_rate_div *k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
*k1 = PIXEL_RATE_DIV_NA;
*k2 = PIXEL_RATE_DIV_NA;
switch (otg_inst) {
case 0:
REG_GET_2(OTG_PIXEL_RATE_DIV,
OTG0_PIXEL_RATE_DIVK1, &val_k1,
OTG0_PIXEL_RATE_DIVK2, &val_k2);
break;
case 1:
REG_GET_2(OTG_PIXEL_RATE_DIV,
OTG1_PIXEL_RATE_DIVK1, &val_k1,
OTG1_PIXEL_RATE_DIVK2, &val_k2);
break;
case 2:
REG_GET_2(OTG_PIXEL_RATE_DIV,
OTG2_PIXEL_RATE_DIVK1, &val_k1,
OTG2_PIXEL_RATE_DIVK2, &val_k2);
break;
case 3:
REG_GET_2(OTG_PIXEL_RATE_DIV,
OTG3_PIXEL_RATE_DIVK1, &val_k1,
OTG3_PIXEL_RATE_DIVK2, &val_k2);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
*k1 = (enum pixel_rate_div)val_k1;
*k2 = (enum pixel_rate_div)val_k2;
}
static void dccg314_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
enum pixel_rate_div k1,
enum pixel_rate_div k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
// Don't program 0xF into the register field. Not valid since
// K1 / K2 field is only 1 / 2 bits wide
if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
BREAK_TO_DEBUGGER();
return;
}
dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
if (k1 == cur_k1 && k2 == cur_k2)
return;
switch (otg_inst) {
case 0:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG0_PIXEL_RATE_DIVK1, k1,
OTG0_PIXEL_RATE_DIVK2, k2);
break;
case 1:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG1_PIXEL_RATE_DIVK1, k1,
OTG1_PIXEL_RATE_DIVK2, k2);
break;
case 2:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG2_PIXEL_RATE_DIVK1, k1,
OTG2_PIXEL_RATE_DIVK2, k2);
break;
case 3:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG3_PIXEL_RATE_DIVK1, k1,
OTG3_PIXEL_RATE_DIVK2, k2);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
}
static void dccg314_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t p_src_sel = 0; /* selects dprefclk */
if (src == DTBCLK0)
p_src_sel = 2; /* selects dtbclk0 */
switch (otg_inst) {
case 0:
if (src == REFCLK)
REG_UPDATE(DTBCLK_P_CNTL,
DTBCLK_P0_EN, 0);
else
REG_UPDATE_2(DTBCLK_P_CNTL,
DTBCLK_P0_SRC_SEL, p_src_sel,
DTBCLK_P0_EN, 1);
break;
case 1:
if (src == REFCLK)
REG_UPDATE(DTBCLK_P_CNTL,
DTBCLK_P1_EN, 0);
else
REG_UPDATE_2(DTBCLK_P_CNTL,
DTBCLK_P1_SRC_SEL, p_src_sel,
DTBCLK_P1_EN, 1);
break;
case 2:
if (src == REFCLK)
REG_UPDATE(DTBCLK_P_CNTL,
DTBCLK_P2_EN, 0);
else
REG_UPDATE_2(DTBCLK_P_CNTL,
DTBCLK_P2_SRC_SEL, p_src_sel,
DTBCLK_P2_EN, 1);
break;
case 3:
if (src == REFCLK)
REG_UPDATE(DTBCLK_P_CNTL,
DTBCLK_P3_EN, 0);
else
REG_UPDATE_2(DTBCLK_P_CNTL,
DTBCLK_P3_SRC_SEL, p_src_sel,
DTBCLK_P3_EN, 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
}
/* Controls the generation of pixel valid for the OTG (OTG -> HPO case) */
static void dccg314_set_dtbclk_dto(
struct dccg *dccg,
const struct dtbclk_dto_params *params)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
/* DTO Output Rate / Pixel Rate = 1/4 */
int req_dtbclk_khz = params->pixclk_khz / 4;
if (params->ref_dtbclk_khz && req_dtbclk_khz) {
uint32_t modulo, phase;
// phase / modulo = dtbclk / dtbclk ref
modulo = params->ref_dtbclk_khz * 1000;
phase = req_dtbclk_khz * 1000;
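		/* Illustrative example with hypothetical values: with a 600000 kHz
		 * reference and an 1188000 kHz pixel clock, req_dtbclk_khz is
		 * 297000, so phase/modulo = 297000000/600000000 and the DTO outputs
		 * dtbclk at roughly half of the reference rate.
		 */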
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);
REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
DTBCLK_DTO_ENABLE[params->otg_inst], 1);
REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
1, 100);
/* program OTG_PIXEL_RATE_DIV for DIVK1 and DIVK2 fields */
dccg314_set_pixel_rate_div(dccg, params->otg_inst, PIXEL_RATE_DIV_BY_1, PIXEL_RATE_DIV_BY_1);
		/* The recommended programming sequence for enabling the DTBCLK DTO to
		 * generate valid pixel data for the HPO DP stream encoder specifies
		 * that the DTO source select should be set only after the DTO is
		 * enabled.
		 */
REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
PIPE_DTO_SRC_SEL[params->otg_inst], 2);
} else {
REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
DTBCLK_DTO_ENABLE[params->otg_inst], 0,
PIPE_DTO_SRC_SEL[params->otg_inst], 1);
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
}
}
static void dccg314_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
int dp_hpo_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
/* set the dtbclk_p source */
dccg314_set_dtbclk_p_src(dccg, src, otg_inst);
	/* enable and select one of the DTBCLKs for the pipe */
switch (dp_hpo_inst) {
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN, (src == REFCLK) ? 0 : 1,
DPSTREAMCLK0_SRC_SEL, otg_inst);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK1_EN, (src == REFCLK) ? 0 : 1,
DPSTREAMCLK1_SRC_SEL, otg_inst);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK2_EN, (src == REFCLK) ? 0 : 1,
DPSTREAMCLK2_SRC_SEL, otg_inst);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK3_EN, (src == REFCLK) ? 0 : 1,
DPSTREAMCLK3_SRC_SEL, otg_inst);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
}
static void dccg314_init(struct dccg *dccg)
{
int otg_inst;
	/* Set the HPO stream encoders to use refclk, to avoid the case where the
	 * PHY is disabled while SYMCLK32 for the HPO SE is still sourced from
	 * PHYD32CLK, which would cause DCN to hang.
	 */
for (otg_inst = 0; otg_inst < 4; otg_inst++)
dccg31_disable_symclk32_se(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
for (otg_inst = 0; otg_inst < 2; otg_inst++)
dccg31_disable_symclk32_le(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
for (otg_inst = 0; otg_inst < 4; otg_inst++)
dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
for (otg_inst = 0; otg_inst < 5; otg_inst++)
dccg31_set_physymclk(dccg, otg_inst,
PHYSYMCLK_FORCE_SRC_SYMCLK, false);
}
static void dccg314_set_valid_pixel_rate(
struct dccg *dccg,
int ref_dtbclk_khz,
int otg_inst,
int pixclk_khz)
{
struct dtbclk_dto_params dto_params = {0};
dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
dto_params.otg_inst = otg_inst;
dto_params.pixclk_khz = pixclk_khz;
dccg314_set_dtbclk_dto(dccg, &dto_params);
}
static void dccg314_dpp_root_clock_control(
struct dccg *dccg,
unsigned int dpp_inst,
bool clock_on)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
return;
if (clock_on) {
/* turn off the DTO and leave phase/modulo at max */
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0xFF,
DPPCLK0_DTO_MODULO, 0xFF);
} else {
/* turn on the DTO to generate a 0hz clock */
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
}
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
}
static const struct dccg_funcs dccg314_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
.dpp_root_clock_control = dccg314_dpp_root_clock_control,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg314_init,
.set_dpstreamclk = dccg314_set_dpstreamclk,
.enable_symclk32_se = dccg31_enable_symclk32_se,
.disable_symclk32_se = dccg31_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
.set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
.set_physymclk = dccg31_set_physymclk,
.set_dtbclk_dto = dccg314_set_dtbclk_dto,
.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
.otg_add_pixel = dccg31_otg_add_pixel,
.otg_drop_pixel = dccg31_otg_drop_pixel,
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
.disable_dsc = dccg31_disable_dscclk,
.enable_dsc = dccg31_enable_dscclk,
.set_pixel_rate_div = dccg314_set_pixel_rate_div,
.trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
};
struct dccg *dccg314_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
base = &dccg_dcn->base;
base->ctx = ctx;
base->funcs = &dccg314_funcs;
dccg_dcn->regs = regs;
dccg_dcn->dccg_shift = dccg_shift;
dccg_dcn->dccg_mask = dccg_mask;
return &dccg_dcn->base;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn314_optc.h"
#include "dcn30/dcn30_optc.h"
#include "dcn31/dcn31_optc.h"
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
#define REG(reg)\
optc1->tg_regs->reg
#define CTX \
optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
/*
* Enable CRTC
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t memory_mask = 0;
int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
int mpcc_hactive = h_active / opp_cnt;
/* Each memory instance is 2048x(314x2) bits to support half line of 4096 */
int odm_mem_count = (h_active + 2047) / 2048;
/*
* display <= 4k : 2 memories + 2 pipes
* 4k < display <= 8k : 4 memories + 2 pipes
* 8k < display <= 12k : 6 memories + 4 pipes
*/
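/* Illustrative example (hypothetical timing, not from the register spec):
 * a 3840-wide timing split across opp_cnt = 2 gives
 * odm_mem_count = (3840 + 2047) / 2048 = 2, so each OPP gets one memory
 * instance and OPTC_SEGMENT_WIDTH below becomes 3840 / 2 = 1920.
 */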
if (opp_cnt == 4) {
if (odm_mem_count <= 2)
memory_mask = 0x3;
else if (odm_mem_count <= 4)
memory_mask = 0xf;
else
memory_mask = 0x3f;
} else {
if (odm_mem_count <= 2)
memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2);
else if (odm_mem_count <= 4)
memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
else
memory_mask = 0x77;
}
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, memory_mask);
if (opp_cnt == 2) {
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
OPTC_SEG0_SRC_SEL, opp_id[0],
OPTC_SEG1_SRC_SEL, opp_id[1]);
} else if (opp_cnt == 4) {
REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 3,
OPTC_SEG0_SRC_SEL, opp_id[0],
OPTC_SEG1_SRC_SEL, opp_id[1],
OPTC_SEG2_SRC_SEL, opp_id[2],
OPTC_SEG3_SRC_SEL, opp_id[3]);
}
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
optc1->opp_count = opp_cnt;
}
static bool optc314_enable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* opp instance for OTG, 1 to 1 mapping and odm will adjust */
REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
OPTC_SEG0_SRC_SEL, optc->inst);
/* VTG enable first is for HW workaround */
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
REG_SEQ_START();
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 2,
OTG_MASTER_EN, 1);
REG_SEQ_SUBMIT();
REG_SEQ_WAIT_DONE();
return true;
}
/* disable_crtc */
static bool optc314_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* disable otg request until end of the first line
* in the vertical blank region
*/
REG_UPDATE(OTG_CONTROL,
OTG_MASTER_EN, 0);
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
return true;
}
static void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* Disable immediately. */
REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0);
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}
static void optc314_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
enum h_timing_div_mode h_div = H_TIMING_NO_DIV;
REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 0,
OPTC_SEG0_SRC_SEL, optc->inst,
OPTC_SEG1_SRC_SEL, 0xf,
OPTC_SEG2_SRC_SEL, 0xf,
OPTC_SEG3_SRC_SEL, 0xf
);
h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_MODE, h_div);
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, 0);
optc1->opp_count = 1;
}
static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0);
}
static struct timing_generator_funcs dcn314_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc314_enable_crtc,
.disable_crtc = optc314_disable_crtc,
.immediate_disable_crtc = optc31_immediate_disable_crtc,
.phantom_crtc_post_enable = optc314_phantom_crtc_post_enable,
/* used by enable_timing_synchronization. Not needed for FPGA */
.is_counter_moving = optc1_is_counter_moving,
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not needed for FPGA */
.wait_for_state = optc1_wait_for_state,
.set_blank_color = optc3_program_blank_color,
.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
.triplebuffer_lock = optc3_triplebuffer_lock,
.triplebuffer_unlock = optc2_triplebuffer_unlock,
.enable_reset_trigger = optc1_enable_reset_trigger,
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc31_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
.configure_crc = optc1_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
.set_gsl = optc2_set_gsl,
.set_gsl_source_select = optc2_set_gsl_source_select,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.init_odm = optc3_init_odm,
.set_odm_bypass = optc314_set_odm_bypass,
.set_odm_combine = optc314_set_odm_combine,
.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
};
void dcn314_timing_generator_init(struct optc *optc1)
{
optc1->base.funcs = &dcn314_tg_funcs;
optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: AMD
*/
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include "dc_hw_types.h"
#include "dsc.h"
#include "dc.h"
#include "rc_calc.h"
#include "fixed31_32.h"
/* This module's internal functions */
/* default DSC policy target bitrate limit is 16bpp */
static uint32_t dsc_policy_max_target_bpp_limit = 16;
/* default DSC policy enables DSC only when needed */
static bool dsc_policy_enable_dsc_when_not_needed;
static bool dsc_policy_disable_dsc_stream_overhead;
static bool disable_128b_132b_stream_overhead;
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
/* Need to account for padding due to pixel-to-symbol packing
* for uncompressed 128b/132b streams.
*/
static uint32_t apply_128b_132b_stream_overhead(
const struct dc_crtc_timing *timing, const uint32_t kbps)
{
uint32_t total_kbps = kbps;
if (disable_128b_132b_stream_overhead)
return kbps;
if (!timing->flags.DSC) {
struct fixed31_32 bpp;
struct fixed31_32 overhead_factor;
bpp = dc_fixpt_from_int(kbps);
bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10);
/* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size)
* Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive
*/
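/* Worked example (illustrative values only): with h_addressable = 3840 and
 * bpp = 24, Symbols_per_HActive = 3840 * 24 / 128 = 720 exactly, so the
 * overhead factor is 1.0; a non-integer symbol count is rounded up, which
 * slightly inflates the required bandwidth.
 */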
overhead_factor = dc_fixpt_from_int(timing->h_addressable);
overhead_factor = dc_fixpt_mul(overhead_factor, bpp);
overhead_factor = dc_fixpt_div_int(overhead_factor, 128);
overhead_factor = dc_fixpt_div(
dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)),
overhead_factor);
total_kbps = dc_fixpt_ceil(
dc_fixpt_mul_int(overhead_factor, total_kbps));
}
return total_kbps;
}
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing,
const enum dc_link_encoding_format link_encoding)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
if (timing->flags.DSC)
return dc_dsc_stream_bandwidth_in_kbps(timing,
timing->dsc_cfg.bits_per_pixel,
timing->dsc_cfg.num_slices_h,
timing->dsc_cfg.is_dp);
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
bits_per_channel = 6;
break;
case COLOR_DEPTH_888:
bits_per_channel = 8;
break;
case COLOR_DEPTH_101010:
bits_per_channel = 10;
break;
case COLOR_DEPTH_121212:
bits_per_channel = 12;
break;
case COLOR_DEPTH_141414:
bits_per_channel = 14;
break;
case COLOR_DEPTH_161616:
bits_per_channel = 16;
break;
default:
ASSERT(bits_per_channel != 0);
bits_per_channel = 8;
break;
}
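/* Illustrative example (assumed timing, not from any table): a 594 MHz pixel
 * clock (pix_clk_100hz = 5940000) at 8 bpc RGB gives
 * kbps = 594000 * 8 * 3 = 14256000, i.e. ~14.26 Gbps of uncompressed data.
 */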
kbps = timing->pix_clk_100hz / 10;
kbps *= bits_per_channel;
if (timing->flags.Y_ONLY != 1) {
/* Only Y-Only uses 1/3 the bandwidth of RGB; all other formats carry 3 components */
kbps *= 3;
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
kbps /= 2;
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
kbps = kbps * 2 / 3;
}
if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
kbps = apply_128b_132b_stream_overhead(timing, kbps);
return kbps;
}
/* Forward Declarations */
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range);
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
const uint32_t num_slices_h,
const uint32_t bpp_increment_div,
const bool is_dp);
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz);
static bool intersect_dsc_caps(
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dsc_enc_caps *dsc_enc_caps,
enum dc_pixel_encoding pixel_encoding,
struct dsc_enc_caps *dsc_common_caps);
static bool setup_dsc_config(
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
switch (dpcd_buff_block_size) {
case DP_DSC_RC_BUF_BLK_SIZE_1:
*buff_block_size = 1024;
break;
case DP_DSC_RC_BUF_BLK_SIZE_4:
*buff_block_size = 4 * 1024;
break;
case DP_DSC_RC_BUF_BLK_SIZE_16:
*buff_block_size = 16 * 1024;
break;
case DP_DSC_RC_BUF_BLK_SIZE_64:
*buff_block_size = 64 * 1024;
break;
default: {
dm_error("%s: DPCD DSC buffer size not recognized.\n", __func__);
return false;
}
}
return true;
}
static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *line_buff_bit_depth)
{
if (0 <= dpcd_line_buff_bit_depth && dpcd_line_buff_bit_depth <= 7)
*line_buff_bit_depth = dpcd_line_buff_bit_depth + 9;
else if (dpcd_line_buff_bit_depth == 8)
*line_buff_bit_depth = 8;
else {
dm_error("%s: DPCD DSC buffer depth not recognized.\n", __func__);
return false;
}
return true;
}
static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
{
switch (dpcd_throughput) {
case DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED:
*throughput = 0;
break;
case DP_DSC_THROUGHPUT_MODE_0_170:
*throughput = 170;
break;
case DP_DSC_THROUGHPUT_MODE_0_340:
*throughput = 340;
break;
case DP_DSC_THROUGHPUT_MODE_0_400:
*throughput = 400;
break;
case DP_DSC_THROUGHPUT_MODE_0_450:
*throughput = 450;
break;
case DP_DSC_THROUGHPUT_MODE_0_500:
*throughput = 500;
break;
case DP_DSC_THROUGHPUT_MODE_0_550:
*throughput = 550;
break;
case DP_DSC_THROUGHPUT_MODE_0_600:
*throughput = 600;
break;
case DP_DSC_THROUGHPUT_MODE_0_650:
*throughput = 650;
break;
case DP_DSC_THROUGHPUT_MODE_0_700:
*throughput = 700;
break;
case DP_DSC_THROUGHPUT_MODE_0_750:
*throughput = 750;
break;
case DP_DSC_THROUGHPUT_MODE_0_800:
*throughput = 800;
break;
case DP_DSC_THROUGHPUT_MODE_0_850:
*throughput = 850;
break;
case DP_DSC_THROUGHPUT_MODE_0_900:
*throughput = 900;
break;
case DP_DSC_THROUGHPUT_MODE_0_950:
*throughput = 950;
break;
case DP_DSC_THROUGHPUT_MODE_0_1000:
*throughput = 1000;
break;
default: {
dm_error("%s: DPCD DSC throughput mode not recognized.\n", __func__);
return false;
}
}
return true;
}
static bool dsc_bpp_increment_div_from_dpcd(uint8_t bpp_increment_dpcd, uint32_t *bpp_increment_div)
{
// Mask bpp increment dpcd field to avoid reading other fields
bpp_increment_dpcd &= 0x7;
switch (bpp_increment_dpcd) {
case 0:
*bpp_increment_div = 16;
break;
case 1:
*bpp_increment_div = 8;
break;
case 2:
*bpp_increment_div = 4;
break;
case 3:
*bpp_increment_div = 2;
break;
case 4:
*bpp_increment_div = 1;
break;
default: {
dm_error("%s: DPCD DSC bits-per-pixel increment not recognized.\n", __func__);
return false;
}
}
return true;
}
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_branch_decoder_caps,
struct dsc_dec_dpcd_caps *dsc_sink_caps)
{
if (!dpcd_dsc_basic_data)
return false;
dsc_sink_caps->is_dsc_supported =
(dpcd_dsc_basic_data[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & DP_DSC_DECOMPRESSION_IS_SUPPORTED) != 0;
if (!dsc_sink_caps->is_dsc_supported)
return false;
dsc_sink_caps->dsc_version = dpcd_dsc_basic_data[DP_DSC_REV - DP_DSC_SUPPORT];
{
int buff_block_size;
int buff_size;
if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT],
&buff_block_size))
return false;
buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1;
dsc_sink_caps->rc_buffer_size = buff_size * buff_block_size;
}
dsc_sink_caps->slice_caps1.raw = dpcd_dsc_basic_data[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
if (!dsc_line_buff_depth_from_dpcd(dpcd_dsc_basic_data[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT],
&dsc_sink_caps->lb_bit_depth))
return false;
dsc_sink_caps->is_block_pred_supported =
(dpcd_dsc_basic_data[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
DP_DSC_BLK_PREDICTION_IS_SUPPORTED) != 0;
dsc_sink_caps->edp_max_bits_per_pixel =
dpcd_dsc_basic_data[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
dpcd_dsc_basic_data[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] << 8;
dsc_sink_caps->color_formats.raw = dpcd_dsc_basic_data[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT];
dsc_sink_caps->color_depth.raw = dpcd_dsc_basic_data[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
{
int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK,
&dsc_sink_caps->throughput_mode_0_mps))
return false;
dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT;
if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps))
return false;
}
dsc_sink_caps->max_slice_width = dpcd_dsc_basic_data[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * 320;
dsc_sink_caps->slice_caps2.raw = dpcd_dsc_basic_data[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT],
&dsc_sink_caps->bpp_increment_div))
return false;
if (dc->debug.dsc_bpp_increment_div) {
/* dsc_bpp_increment_div should only be 1, 2, 4, 8 or 16, but rather than rejecting invalid values,
* we'll accept all and get it into range. This also makes the above check against 0 redundant,
* but that check makes it explicit that the override is only used when it's not 0.
*/
if (dc->debug.dsc_bpp_increment_div >= 1)
dsc_sink_caps->bpp_increment_div = 1;
if (dc->debug.dsc_bpp_increment_div >= 2)
dsc_sink_caps->bpp_increment_div = 2;
if (dc->debug.dsc_bpp_increment_div >= 4)
dsc_sink_caps->bpp_increment_div = 4;
if (dc->debug.dsc_bpp_increment_div >= 8)
dsc_sink_caps->bpp_increment_div = 8;
if (dc->debug.dsc_bpp_increment_div >= 16)
dsc_sink_caps->bpp_increment_div = 16;
}
/* Extended caps */
if (dpcd_dsc_branch_decoder_caps == NULL) { // branch decoder DPCD DSC data can be null for non branch device
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
dsc_sink_caps->branch_overall_throughput_1_mps = 0;
dsc_sink_caps->branch_max_line_width = 0;
return true;
}
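/* The conversions below map the raw branch overall throughput DPCD value:
 * 0 -> not reported, 1 -> 680 MP/s, and any larger value N -> 600 + N * 50 MP/s.
 */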
dsc_sink_caps->branch_overall_throughput_0_mps =
dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
if (dsc_sink_caps->branch_overall_throughput_0_mps == 0)
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
else if (dsc_sink_caps->branch_overall_throughput_0_mps == 1)
dsc_sink_caps->branch_overall_throughput_0_mps = 680;
else {
dsc_sink_caps->branch_overall_throughput_0_mps *= 50;
dsc_sink_caps->branch_overall_throughput_0_mps += 600;
}
dsc_sink_caps->branch_overall_throughput_1_mps =
dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
if (dsc_sink_caps->branch_overall_throughput_1_mps == 0)
dsc_sink_caps->branch_overall_throughput_1_mps = 0;
else if (dsc_sink_caps->branch_overall_throughput_1_mps == 1)
dsc_sink_caps->branch_overall_throughput_1_mps = 680;
else {
dsc_sink_caps->branch_overall_throughput_1_mps *= 50;
dsc_sink_caps->branch_overall_throughput_1_mps += 600;
}
dsc_sink_caps->branch_max_line_width =
dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
ASSERT(dsc_sink_caps->branch_max_line_width == 0 || dsc_sink_caps->branch_max_line_width >= 5120);
dsc_sink_caps->is_dp = true;
return true;
}
/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
* timing's pixel clock and uncompressed bandwidth.
* If DSC is not possible, leave '*range' untouched.
*/
bool dc_dsc_compute_bandwidth_range(
const struct display_stream_compressor *dsc,
uint32_t dsc_min_slice_height_override,
uint32_t min_bpp_x16,
uint32_t max_bpp_x16,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
struct dc_dsc_config_options options = {0};
options.dsc_min_slice_height_override = dsc_min_slice_height_override;
options.max_target_bpp_limit_override_x16 = max_bpp_x16;
options.slice_height_granularity = 1;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
&options, link_encoding, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
config.num_slices_h, &dsc_common_caps, timing, link_encoding, range);
return is_dsc_possible;
}
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
// This is a static HW query, so we can use any DSC
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
if (dsc) {
if (!dsc->ctx->dc->debug.disable_dsc)
dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
if (dsc->ctx->dc->debug.native422_support)
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
}
/* Returns 'false' if no intersection was found for at least one capability.
* It also implicitly validates some sink caps against invalid value of zero.
*/
static bool intersect_dsc_caps(
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dsc_enc_caps *dsc_enc_caps,
enum dc_pixel_encoding pixel_encoding,
struct dsc_enc_caps *dsc_common_caps)
{
int32_t max_slices;
int32_t total_sink_throughput;
memset(dsc_common_caps, 0, sizeof(struct dsc_enc_caps));
dsc_common_caps->dsc_version = min(dsc_sink_caps->dsc_version, dsc_enc_caps->dsc_version);
if (!dsc_common_caps->dsc_version)
return false;
dsc_common_caps->slice_caps.bits.NUM_SLICES_1 =
dsc_sink_caps->slice_caps1.bits.NUM_SLICES_1 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
dsc_common_caps->slice_caps.bits.NUM_SLICES_2 =
dsc_sink_caps->slice_caps1.bits.NUM_SLICES_2 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
dsc_common_caps->slice_caps.bits.NUM_SLICES_4 =
dsc_sink_caps->slice_caps1.bits.NUM_SLICES_4 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
dsc_common_caps->slice_caps.bits.NUM_SLICES_8 =
dsc_sink_caps->slice_caps1.bits.NUM_SLICES_8 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
if (!dsc_common_caps->slice_caps.raw)
return false;
dsc_common_caps->lb_bit_depth = min(dsc_sink_caps->lb_bit_depth, dsc_enc_caps->lb_bit_depth);
if (!dsc_common_caps->lb_bit_depth)
return false;
dsc_common_caps->is_block_pred_supported =
dsc_sink_caps->is_block_pred_supported && dsc_enc_caps->is_block_pred_supported;
dsc_common_caps->color_formats.raw = dsc_sink_caps->color_formats.raw & dsc_enc_caps->color_formats.raw;
if (!dsc_common_caps->color_formats.raw)
return false;
dsc_common_caps->color_depth.raw = dsc_sink_caps->color_depth.raw & dsc_enc_caps->color_depth.raw;
if (!dsc_common_caps->color_depth.raw)
return false;
max_slices = 0;
if (dsc_common_caps->slice_caps.bits.NUM_SLICES_1)
max_slices = 1;
if (dsc_common_caps->slice_caps.bits.NUM_SLICES_2)
max_slices = 2;
if (dsc_common_caps->slice_caps.bits.NUM_SLICES_4)
max_slices = 4;
total_sink_throughput = max_slices * dsc_sink_caps->throughput_mode_0_mps;
if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420)
total_sink_throughput = max_slices * dsc_sink_caps->throughput_mode_1_mps;
dsc_common_caps->max_total_throughput_mps = min(total_sink_throughput, dsc_enc_caps->max_total_throughput_mps);
dsc_common_caps->max_slice_width = min(dsc_sink_caps->max_slice_width, dsc_enc_caps->max_slice_width);
if (!dsc_common_caps->max_slice_width)
return false;
dsc_common_caps->bpp_increment_div = min(dsc_sink_caps->bpp_increment_div, dsc_enc_caps->bpp_increment_div);
// TODO DSC: Remove this workaround for N422 and 420 once it's fixed, or move it to get_dsc_encoder_caps()
if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420)
dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8);
dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel;
dsc_common_caps->is_dp = dsc_sink_caps->is_dp;
return true;
}
static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
{
return (value + 9) / 10;
}
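/* Computes the target bpp (in U*.4 format, i.e. bpp * 16) that fills the given
 * bandwidth after subtracting DSC stream overhead, floored to the sink's bpp
 * increment. Illustrative example (hypothetical numbers): an effective
 * bandwidth of 2970000 kbps on a 594000 kHz pixel clock yields 5.0 bpp,
 * returned as 80.
 */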
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
const uint32_t num_slices_h,
const uint32_t bpp_increment_div,
const bool is_dp)
{
uint32_t overhead_in_kbps;
struct fixed31_32 effective_bandwidth_in_kbps;
struct fixed31_32 bpp_x16;
overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
effective_bandwidth_in_kbps = dc_fixpt_from_int(bandwidth_in_kbps);
effective_bandwidth_in_kbps = dc_fixpt_sub_int(effective_bandwidth_in_kbps,
overhead_in_kbps);
bpp_x16 = dc_fixpt_mul_int(effective_bandwidth_in_kbps, 10);
bpp_x16 = dc_fixpt_div_int(bpp_x16, timing->pix_clk_100hz);
bpp_x16 = dc_fixpt_from_int(dc_fixpt_floor(dc_fixpt_mul_int(bpp_x16, bpp_increment_div)));
bpp_x16 = dc_fixpt_div_int(bpp_x16, bpp_increment_div);
bpp_x16 = dc_fixpt_mul_int(bpp_x16, 16);
return dc_fixpt_floor(bpp_x16);
}
/* Decide the DSC bandwidth range based on signal, timing, and spec-specific
 * requirements, together with the explicitly specified min and max bpp.
* The range output includes decided min/max target bpp, the respective bandwidth requirements
* and native timing bandwidth requirement when DSC is not used.
*/
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
memset(range, 0, sizeof(*range));
/* apply signal, timing, specs and explicitly specified DSC range requirements */
if (preferred_bpp_x16) {
if (preferred_bpp_x16 <= max_bpp_x16 &&
preferred_bpp_x16 >= min_bpp_x16) {
range->max_target_bpp_x16 = preferred_bpp_x16;
range->min_target_bpp_x16 = preferred_bpp_x16;
}
}
/* TODO - make this value generic to all signal types */
else if (dsc_caps->edp_sink_max_bits_per_pixel) {
/* apply max bpp limitation from edp sink */
range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel,
max_bpp_x16);
range->min_target_bpp_x16 = min_bpp_x16;
}
else {
range->max_target_bpp_x16 = max_bpp_x16;
range->min_target_bpp_x16 = min_bpp_x16;
}
/* populate output structure */
if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
/* native stream bandwidth */
range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding);
/* max dsc target bpp */
range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
range->max_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
/* min dsc target bpp */
range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
range->min_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
}
return range->max_kbps >= range->min_kbps && range->min_kbps > 0;
}
/* Decides if DSC should be used and calculates target bpp if it should, applying DSC policy.
*
* Returns:
* - 'true' if target bpp is decided
* - 'false' if target bpp cannot be decided (e.g. cannot fit even with min DSC bpp),
*/
static bool decide_dsc_target_bpp_x16(
const struct dc_dsc_policy *policy,
const struct dsc_enc_caps *dsc_common_caps,
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const int num_slices_h,
const enum dc_link_encoding_format link_encoding,
int *target_bpp_x16)
{
struct dc_dsc_bw_range range;
*target_bpp_x16 = 0;
if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
if (target_bandwidth_kbps >= range.stream_kbps) {
if (policy->enable_dsc_when_not_needed)
/* enable max bpp even if DSC is not needed */
*target_bpp_x16 = range.max_target_bpp_x16;
} else if (target_bandwidth_kbps >= range.max_kbps) {
/* use max target bpp allowed */
*target_bpp_x16 = range.max_target_bpp_x16;
} else if (target_bandwidth_kbps >= range.min_kbps) {
/* use target bpp that can take entire target bandwidth */
*target_bpp_x16 = compute_bpp_x16_from_target_bandwidth(
target_bandwidth_kbps, timing, num_slices_h,
dsc_common_caps->bpp_increment_div,
dsc_common_caps->is_dp);
}
}
return *target_bpp_x16 != 0;
}
#define MIN_AVAILABLE_SLICES_SIZE 6
static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices)
{
int idx = 0;
if (slice_caps.bits.NUM_SLICES_1)
available_slices[idx++] = 1;
if (slice_caps.bits.NUM_SLICES_2)
available_slices[idx++] = 2;
if (slice_caps.bits.NUM_SLICES_4)
available_slices[idx++] = 4;
if (slice_caps.bits.NUM_SLICES_8)
available_slices[idx++] = 8;
return idx;
}
static int get_max_dsc_slices(union dsc_enc_slice_caps slice_caps)
{
int max_slices = 0;
int available_slices[MIN_AVAILABLE_SLICES_SIZE];
int end_idx = get_available_dsc_slices(slice_caps, &available_slices[0]);
if (end_idx > 0)
max_slices = available_slices[end_idx - 1];
return max_slices;
}
// Increment to the next larger supported slice count if one exists, or just increment by one if not
static int inc_num_slices(union dsc_enc_slice_caps slice_caps, int num_slices)
{
// Get next bigger num slices available in common caps
int available_slices[MIN_AVAILABLE_SLICES_SIZE];
int end_idx;
int i;
int new_num_slices = num_slices;
end_idx = get_available_dsc_slices(slice_caps, &available_slices[0]);
if (end_idx == 0) {
// No available slices found
new_num_slices++;
return new_num_slices;
}
// Numbers of slices found - get the next bigger number
for (i = 0; i < end_idx; i++) {
if (new_num_slices < available_slices[i]) {
new_num_slices = available_slices[i];
break;
}
}
if (new_num_slices == num_slices) // No bigger number of slices found
new_num_slices++;
return new_num_slices;
}
// Decrement to the next smaller supported slice count if one exists, or just decrement by one if not. Stop at zero.
static int dec_num_slices(union dsc_enc_slice_caps slice_caps, int num_slices)
{
// Get next smaller num slices available in common caps
int available_slices[MIN_AVAILABLE_SLICES_SIZE];
int end_idx;
int i;
int new_num_slices = num_slices;
end_idx = get_available_dsc_slices(slice_caps, &available_slices[0]);
if (end_idx == 0 && new_num_slices > 0) {
// No numbers of slices found
new_num_slices++;
return new_num_slices;
}
// Numbers of slices found - get the next smaller number
for (i = end_idx - 1; i >= 0; i--) {
if (new_num_slices > available_slices[i]) {
new_num_slices = available_slices[i];
break;
}
}
if (new_num_slices == num_slices) {
// No smaller number of slices found
new_num_slices--;
if (new_num_slices < 0)
new_num_slices = 0;
}
return new_num_slices;
}
// Choose next bigger number of slices if the requested number of slices is not available
static int fit_num_slices_up(union dsc_enc_slice_caps slice_caps, int num_slices)
{
// Get next bigger num slices available in common caps
int available_slices[MIN_AVAILABLE_SLICES_SIZE];
int end_idx;
int i;
int new_num_slices = num_slices;
end_idx = get_available_dsc_slices(slice_caps, &available_slices[0]);
if (end_idx == 0) {
// No available slices found
new_num_slices++;
return new_num_slices;
}
// Numbers of slices found - get the equal or next bigger number
for (i = 0; i < end_idx; i++) {
if (new_num_slices <= available_slices[i]) {
new_num_slices = available_slices[i];
break;
}
}
return new_num_slices;
}
/* Attempts to set DSC configuration for the stream, applying DSC policy.
* Returns 'true' if successful or 'false' if not.
*
* Parameters:
*
* dsc_sink_caps - DSC sink decoder capabilities (from DPCD)
*
* dsc_enc_caps - DSC encoder capabilities
*
* target_bandwidth_kbps - Target bandwidth to fit the stream into.
* If 0, do not calculate target bpp.
*
* timing - The stream timing to fit into 'target_bandwidth_kbps' or apply
* maximum compression to, if 'target_bandwidth == 0'
*
* dsc_cfg - DSC configuration to use if it was possible to come up with
* one for the given inputs.
* The target bitrate after DSC can be calculated by multiplying
* dsc_cfg.bits_per_pixel (in U6.4 format) by pixel rate, e.g.
*
* dsc_stream_bitrate_kbps = (int)ceil(timing->pix_clk_khz * dsc_cfg.bits_per_pixel / 16.0);
*/
static bool setup_dsc_config(
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
int max_slices_h;
int min_slices_h;
int num_slices_h;
int pic_width;
int slice_width;
int target_bpp;
int sink_per_slice_throughput_mps;
int branch_max_throughput_mps = 0;
bool is_dsc_possible = false;
int pic_height;
int slice_height;
struct dc_dsc_policy policy;
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
if (!dsc_sink_caps->is_dsc_supported)
goto done;
if (dsc_sink_caps->branch_max_line_width && dsc_sink_caps->branch_max_line_width < pic_width)
goto done;
// Intersect decoder with encoder DSC caps and validate DSC settings
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps);
if (!is_dsc_possible)
goto done;
sink_per_slice_throughput_mps = 0;
// Validate available DSC settings against the mode timing
// Validate color format (and pick up the throughput values)
dsc_cfg->ycbcr422_simple = false;
switch (timing->pixel_encoding) {
case PIXEL_ENCODING_RGB:
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.RGB;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps;
break;
case PIXEL_ENCODING_YCBCR444:
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_444;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps;
break;
case PIXEL_ENCODING_YCBCR422:
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
if (!is_dsc_possible) {
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_SIMPLE_422;
dsc_cfg->ycbcr422_simple = is_dsc_possible;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps;
}
break;
case PIXEL_ENCODING_YCBCR420:
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_420;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
break;
default:
is_dsc_possible = false;
}
// Validate branch's maximum throughput
if (branch_max_throughput_mps && dsc_div_by_10_round_up(timing->pix_clk_100hz) > branch_max_throughput_mps * 1000)
is_dsc_possible = false;
if (!is_dsc_possible)
goto done;
// Color depth
switch (timing->display_color_depth) {
case COLOR_DEPTH_888:
is_dsc_possible = (bool)dsc_common_caps.color_depth.bits.COLOR_DEPTH_8_BPC;
break;
case COLOR_DEPTH_101010:
is_dsc_possible = (bool)dsc_common_caps.color_depth.bits.COLOR_DEPTH_10_BPC;
break;
case COLOR_DEPTH_121212:
is_dsc_possible = (bool)dsc_common_caps.color_depth.bits.COLOR_DEPTH_12_BPC;
break;
default:
is_dsc_possible = false;
}
if (!is_dsc_possible)
goto done;
// Slice width (i.e. number of slices per line)
max_slices_h = get_max_dsc_slices(dsc_common_caps.slice_caps);
while (max_slices_h > 0) {
if (pic_width % max_slices_h == 0)
break;
max_slices_h = dec_num_slices(dsc_common_caps.slice_caps, max_slices_h);
}
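// At this point max_slices_h is the largest commonly supported slice count
// that evenly divides the picture width, or 0 if no such count exists.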
is_dsc_possible = (dsc_common_caps.max_slice_width > 0);
if (!is_dsc_possible)
goto done;
min_slices_h = pic_width / dsc_common_caps.max_slice_width;
if (pic_width % dsc_common_caps.max_slice_width)
min_slices_h++;
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
while (min_slices_h <= max_slices_h) {
int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h;
if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000)
break;
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
is_dsc_possible = (min_slices_h <= max_slices_h);
if (pic_width % min_slices_h != 0)
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
if (min_slices_h == 0 && max_slices_h == 0)
is_dsc_possible = false;
if (!is_dsc_possible)
goto done;
if (policy.use_min_slices_h) {
if (min_slices_h > 0)
num_slices_h = min_slices_h;
else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out
if (policy.max_slices_h)
num_slices_h = min(policy.max_slices_h, max_slices_h);
else
num_slices_h = max_slices_h;
} else
is_dsc_possible = false;
} else {
if (max_slices_h > 0) {
if (policy.max_slices_h)
num_slices_h = min(policy.max_slices_h, max_slices_h);
else
num_slices_h = max_slices_h;
} else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible
num_slices_h = min_slices_h;
else
is_dsc_possible = false;
}
// When we force 2:1 ODM, we can't have 1 slice to divide amongst 2 separate DSC instances
// need to enforce at minimum 2 horizontal slices
if (options->dsc_force_odm_hslice_override) {
num_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, 2);
if (num_slices_h == 0)
is_dsc_possible = false;
}
if (!is_dsc_possible)
goto done;
dsc_cfg->num_slices_h = num_slices_h;
slice_width = pic_width / num_slices_h;
is_dsc_possible = slice_width <= dsc_common_caps.max_slice_width;
if (!is_dsc_possible)
goto done;
// Slice height (i.e. number of slices per column): start with the policy minimum and pick the first slice height that the picture height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
if (options->dsc_min_slice_height_override == 0)
slice_height = min(policy.min_slice_height, pic_height);
else
slice_height = min((int)(options->dsc_min_slice_height_override), pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
slice_height % options->slice_height_granularity != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
slice_height++;
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) // For the case when pic_height < dsc_policy.min_slice_height
is_dsc_possible = (slice_height % 2 == 0);
if (!is_dsc_possible)
goto done;
dsc_cfg->num_slices_v = pic_height/slice_height;
if (target_bandwidth_kbps > 0) {
is_dsc_possible = decide_dsc_target_bpp_x16(
&policy,
&dsc_common_caps,
target_bandwidth_kbps,
timing,
num_slices_h,
link_encoding,
&target_bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
if (!is_dsc_possible)
goto done;
// Final decision: can we do DSC or not?
if (is_dsc_possible) {
// Fill out the rest of DSC settings
dsc_cfg->block_pred_enable = dsc_common_caps.is_block_pred_supported;
dsc_cfg->linebuf_depth = dsc_common_caps.lb_bit_depth;
dsc_cfg->version_minor = (dsc_common_caps.dsc_version & 0xf0) >> 4;
dsc_cfg->is_dp = dsc_sink_caps->is_dp;
}
done:
if (!is_dsc_possible)
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
return is_dsc_possible;
}
bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
timing, options, link_encoding, dsc_cfg);
return is_dsc_possible;
}
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp)
{
uint32_t overhead_in_kbps;
struct fixed31_32 bpp;
struct fixed31_32 actual_bandwidth_in_kbps;
overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
bpp = dc_fixpt_from_fraction(bpp_x16, 16);
actual_bandwidth_in_kbps = dc_fixpt_from_fraction(timing->pix_clk_100hz, 10);
actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp);
actual_bandwidth_in_kbps = dc_fixpt_add_int(actual_bandwidth_in_kbps, overhead_in_kbps);
return dc_fixpt_ceil(actual_bandwidth_in_kbps);
}
uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const struct dc_crtc_timing *timing,
const int num_slices_h,
const bool is_dp)
{
struct fixed31_32 max_dsc_overhead;
struct fixed31_32 refresh_rate;
if (dsc_policy_disable_dsc_stream_overhead || !is_dp)
return 0;
/* refresh rate in Hz = pix_clk / (h_total * v_total) */
refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz);
refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total);
refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total);
refresh_rate = dc_fixpt_mul_int(refresh_rate, 100);
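/* Worst-case DP stream overhead is modeled here as 256 bits per slice per
 * line (v_total lines per frame), converted to kbps using the refresh rate
 * computed above.
 */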
max_dsc_overhead = dc_fixpt_from_int(num_slices_h);
max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total);
max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256);
max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000);
max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate);
return dc_fixpt_ceil(max_dsc_overhead);
}
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy)
{
uint32_t bpc = 0;
policy->min_target_bpp = 0;
policy->max_target_bpp = 0;
/* DSC Policy: Use minimum number of slices that fits the pixel clock */
policy->use_min_slices_h = true;
/* DSC Policy: Use max available slices
* (in our case 4 or 8, depending on the mode)
*/
policy->max_slices_h = 0;
/* DSC Policy: Use slice height recommended
* by VESA DSC Spreadsheet user guide
*/
policy->min_slice_height = 108;
/* DSC Policy: follow DP specs with an internal upper limit to 16 bpp
* for better interoperability
*/
switch (timing->display_color_depth) {
case COLOR_DEPTH_888:
bpc = 8;
break;
case COLOR_DEPTH_101010:
bpc = 10;
break;
case COLOR_DEPTH_121212:
bpc = 12;
break;
default:
return;
}
switch (timing->pixel_encoding) {
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */
/* DP specs limits to 8 */
policy->min_target_bpp = 8;
/* DP specs limits to 3 x bpc */
policy->max_target_bpp = 3 * bpc;
break;
case PIXEL_ENCODING_YCBCR420:
/* DP specs limits to 6 */
policy->min_target_bpp = 6;
/* DP specs limit to 1.5 x bpc; assumes bpc is an even number */
policy->max_target_bpp = bpc * 3 / 2;
break;
default:
return;
}
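/* Example (illustrative): 10 bpc RGB gives min_target_bpp = 8 and
 * max_target_bpp = 30, which the default 16 bpp policy limit below clamps to 16.
 */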
/* internal upper limit, default 16 bpp */
if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
/* apply override */
if (max_target_bpp_limit_override_x16 && policy->max_target_bpp > max_target_bpp_limit_override_x16 / 16)
policy->max_target_bpp = max_target_bpp_limit_override_x16 / 16;
/* enable DSC when not needed, default false */
if (dsc_policy_enable_dsc_when_not_needed)
policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed;
else
policy->enable_dsc_when_not_needed = false;
}
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
{
dsc_policy_max_target_bpp_limit = limit;
}
void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable)
{
dsc_policy_enable_dsc_when_not_needed = enable;
}
void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
{
dsc_policy_disable_dsc_stream_overhead = disable;
}
void dc_set_disable_128b_132b_stream_overhead(bool disable)
{
disable_128b_132b_stream_overhead = disable;
}
void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
{
options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine;
options->max_target_bpp_limit_override_x16 = 0;
options->slice_height_granularity = 1;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c |
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <drm/display/drm_dsc_helper.h>
#include "dscc_types.h"
#include "rc_calc.h"
static void copy_pps_fields(struct drm_dsc_config *to, const struct drm_dsc_config *from)
{
to->line_buf_depth = from->line_buf_depth;
to->bits_per_component = from->bits_per_component;
to->convert_rgb = from->convert_rgb;
to->slice_width = from->slice_width;
to->slice_height = from->slice_height;
to->simple_422 = from->simple_422;
to->native_422 = from->native_422;
to->native_420 = from->native_420;
to->pic_width = from->pic_width;
to->pic_height = from->pic_height;
to->rc_tgt_offset_high = from->rc_tgt_offset_high;
to->rc_tgt_offset_low = from->rc_tgt_offset_low;
to->bits_per_pixel = from->bits_per_pixel;
to->rc_edge_factor = from->rc_edge_factor;
to->rc_quant_incr_limit1 = from->rc_quant_incr_limit1;
to->rc_quant_incr_limit0 = from->rc_quant_incr_limit0;
to->initial_xmit_delay = from->initial_xmit_delay;
to->initial_dec_delay = from->initial_dec_delay;
to->block_pred_enable = from->block_pred_enable;
to->first_line_bpg_offset = from->first_line_bpg_offset;
to->second_line_bpg_offset = from->second_line_bpg_offset;
to->initial_offset = from->initial_offset;
memcpy(&to->rc_buf_thresh, &from->rc_buf_thresh, sizeof(from->rc_buf_thresh));
memcpy(&to->rc_range_params, &from->rc_range_params, sizeof(from->rc_range_params));
to->rc_model_size = from->rc_model_size;
to->flatness_min_qp = from->flatness_min_qp;
to->flatness_max_qp = from->flatness_max_qp;
to->initial_scale_value = from->initial_scale_value;
to->scale_decrement_interval = from->scale_decrement_interval;
to->scale_increment_interval = from->scale_increment_interval;
to->nfl_bpg_offset = from->nfl_bpg_offset;
to->nsl_bpg_offset = from->nsl_bpg_offset;
to->slice_bpg_offset = from->slice_bpg_offset;
to->final_offset = from->final_offset;
to->vbr_enable = from->vbr_enable;
to->slice_chunk_size = from->slice_chunk_size;
to->second_line_offset_adj = from->second_line_offset_adj;
to->dsc_version_minor = from->dsc_version_minor;
}
static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_params *rc)
{
int i;
dsc_cfg->rc_quant_incr_limit0 = rc->rc_quant_incr_limit0;
dsc_cfg->rc_quant_incr_limit1 = rc->rc_quant_incr_limit1;
dsc_cfg->initial_offset = rc->initial_fullness_offset;
dsc_cfg->initial_xmit_delay = rc->initial_xmit_delay;
dsc_cfg->first_line_bpg_offset = rc->first_line_bpg_offset;
dsc_cfg->second_line_bpg_offset = rc->second_line_bpg_offset;
dsc_cfg->flatness_min_qp = rc->flatness_min_qp;
dsc_cfg->flatness_max_qp = rc->flatness_max_qp;
for (i = 0; i < QP_SET_SIZE; ++i) {
dsc_cfg->rc_range_params[i].range_min_qp = rc->qp_min[i];
dsc_cfg->rc_range_params[i].range_max_qp = rc->qp_max[i];
/* Truncate 8-bit signed value to 6-bit signed value */
dsc_cfg->rc_range_params[i].range_bpg_offset = 0x3f & rc->ofs[i];
}
dsc_cfg->rc_model_size = rc->rc_model_size;
dsc_cfg->rc_edge_factor = rc->rc_edge_factor;
dsc_cfg->rc_tgt_offset_high = rc->rc_tgt_offset_hi;
dsc_cfg->rc_tgt_offset_low = rc->rc_tgt_offset_lo;
for (i = 0; i < QP_SET_SIZE - 1; ++i)
dsc_cfg->rc_buf_thresh[i] = rc->rc_buf_thresh[i];
}
int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
const struct rc_params *rc,
struct dsc_parameters *dsc_params)
{
int ret;
struct drm_dsc_config dsc_cfg;
unsigned long long tmp;
dsc_params->pps = *pps;
dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);
copy_pps_fields(&dsc_cfg, &dsc_params->pps);
copy_rc_to_cfg(&dsc_cfg, rc);
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
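/* bytes_per_pixel below is slice_chunk_size / slice_width in fixed point with
 * 28 fractional bits, rounded up; e.g. (illustrative) a 1920-byte chunk over a
 * 1920-pixel slice gives exactly 1.0, i.e. 0x10000000.
 */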
tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1);
do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP
dsc_params->bytes_per_pixel = (uint32_t)tmp;
copy_pps_fields(&dsc_params->pps, &dsc_cfg);
dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits;
return ret;
}
| linux-master | drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "rc_calc.h"
/**
* calc_rc_params - compute DSC rate control parameters
* @rc: DC internal DSC parameters
* @pps: DRM struct with all required DSC values
*
* This function expects a drm_dsc_config data struct with all the required DSC
* values previously filled out by our driver and based on this information it
* computes some of the DSC values.
*
* @note This calculation requires floating-point operations; most of it executes
* under kernel_fpu_{begin,end}.
*/
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
{
#if defined(CONFIG_DRM_AMD_DC_FP)
enum colour_mode mode;
enum bits_per_comp bpc;
bool is_navite_422_or_420;
u16 drm_bpp = pps->bits_per_pixel;
int slice_width = pps->slice_width;
int slice_height = pps->slice_height;
mode = pps->convert_rgb ? CM_RGB : (pps->simple_422 ? CM_444 :
(pps->native_422 ? CM_422 :
pps->native_420 ? CM_420 : CM_444));
bpc = (pps->bits_per_component == 8) ? BPC_8 : (pps->bits_per_component == 10)
? BPC_10 : BPC_12;
is_navite_422_or_420 = pps->native_422 || pps->native_420;
DC_FP_START();
_do_calc_rc_params(rc, mode, bpc, drm_bpp, is_navite_422_or_420,
slice_width, slice_height,
pps->dsc_version_minor);
DC_FP_END();
#endif
}
| linux-master | drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c |
/* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "power_helpers.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc.h"
#include "core_types.h"
#include "dmub_cmd.h"
#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
#define bswap16_based_on_endian(big_endian, value) \
(big_endian) ? cpu_to_be16(value) : cpu_to_le16(value)
/* Possible Min Reduction config from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
* 100 98.0 94.1 94.1 85.1 80.3 75.3 69.4 60.0 57.6 50.2 49.8 40.0 %
*/
static const unsigned char min_reduction_table[13] = {
0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66};
/* Possible Max Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
* 96.1 89.8 85.1 80.3 69.4 64.7 64.7 50.2 39.6 30.2 30.2 30.2 19.6 %
*/
static const unsigned char max_reduction_table[13] = {
0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
/* Possible ABM 2.2 Min Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
* 100 100 100 100 100 100 100 100 100 92.2 83.1 75.3 75.3 %
*/
static const unsigned char min_reduction_table_v_2_2[13] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd4, 0xc0, 0xc0};
/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
* 96.1 89.8 74.9 69.4 64.7 52.2 48.6 39.6 30.2 25.1 19.6 12.5 12.5 %
*/
static const unsigned char max_reduction_table_v_2_2[13] = {
0xf5, 0xe5, 0xbf, 0xb1, 0xa5, 0x85, 0x7c, 0x65, 0x4d, 0x40, 0x32, 0x20, 0x20};
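/* Note: the table entries are 8-bit fractions of full brightness; the
 * percentages in the comments above are value / 255 (e.g. 0xf5 = 245/255 ~ 96.1%).
 */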
/* Predefined ABM configuration sets. We may have different configuration sets
* in order to satisfy different power/quality requirements.
*/
static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = {
/* ABM Level 1, ABM Level 2, ABM Level 3, ABM Level 4 */
{ 2, 5, 7, 8 }, /* Default - Medium aggressiveness */
{ 2, 5, 8, 11 }, /* Alt #1 - Increased aggressiveness */
{ 0, 2, 4, 8 }, /* Alt #2 - Minimal aggressiveness */
{ 3, 6, 10, 12 }, /* Alt #3 - Super aggressiveness */
};
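/* Worked example (illustrative only): abm_config[set][level] selects an
 * aggressiveness index into the reduction tables above. With the default set
 * (index 0) at ABM level 3, abm_config[0][2] = 7, so the firmware is handed
 * min_reduction_table[7] = 0xb1 (~69.4 %) and max_reduction_table[7] = 0x80
 * (~50.2 %).
 */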
struct abm_parameters {
unsigned char min_reduction;
unsigned char max_reduction;
unsigned char bright_pos_gain;
unsigned char dark_pos_gain;
unsigned char brightness_gain;
unsigned char contrast_factor;
unsigned char deviation_gain;
unsigned char min_knee;
unsigned char max_knee;
unsigned short blRampReduction;
unsigned short blRampStart;
};
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
{0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xf777, 0xcccc},
{0xde, 0x85, 0x20, 0x00, 0xe0, 0x90, 0xa8, 0x40, 0xc8, 0xf777, 0xcccc},
{0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xeeee, 0x9999},
{0x82, 0x40, 0x20, 0x00, 0x00, 0xb8, 0xb3, 0x70, 0x70, 0xe333, 0xb333},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
{0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
{0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
{0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
{0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
};
static const struct abm_parameters abm_settings_config2[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
{0xf0, 0xbf, 0x20, 0x00, 0x88, 0x99, 0xb3, 0x40, 0xe0, 0x0000, 0xcccc},
{0xd8, 0x85, 0x20, 0x00, 0x70, 0x90, 0xa8, 0x40, 0xc8, 0x0700, 0xb333},
{0xb8, 0x58, 0x20, 0x00, 0x64, 0x88, 0x78, 0x70, 0xa0, 0x7000, 0x9999},
{0x82, 0x40, 0x20, 0x00, 0x00, 0xb8, 0xb3, 0x70, 0x70, 0xc333, 0xb333},
};
static const struct abm_parameters * const abm_settings[] = {
abm_settings_config0,
abm_settings_config1,
abm_settings_config2,
};
static const struct dm_bl_data_point custom_backlight_curve0[] = {
{2, 14}, {4, 16}, {6, 18}, {8, 21}, {10, 23}, {12, 26}, {14, 29}, {16, 32}, {18, 35},
{20, 38}, {22, 41}, {24, 44}, {26, 48}, {28, 52}, {30, 55}, {32, 59}, {34, 62},
{36, 67}, {38, 71}, {40, 75}, {42, 80}, {44, 84}, {46, 88}, {48, 93}, {50, 98},
{52, 103}, {54, 108}, {56, 113}, {58, 118}, {60, 123}, {62, 129}, {64, 135}, {66, 140},
{68, 146}, {70, 152}, {72, 158}, {74, 164}, {76, 171}, {78, 177}, {80, 183}, {82, 190},
{84, 197}, {86, 204}, {88, 211}, {90, 218}, {92, 225}, {94, 232}, {96, 240}, {98, 247}};
struct custom_backlight_profile {
uint8_t ac_level_percentage;
uint8_t dc_level_percentage;
uint8_t min_input_signal;
uint8_t max_input_signal;
uint8_t num_data_points;
const struct dm_bl_data_point *data_points;
};
static const struct custom_backlight_profile custom_backlight_profiles[] = {
{100, 32, 12, 255, ARRAY_SIZE(custom_backlight_curve0), custom_backlight_curve0},
};
#define NUM_AMBI_LEVEL 5
#define NUM_AGGR_LEVEL 4
#define NUM_POWER_FN_SEGS 8
#define NUM_BL_CURVE_SEGS 16
#define IRAM_SIZE 256
#define IRAM_RESERVE_AREA_START_V2 0xF0 // reserved: 0xF0~0xF6 are written by DMCU only
#define IRAM_RESERVE_AREA_END_V2 0xF6 // reserved: 0xF0~0xF6 are written by DMCU only
#define IRAM_RESERVE_AREA_START_V2_2 0xF0 // reserved: 0xF0~0xFF are written by DMCU only
#define IRAM_RESERVE_AREA_END_V2_2 0xFF // reserved: 0xF0~0xFF are written by DMCU only
#pragma pack(push, 1)
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
uint16_t min_abm_backlight; /* 0x00 U16 */
/* parameters for ABM2.0 algorithm */
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x52 U2.6 */
uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x66 U2.6 */
uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x7a U0.8 */
uint8_t deviation_gain; /* 0x7f U0.8 */
/* parameters for crgb conversion */
uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
/* parameters for custom curve */
/* thresholds for brightness --> backlight */
uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
/* offsets for brightness --> backlight */
uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
/* For reading PSR State directly from IRAM */
uint8_t psr_state; /* 0xf0 */
uint8_t dmcu_mcp_interface_version; /* 0xf1 */
uint8_t dmcu_abm_feature_version; /* 0xf2 */
uint8_t dmcu_psr_feature_version; /* 0xf3 */
uint16_t dmcu_version; /* 0xf4 */
uint8_t dmcu_state; /* 0xf6 */
uint16_t blRampReduction; /* 0xf7 */
uint16_t blRampStart; /* 0xf9 */
uint8_t dummy5; /* 0xfb */
uint8_t dummy6; /* 0xfc */
uint8_t dummy7; /* 0xfd */
uint8_t dummy8; /* 0xfe */
uint8_t dummy9; /* 0xff */
};
struct iram_table_v_2_2 {
/* flags */
uint16_t flags; /* 0x00 U16 */
/* parameters for ABM2.2 algorithm */
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
uint16_t min_abm_backlight; /* 0x6b U16 */
uint8_t pad[19]; /* 0x6d U0.8 */
/* parameters for crgb conversion */
uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
/* parameters for custom curve */
/* thresholds for brightness --> backlight */
uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
/* offsets for brightness --> backlight */
uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
/* For reading PSR State directly from IRAM */
uint8_t psr_state; /* 0xf0 */
uint8_t dmcu_mcp_interface_version; /* 0xf1 */
uint8_t dmcu_abm_feature_version; /* 0xf2 */
uint8_t dmcu_psr_feature_version; /* 0xf3 */
uint16_t dmcu_version; /* 0xf4 */
uint8_t dmcu_state; /* 0xf6 */
uint8_t dummy1; /* 0xf7 */
uint8_t dummy2; /* 0xf8 */
uint8_t dummy3; /* 0xf9 */
uint8_t dummy4; /* 0xfa */
uint8_t dummy5; /* 0xfb */
uint8_t dummy6; /* 0xfc */
uint8_t dummy7; /* 0xfd */
uint8_t dummy8; /* 0xfe */
uint8_t dummy9; /* 0xff */
};
#pragma pack(pop)
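/* Offset sanity check (illustrative, derived from the field comments above):
 * each [NUM_AMBI_LEVEL][NUM_AGGR_LEVEL] array occupies 5 * 4 = 20 (0x14) bytes,
 * so in iram_table_v_2 min_reduction at 0x02 is followed by max_reduction at
 * 0x02 + 0x14 = 0x16, bright_pos_gain at 0x2a, and so on up to iir_curve at
 * 0x7a, which is why the whole table fits in the 256-byte IRAM.
 */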
static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
struct iram_table_v_2 *table)
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
table->backlight_offsets[0] = params.backlight_lut_array[0];
table->backlight_thresholds[num_entries-1] = 0xFFFF;
table->backlight_offsets[num_entries-1] =
params.backlight_lut_array[params.backlight_lut_array_size - 1];
/* Set up all brightness levels between 0% and 100% exclusive.
 * Fills the brightness-to-backlight transform table. The backlight custom
 * curve describes the transform from brightness to backlight. It is defined
 * as a set of thresholds and a set of offsets which, together, approximate
 * the custom curve with 16 uniformly spanned linear segments. Each
 * threshold/offset is represented by a 16-bit entry in U4.10 format.
 */
for (i = 1; i+1 < num_entries; i++) {
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
table->backlight_thresholds[i] =
cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
table->backlight_offsets[i] =
cpu_to_be16(params.backlight_lut_array[lut_index]);
}
}
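/* Worked example (illustrative only): NUM_BL_CURVE_SEGS is 16, so for the
 * midpoint entry i = 8 the threshold written above is
 * DIV_ROUNDUP(8 * 65536, 16) = 0x8000. The 16 thresholds therefore split the
 * 0..0xFFFF brightness range into uniform segments, while the offsets sample
 * the caller's backlight LUT at proportional positions.
 */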
static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params,
struct iram_table_v_2_2 *table, bool big_endian)
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
table->backlight_offsets[0] = params.backlight_lut_array[0];
table->backlight_thresholds[num_entries-1] = 0xFFFF;
table->backlight_offsets[num_entries-1] =
params.backlight_lut_array[params.backlight_lut_array_size - 1];
/* Set up all brightness levels between 0% and 100% exclusive.
 * Fills the brightness-to-backlight transform table. The backlight custom
 * curve describes the transform from brightness to backlight. It is defined
 * as a set of thresholds and a set of offsets which, together, approximate
 * the custom curve with 16 uniformly spanned linear segments. Each
 * threshold/offset is represented by a 16-bit entry in U4.10 format.
 */
for (i = 1; i+1 < num_entries; i++) {
lut_index = DIV_ROUNDUP((i * params.backlight_lut_array_size), num_entries);
ASSERT(lut_index < params.backlight_lut_array_size);
table->backlight_thresholds[i] = (big_endian) ?
cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) :
cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries));
table->backlight_offsets[i] = (big_endian) ?
cpu_to_be16(params.backlight_lut_array[lut_index]) :
cpu_to_le16(params.backlight_lut_array[lut_index]);
}
}
static void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
ram_table->min_abm_backlight =
cpu_to_be16(params.min_abm_backlight);
ram_table->deviation_gain = 0xb3;
ram_table->blRampReduction =
cpu_to_be16(params.backlight_ramping_reduction);
ram_table->blRampStart =
cpu_to_be16(params.backlight_ramping_start);
ram_table->min_reduction[0][0] = min_reduction_table[abm_config[set][0]];
ram_table->min_reduction[1][0] = min_reduction_table[abm_config[set][0]];
ram_table->min_reduction[2][0] = min_reduction_table[abm_config[set][0]];
ram_table->min_reduction[3][0] = min_reduction_table[abm_config[set][0]];
ram_table->min_reduction[4][0] = min_reduction_table[abm_config[set][0]];
ram_table->max_reduction[0][0] = max_reduction_table[abm_config[set][0]];
ram_table->max_reduction[1][0] = max_reduction_table[abm_config[set][0]];
ram_table->max_reduction[2][0] = max_reduction_table[abm_config[set][0]];
ram_table->max_reduction[3][0] = max_reduction_table[abm_config[set][0]];
ram_table->max_reduction[4][0] = max_reduction_table[abm_config[set][0]];
ram_table->min_reduction[0][1] = min_reduction_table[abm_config[set][1]];
ram_table->min_reduction[1][1] = min_reduction_table[abm_config[set][1]];
ram_table->min_reduction[2][1] = min_reduction_table[abm_config[set][1]];
ram_table->min_reduction[3][1] = min_reduction_table[abm_config[set][1]];
ram_table->min_reduction[4][1] = min_reduction_table[abm_config[set][1]];
ram_table->max_reduction[0][1] = max_reduction_table[abm_config[set][1]];
ram_table->max_reduction[1][1] = max_reduction_table[abm_config[set][1]];
ram_table->max_reduction[2][1] = max_reduction_table[abm_config[set][1]];
ram_table->max_reduction[3][1] = max_reduction_table[abm_config[set][1]];
ram_table->max_reduction[4][1] = max_reduction_table[abm_config[set][1]];
ram_table->min_reduction[0][2] = min_reduction_table[abm_config[set][2]];
ram_table->min_reduction[1][2] = min_reduction_table[abm_config[set][2]];
ram_table->min_reduction[2][2] = min_reduction_table[abm_config[set][2]];
ram_table->min_reduction[3][2] = min_reduction_table[abm_config[set][2]];
ram_table->min_reduction[4][2] = min_reduction_table[abm_config[set][2]];
ram_table->max_reduction[0][2] = max_reduction_table[abm_config[set][2]];
ram_table->max_reduction[1][2] = max_reduction_table[abm_config[set][2]];
ram_table->max_reduction[2][2] = max_reduction_table[abm_config[set][2]];
ram_table->max_reduction[3][2] = max_reduction_table[abm_config[set][2]];
ram_table->max_reduction[4][2] = max_reduction_table[abm_config[set][2]];
ram_table->min_reduction[0][3] = min_reduction_table[abm_config[set][3]];
ram_table->min_reduction[1][3] = min_reduction_table[abm_config[set][3]];
ram_table->min_reduction[2][3] = min_reduction_table[abm_config[set][3]];
ram_table->min_reduction[3][3] = min_reduction_table[abm_config[set][3]];
ram_table->min_reduction[4][3] = min_reduction_table[abm_config[set][3]];
ram_table->max_reduction[0][3] = max_reduction_table[abm_config[set][3]];
ram_table->max_reduction[1][3] = max_reduction_table[abm_config[set][3]];
ram_table->max_reduction[2][3] = max_reduction_table[abm_config[set][3]];
ram_table->max_reduction[3][3] = max_reduction_table[abm_config[set][3]];
ram_table->max_reduction[4][3] = max_reduction_table[abm_config[set][3]];
ram_table->bright_pos_gain[0][0] = 0x20;
ram_table->bright_pos_gain[0][1] = 0x20;
ram_table->bright_pos_gain[0][2] = 0x20;
ram_table->bright_pos_gain[0][3] = 0x20;
ram_table->bright_pos_gain[1][0] = 0x20;
ram_table->bright_pos_gain[1][1] = 0x20;
ram_table->bright_pos_gain[1][2] = 0x20;
ram_table->bright_pos_gain[1][3] = 0x20;
ram_table->bright_pos_gain[2][0] = 0x20;
ram_table->bright_pos_gain[2][1] = 0x20;
ram_table->bright_pos_gain[2][2] = 0x20;
ram_table->bright_pos_gain[2][3] = 0x20;
ram_table->bright_pos_gain[3][0] = 0x20;
ram_table->bright_pos_gain[3][1] = 0x20;
ram_table->bright_pos_gain[3][2] = 0x20;
ram_table->bright_pos_gain[3][3] = 0x20;
ram_table->bright_pos_gain[4][0] = 0x20;
ram_table->bright_pos_gain[4][1] = 0x20;
ram_table->bright_pos_gain[4][2] = 0x20;
ram_table->bright_pos_gain[4][3] = 0x20;
ram_table->bright_neg_gain[0][0] = 0x00;
ram_table->bright_neg_gain[0][1] = 0x00;
ram_table->bright_neg_gain[0][2] = 0x00;
ram_table->bright_neg_gain[0][3] = 0x00;
ram_table->bright_neg_gain[1][0] = 0x00;
ram_table->bright_neg_gain[1][1] = 0x00;
ram_table->bright_neg_gain[1][2] = 0x00;
ram_table->bright_neg_gain[1][3] = 0x00;
ram_table->bright_neg_gain[2][0] = 0x00;
ram_table->bright_neg_gain[2][1] = 0x00;
ram_table->bright_neg_gain[2][2] = 0x00;
ram_table->bright_neg_gain[2][3] = 0x00;
ram_table->bright_neg_gain[3][0] = 0x00;
ram_table->bright_neg_gain[3][1] = 0x00;
ram_table->bright_neg_gain[3][2] = 0x00;
ram_table->bright_neg_gain[3][3] = 0x00;
ram_table->bright_neg_gain[4][0] = 0x00;
ram_table->bright_neg_gain[4][1] = 0x00;
ram_table->bright_neg_gain[4][2] = 0x00;
ram_table->bright_neg_gain[4][3] = 0x00;
ram_table->dark_pos_gain[0][0] = 0x00;
ram_table->dark_pos_gain[0][1] = 0x00;
ram_table->dark_pos_gain[0][2] = 0x00;
ram_table->dark_pos_gain[0][3] = 0x00;
ram_table->dark_pos_gain[1][0] = 0x00;
ram_table->dark_pos_gain[1][1] = 0x00;
ram_table->dark_pos_gain[1][2] = 0x00;
ram_table->dark_pos_gain[1][3] = 0x00;
ram_table->dark_pos_gain[2][0] = 0x00;
ram_table->dark_pos_gain[2][1] = 0x00;
ram_table->dark_pos_gain[2][2] = 0x00;
ram_table->dark_pos_gain[2][3] = 0x00;
ram_table->dark_pos_gain[3][0] = 0x00;
ram_table->dark_pos_gain[3][1] = 0x00;
ram_table->dark_pos_gain[3][2] = 0x00;
ram_table->dark_pos_gain[3][3] = 0x00;
ram_table->dark_pos_gain[4][0] = 0x00;
ram_table->dark_pos_gain[4][1] = 0x00;
ram_table->dark_pos_gain[4][2] = 0x00;
ram_table->dark_pos_gain[4][3] = 0x00;
ram_table->dark_neg_gain[0][0] = 0x00;
ram_table->dark_neg_gain[0][1] = 0x00;
ram_table->dark_neg_gain[0][2] = 0x00;
ram_table->dark_neg_gain[0][3] = 0x00;
ram_table->dark_neg_gain[1][0] = 0x00;
ram_table->dark_neg_gain[1][1] = 0x00;
ram_table->dark_neg_gain[1][2] = 0x00;
ram_table->dark_neg_gain[1][3] = 0x00;
ram_table->dark_neg_gain[2][0] = 0x00;
ram_table->dark_neg_gain[2][1] = 0x00;
ram_table->dark_neg_gain[2][2] = 0x00;
ram_table->dark_neg_gain[2][3] = 0x00;
ram_table->dark_neg_gain[3][0] = 0x00;
ram_table->dark_neg_gain[3][1] = 0x00;
ram_table->dark_neg_gain[3][2] = 0x00;
ram_table->dark_neg_gain[3][3] = 0x00;
ram_table->dark_neg_gain[4][0] = 0x00;
ram_table->dark_neg_gain[4][1] = 0x00;
ram_table->dark_neg_gain[4][2] = 0x00;
ram_table->dark_neg_gain[4][3] = 0x00;
ram_table->iir_curve[0] = 0x65;
ram_table->iir_curve[1] = 0x65;
ram_table->iir_curve[2] = 0x65;
ram_table->iir_curve[3] = 0x65;
ram_table->iir_curve[4] = 0x65;
//Gamma 2.4
ram_table->crgb_thresh[0] = cpu_to_be16(0x13b6);
ram_table->crgb_thresh[1] = cpu_to_be16(0x1648);
ram_table->crgb_thresh[2] = cpu_to_be16(0x18e3);
ram_table->crgb_thresh[3] = cpu_to_be16(0x1b41);
ram_table->crgb_thresh[4] = cpu_to_be16(0x1d46);
ram_table->crgb_thresh[5] = cpu_to_be16(0x1f21);
ram_table->crgb_thresh[6] = cpu_to_be16(0x2167);
ram_table->crgb_thresh[7] = cpu_to_be16(0x2384);
ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
ram_table->crgb_slope[0] = cpu_to_be16(0x3147);
ram_table->crgb_slope[1] = cpu_to_be16(0x2978);
ram_table->crgb_slope[2] = cpu_to_be16(0x23a2);
ram_table->crgb_slope[3] = cpu_to_be16(0x1f55);
ram_table->crgb_slope[4] = cpu_to_be16(0x1c63);
ram_table->crgb_slope[5] = cpu_to_be16(0x1a0f);
ram_table->crgb_slope[6] = cpu_to_be16(0x178d);
ram_table->crgb_slope[7] = cpu_to_be16(0x15ab);
fill_backlight_transform_table(
params, ram_table);
}
static void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
ram_table->flags = 0x0;
ram_table->min_abm_backlight =
cpu_to_be16(params.min_abm_backlight);
ram_table->deviation_gain[0] = 0xb3;
ram_table->deviation_gain[1] = 0xa8;
ram_table->deviation_gain[2] = 0x98;
ram_table->deviation_gain[3] = 0x68;
ram_table->min_reduction[0][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[1][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[2][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[3][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[4][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->max_reduction[0][0] = max_reduction_table_v_2_2[abm_config[set][0]];
ram_table->max_reduction[1][0] = max_reduction_table_v_2_2[abm_config[set][0]];
ram_table->max_reduction[2][0] = max_reduction_table_v_2_2[abm_config[set][0]];
ram_table->max_reduction[3][0] = max_reduction_table_v_2_2[abm_config[set][0]];
ram_table->max_reduction[4][0] = max_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[0][1] = min_reduction_table_v_2_2[abm_config[set][1]];
ram_table->min_reduction[1][1] = min_reduction_table_v_2_2[abm_config[set][1]];
ram_table->min_reduction[2][1] = min_reduction_table_v_2_2[abm_config[set][1]];
ram_table->min_reduction[3][1] = min_reduction_table_v_2_2[abm_config[set][1]];
ram_table->min_reduction[4][1] = min_reduction_table_v_2_2[abm_config[set][1]];
ram_table->max_reduction[0][1] = max_reduction_table_v_2_2[abm_config[set][1]];
ram_table->max_reduction[1][1] = max_reduction_table_v_2_2[abm_config[set][1]];
ram_table->max_reduction[2][1] = max_reduction_table_v_2_2[abm_config[set][1]];
ram_table->max_reduction[3][1] = max_reduction_table_v_2_2[abm_config[set][1]];
ram_table->max_reduction[4][1] = max_reduction_table_v_2_2[abm_config[set][1]];
ram_table->min_reduction[0][2] = min_reduction_table_v_2_2[abm_config[set][2]];
ram_table->min_reduction[1][2] = min_reduction_table_v_2_2[abm_config[set][2]];
ram_table->min_reduction[2][2] = min_reduction_table_v_2_2[abm_config[set][2]];
ram_table->min_reduction[3][2] = min_reduction_table_v_2_2[abm_config[set][2]];
ram_table->min_reduction[4][2] = min_reduction_table_v_2_2[abm_config[set][2]];
ram_table->max_reduction[0][2] = max_reduction_table_v_2_2[abm_config[set][2]];
ram_table->max_reduction[1][2] = max_reduction_table_v_2_2[abm_config[set][2]];
ram_table->max_reduction[2][2] = max_reduction_table_v_2_2[abm_config[set][2]];
ram_table->max_reduction[3][2] = max_reduction_table_v_2_2[abm_config[set][2]];
ram_table->max_reduction[4][2] = max_reduction_table_v_2_2[abm_config[set][2]];
ram_table->min_reduction[0][3] = min_reduction_table_v_2_2[abm_config[set][3]];
ram_table->min_reduction[1][3] = min_reduction_table_v_2_2[abm_config[set][3]];
ram_table->min_reduction[2][3] = min_reduction_table_v_2_2[abm_config[set][3]];
ram_table->min_reduction[3][3] = min_reduction_table_v_2_2[abm_config[set][3]];
ram_table->min_reduction[4][3] = min_reduction_table_v_2_2[abm_config[set][3]];
ram_table->max_reduction[0][3] = max_reduction_table_v_2_2[abm_config[set][3]];
ram_table->max_reduction[1][3] = max_reduction_table_v_2_2[abm_config[set][3]];
ram_table->max_reduction[2][3] = max_reduction_table_v_2_2[abm_config[set][3]];
ram_table->max_reduction[3][3] = max_reduction_table_v_2_2[abm_config[set][3]];
ram_table->max_reduction[4][3] = max_reduction_table_v_2_2[abm_config[set][3]];
ram_table->bright_pos_gain[0][0] = 0x20;
ram_table->bright_pos_gain[0][1] = 0x20;
ram_table->bright_pos_gain[0][2] = 0x20;
ram_table->bright_pos_gain[0][3] = 0x20;
ram_table->bright_pos_gain[1][0] = 0x20;
ram_table->bright_pos_gain[1][1] = 0x20;
ram_table->bright_pos_gain[1][2] = 0x20;
ram_table->bright_pos_gain[1][3] = 0x20;
ram_table->bright_pos_gain[2][0] = 0x20;
ram_table->bright_pos_gain[2][1] = 0x20;
ram_table->bright_pos_gain[2][2] = 0x20;
ram_table->bright_pos_gain[2][3] = 0x20;
ram_table->bright_pos_gain[3][0] = 0x20;
ram_table->bright_pos_gain[3][1] = 0x20;
ram_table->bright_pos_gain[3][2] = 0x20;
ram_table->bright_pos_gain[3][3] = 0x20;
ram_table->bright_pos_gain[4][0] = 0x20;
ram_table->bright_pos_gain[4][1] = 0x20;
ram_table->bright_pos_gain[4][2] = 0x20;
ram_table->bright_pos_gain[4][3] = 0x20;
ram_table->dark_pos_gain[0][0] = 0x00;
ram_table->dark_pos_gain[0][1] = 0x00;
ram_table->dark_pos_gain[0][2] = 0x00;
ram_table->dark_pos_gain[0][3] = 0x00;
ram_table->dark_pos_gain[1][0] = 0x00;
ram_table->dark_pos_gain[1][1] = 0x00;
ram_table->dark_pos_gain[1][2] = 0x00;
ram_table->dark_pos_gain[1][3] = 0x00;
ram_table->dark_pos_gain[2][0] = 0x00;
ram_table->dark_pos_gain[2][1] = 0x00;
ram_table->dark_pos_gain[2][2] = 0x00;
ram_table->dark_pos_gain[2][3] = 0x00;
ram_table->dark_pos_gain[3][0] = 0x00;
ram_table->dark_pos_gain[3][1] = 0x00;
ram_table->dark_pos_gain[3][2] = 0x00;
ram_table->dark_pos_gain[3][3] = 0x00;
ram_table->dark_pos_gain[4][0] = 0x00;
ram_table->dark_pos_gain[4][1] = 0x00;
ram_table->dark_pos_gain[4][2] = 0x00;
ram_table->dark_pos_gain[4][3] = 0x00;
ram_table->hybrid_factor[0] = 0xff;
ram_table->hybrid_factor[1] = 0xff;
ram_table->hybrid_factor[2] = 0xff;
ram_table->hybrid_factor[3] = 0xc0;
ram_table->contrast_factor[0] = 0x99;
ram_table->contrast_factor[1] = 0x99;
ram_table->contrast_factor[2] = 0x90;
ram_table->contrast_factor[3] = 0x80;
ram_table->iir_curve[0] = 0x65;
ram_table->iir_curve[1] = 0x65;
ram_table->iir_curve[2] = 0x65;
ram_table->iir_curve[3] = 0x65;
ram_table->iir_curve[4] = 0x65;
//Gamma 2.2
ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
ram_table->crgb_slope[0] = cpu_to_be16(0x3609);
ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa);
ram_table->crgb_slope[2] = cpu_to_be16(0x27ea);
ram_table->crgb_slope[3] = cpu_to_be16(0x235d);
ram_table->crgb_slope[4] = cpu_to_be16(0x2042);
ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3);
ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a);
ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
fill_backlight_transform_table_v_2_2(
params, ram_table, true);
}
static void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
{
unsigned int i, j;
unsigned int set = params.set;
ram_table->flags = 0x0;
ram_table->min_abm_backlight = (big_endian) ?
cpu_to_be16(params.min_abm_backlight) :
cpu_to_le16(params.min_abm_backlight);
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
ram_table->deviation_gain[i] = abm_settings[set][i].deviation_gain;
ram_table->min_knee[i] = abm_settings[set][i].min_knee;
ram_table->max_knee[i] = abm_settings[set][i].max_knee;
for (j = 0; j < NUM_AMBI_LEVEL; j++) {
ram_table->min_reduction[j][i] = abm_settings[set][i].min_reduction;
ram_table->max_reduction[j][i] = abm_settings[set][i].max_reduction;
ram_table->bright_pos_gain[j][i] = abm_settings[set][i].bright_pos_gain;
ram_table->dark_pos_gain[j][i] = abm_settings[set][i].dark_pos_gain;
}
}
ram_table->iir_curve[0] = 0x65;
ram_table->iir_curve[1] = 0x65;
ram_table->iir_curve[2] = 0x65;
ram_table->iir_curve[3] = 0x65;
ram_table->iir_curve[4] = 0x65;
//Gamma 2.2
ram_table->crgb_thresh[0] = bswap16_based_on_endian(big_endian, 0x127c);
ram_table->crgb_thresh[1] = bswap16_based_on_endian(big_endian, 0x151b);
ram_table->crgb_thresh[2] = bswap16_based_on_endian(big_endian, 0x17d5);
ram_table->crgb_thresh[3] = bswap16_based_on_endian(big_endian, 0x1a56);
ram_table->crgb_thresh[4] = bswap16_based_on_endian(big_endian, 0x1c83);
ram_table->crgb_thresh[5] = bswap16_based_on_endian(big_endian, 0x1e72);
ram_table->crgb_thresh[6] = bswap16_based_on_endian(big_endian, 0x20f0);
ram_table->crgb_thresh[7] = bswap16_based_on_endian(big_endian, 0x232b);
ram_table->crgb_offset[0] = bswap16_based_on_endian(big_endian, 0x2999);
ram_table->crgb_offset[1] = bswap16_based_on_endian(big_endian, 0x3999);
ram_table->crgb_offset[2] = bswap16_based_on_endian(big_endian, 0x4666);
ram_table->crgb_offset[3] = bswap16_based_on_endian(big_endian, 0x5999);
ram_table->crgb_offset[4] = bswap16_based_on_endian(big_endian, 0x6333);
ram_table->crgb_offset[5] = bswap16_based_on_endian(big_endian, 0x7800);
ram_table->crgb_offset[6] = bswap16_based_on_endian(big_endian, 0x8c00);
ram_table->crgb_offset[7] = bswap16_based_on_endian(big_endian, 0xa000);
ram_table->crgb_slope[0] = bswap16_based_on_endian(big_endian, 0x3609);
ram_table->crgb_slope[1] = bswap16_based_on_endian(big_endian, 0x2dfa);
ram_table->crgb_slope[2] = bswap16_based_on_endian(big_endian, 0x27ea);
ram_table->crgb_slope[3] = bswap16_based_on_endian(big_endian, 0x235d);
ram_table->crgb_slope[4] = bswap16_based_on_endian(big_endian, 0x2042);
ram_table->crgb_slope[5] = bswap16_based_on_endian(big_endian, 0x1dc3);
ram_table->crgb_slope[6] = bswap16_based_on_endian(big_endian, 0x1b1a);
ram_table->crgb_slope[7] = bswap16_based_on_endian(big_endian, 0x1910);
fill_backlight_transform_table_v_2_2(
params, ram_table, big_endian);
}
bool dmub_init_abm_config(struct resource_pool *res_pool,
struct dmcu_iram_parameters params,
unsigned int inst)
{
struct iram_table_v_2_2 ram_table;
struct abm_config_table config;
unsigned int set = params.set;
bool result = false;
uint32_t i, j = 0;
if (res_pool->abm == NULL && res_pool->multiple_abms[inst] == NULL)
return false;
memset(&ram_table, 0, sizeof(ram_table));
memset(&config, 0, sizeof(config));
fill_iram_v_2_3(&ram_table, params, false);
// We must copy to structure that is aligned to 32-bit
for (i = 0; i < NUM_POWER_FN_SEGS; i++) {
config.crgb_thresh[i] = ram_table.crgb_thresh[i];
config.crgb_offset[i] = ram_table.crgb_offset[i];
config.crgb_slope[i] = ram_table.crgb_slope[i];
}
for (i = 0; i < NUM_BL_CURVE_SEGS; i++) {
config.backlight_thresholds[i] = ram_table.backlight_thresholds[i];
config.backlight_offsets[i] = ram_table.backlight_offsets[i];
}
for (i = 0; i < NUM_AMBI_LEVEL; i++)
config.iir_curve[i] = ram_table.iir_curve[i];
for (i = 0; i < NUM_AMBI_LEVEL; i++) {
for (j = 0; j < NUM_AGGR_LEVEL; j++) {
config.min_reduction[i][j] = ram_table.min_reduction[i][j];
config.max_reduction[i][j] = ram_table.max_reduction[i][j];
config.bright_pos_gain[i][j] = ram_table.bright_pos_gain[i][j];
config.dark_pos_gain[i][j] = ram_table.dark_pos_gain[i][j];
}
}
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
config.hybrid_factor[i] = ram_table.hybrid_factor[i];
config.contrast_factor[i] = ram_table.contrast_factor[i];
config.deviation_gain[i] = ram_table.deviation_gain[i];
config.min_knee[i] = ram_table.min_knee[i];
config.max_knee[i] = ram_table.max_knee[i];
}
if (params.backlight_ramping_override) {
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
config.blRampReduction[i] = params.backlight_ramping_reduction;
config.blRampStart[i] = params.backlight_ramping_start;
}
} else {
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
config.blRampReduction[i] = abm_settings[set][i].blRampReduction;
config.blRampStart[i] = abm_settings[set][i].blRampStart;
}
}
config.min_abm_backlight = ram_table.min_abm_backlight;
if (res_pool->multiple_abms[inst]) {
result = res_pool->multiple_abms[inst]->funcs->init_abm_config(
res_pool->multiple_abms[inst], (char *)(&config), sizeof(struct abm_config_table), inst);
} else
result = res_pool->abm->funcs->init_abm_config(
res_pool->abm, (char *)(&config), sizeof(struct abm_config_table), 0);
return result;
}
bool dmcu_load_iram(struct dmcu *dmcu,
struct dmcu_iram_parameters params)
{
unsigned char ram_table[IRAM_SIZE];
bool result = false;
if (dmcu == NULL)
return false;
if (dmcu && !dmcu->funcs->is_dmcu_initialized(dmcu))
return true;
memset(&ram_table, 0, sizeof(ram_table));
if (dmcu->dmcu_version.abm_version == 0x24) {
fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
result = dmcu->funcs->load_iram(dmcu, 0, (char *)(&ram_table),
IRAM_RESERVE_AREA_START_V2_2);
} else if (dmcu->dmcu_version.abm_version == 0x23) {
fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
result = dmcu->funcs->load_iram(
dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
} else if (dmcu->dmcu_version.abm_version == 0x22) {
fill_iram_v_2_2((struct iram_table_v_2_2 *)ram_table, params);
result = dmcu->funcs->load_iram(
dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
} else {
fill_iram_v_2((struct iram_table_v_2 *)ram_table, params);
result = dmcu->funcs->load_iram(
dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2);
if (result)
result = dmcu->funcs->load_iram(
dmcu, IRAM_RESERVE_AREA_END_V2 + 1,
(char *)(&ram_table) + IRAM_RESERVE_AREA_END_V2 + 1,
sizeof(ram_table) - IRAM_RESERVE_AREA_END_V2 - 1);
}
return result;
}
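/* Illustrative breakdown of the legacy (pre-2.2 ABM) path above: the table is
 * written in two chunks so the DMCU-owned area is never touched. The first
 * load covers offsets 0x00..0xEF (IRAM_RESERVE_AREA_START_V2 = 0xF0 bytes),
 * the second starts at IRAM_RESERVE_AREA_END_V2 + 1 = 0xF7 and writes the
 * remaining 256 - 0xF7 = 9 bytes. The 2.2+ paths write only the first 0xF0
 * bytes and stop before the reserved area.
 */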
/*
 * is_psr_su_specific_panel() - check if the sink is an eDP device that
 * supports AMD vendor-specific PSR-SU.
 *
 * @link: dc link pointer
 *
 * Return: true if the sink is an AMDGPU vendor-specific PSR-SU eDP panel
 */
bool is_psr_su_specific_panel(struct dc_link *link)
{
bool isPSRSUSupported = false;
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
if (dpcd_caps->edp_rev >= DP_EDP_14) {
if (dpcd_caps->psr_info.psr_version >= DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
isPSRSUSupported = true;
/*
* Some panels will report PSR capabilities over additional DPCD bits.
* Such panels are approved despite reporting only PSR v3, as long as
* the additional bits are reported.
*/
if (dpcd_caps->sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) {
/*
 * This is a temporary workaround to disable PSR-SU when the system has
 * turned on the DSC function for this specific sink.
 */
if (dpcd_caps->psr_info.psr_version < DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)
isPSRSUSupported = false;
else if (dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
(dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
}
return isPSRSUSupported;
}
/**
* mod_power_calc_psr_configs() - calculate/update generic psr configuration fields.
* @psr_config: [output], psr configuration structure to be updated
* @link: [input] dc link pointer
* @stream: [input] dc stream state pointer
*
* Calculate and update the PSR configuration fields that are not DM-specific, i.e.
* fields that are based on DPCD caps or timing information. To set up PSR in DMUB FW,
* this helper is expected to be called before the DC helper dc_link_setup_psr().
*
* PSR config fields to be updated within the helper:
* - psr_rfb_setup_time
* - psr_sdp_transmit_line_num_deadline
* - line_time_in_us
* - su_y_granularity
* - su_granularity_required
* - psr_frame_capture_indication_req
* - psr_exit_link_training_required
*
* PSR config fields that are DM specific and NOT updated within the helper:
* - allow_smu_optimizations
* - allow_multi_disp_optimizations
*/
void mod_power_calc_psr_configs(struct psr_config *psr_config,
struct dc_link *link,
const struct dc_stream_state *stream)
{
unsigned int num_vblank_lines = 0;
unsigned int vblank_time_in_us = 0;
unsigned int sdp_tx_deadline_in_us = 0;
unsigned int line_time_in_us = 0;
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
const int psr_setup_time_step_in_us = 55; /* refer to eDP spec DPCD 0x071h */
/* timing parameters */
num_vblank_lines = stream->timing.v_total -
stream->timing.v_addressable -
stream->timing.v_border_top -
stream->timing.v_border_bottom;
vblank_time_in_us = (stream->timing.h_total * num_vblank_lines * 1000) / (stream->timing.pix_clk_100hz / 10);
line_time_in_us = ((stream->timing.h_total * 1000) / (stream->timing.pix_clk_100hz / 10)) + 1;
/**
* psr configuration fields
*
* as per eDP 1.5 pg. 377 of 459, DPCD 0x071h bits [3:1], psr setup time bits interpreted as below
* 000b <--> 330 us (default)
* 001b <--> 275 us
* 010b <--> 220 us
* 011b <--> 165 us
* 100b <--> 110 us
* 101b <--> 055 us
* 110b <--> 000 us
*/
psr_config->psr_rfb_setup_time =
(6 - dpcd_caps->psr_info.psr_dpcd_caps.bits.PSR_SETUP_TIME) * psr_setup_time_step_in_us;
if (psr_config->psr_rfb_setup_time > vblank_time_in_us) {
link->psr_settings.psr_frame_capture_indication_req = true;
link->psr_settings.psr_sdp_transmit_line_num_deadline = num_vblank_lines;
} else {
sdp_tx_deadline_in_us = vblank_time_in_us - psr_config->psr_rfb_setup_time;
/* Set the last possible line SDP may be transmitted without violating the RFB setup time */
link->psr_settings.psr_frame_capture_indication_req = false;
link->psr_settings.psr_sdp_transmit_line_num_deadline = sdp_tx_deadline_in_us / line_time_in_us;
}
psr_config->psr_sdp_transmit_line_num_deadline = link->psr_settings.psr_sdp_transmit_line_num_deadline;
psr_config->line_time_in_us = line_time_in_us;
psr_config->su_y_granularity = dpcd_caps->psr_info.psr2_su_y_granularity_cap;
psr_config->su_granularity_required = dpcd_caps->psr_info.psr_dpcd_caps.bits.SU_GRANULARITY_REQUIRED;
psr_config->psr_frame_capture_indication_req = link->psr_settings.psr_frame_capture_indication_req;
psr_config->psr_exit_link_training_required =
!link->dpcd_caps.psr_info.psr_dpcd_caps.bits.LINK_TRAINING_ON_EXIT_NOT_REQUIRED;
}
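/* Worked example (illustrative numbers only): a panel reporting
 * PSR_SETUP_TIME = 010b yields psr_rfb_setup_time = (6 - 2) * 55 = 220 us,
 * matching the table above. With, say, 500 us of vblank and an 8 us line
 * time, the SDP deadline becomes (500 - 220) / 8 = 35 lines; if the setup
 * time exceeded the vblank time, frame capture indication would be requested
 * instead.
 */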
void init_replay_config(struct dc_link *link, struct replay_config *pr_config)
{
link->replay_settings.config = *pr_config;
}
bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_state *stream)
{
return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
}
bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
struct dc_stream_state *stream,
struct psr_config *config)
{
uint16_t pic_height;
uint16_t slice_height;
config->dsc_slice_height = 0;
if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
(!dc->caps.edp_dsc_support ||
link->panel_config.dsc.disable_dsc_edp ||
!link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
!stream->timing.dsc_cfg.num_slices_v))
return true;
pic_height = stream->timing.v_addressable +
stream->timing.v_border_top + stream->timing.v_border_bottom;
if (stream->timing.dsc_cfg.num_slices_v == 0)
return false;
slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
config->dsc_slice_height = slice_height;
if (slice_height) {
if (config->su_y_granularity &&
(slice_height % config->su_y_granularity)) {
ASSERT(0);
return false;
}
}
return true;
}
bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
{
unsigned int data_points_size;
if (config_no >= ARRAY_SIZE(custom_backlight_profiles))
return false;
data_points_size = custom_backlight_profiles[config_no].num_data_points
* sizeof(custom_backlight_profiles[config_no].data_points[0]);
caps->size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size;
caps->flags = 0;
caps->error_code = 0;
caps->ac_level_percentage = custom_backlight_profiles[config_no].ac_level_percentage;
caps->dc_level_percentage = custom_backlight_profiles[config_no].dc_level_percentage;
caps->min_input_signal = custom_backlight_profiles[config_no].min_input_signal;
caps->max_input_signal = custom_backlight_profiles[config_no].max_input_signal;
caps->num_data_points = custom_backlight_profiles[config_no].num_data_points;
memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
return true;
}
| linux-master | drivers/gpu/drm/amd/display/modules/power/power_helpers.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "mod_vmid.h"
struct core_vmid {
struct mod_vmid public;
struct dc *dc;
unsigned int num_vmid;
unsigned int num_vmids_available;
uint64_t ptb_assigned_to_vmid[MAX_VMID];
struct dc_virtual_addr_space_config base_config;
};
#define MOD_VMID_TO_CORE(mod_vmid)\
container_of(mod_vmid, struct core_vmid, public)
static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb)
{
if (vmid < MAX_VMID) {
core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
core_vmid->num_vmids_available--;
}
}
static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid)
{
if (vmid < MAX_VMID) {
core_vmid->ptb_assigned_to_vmid[vmid] = 0;
core_vmid->num_vmids_available++;
}
}
static void evict_vmids(struct core_vmid *core_vmid)
{
int i;
uint16_t ord = dc_get_vmid_use_vector(core_vmid->dc);
// At this point any positions with value 0 are unused vmids, evict them
for (i = 1; i < core_vmid->num_vmid; i++) {
if (!(ord & (1u << i)))
clear_entry_from_vmid_table(core_vmid, i);
}
}
// A return value of -1 indicates the vmid table is uninitialized or the ptb does not exist in the table
static int get_existing_vmid_for_ptb(struct core_vmid *core_vmid, uint64_t ptb)
{
int i;
for (i = 0; i < core_vmid->num_vmid; i++) {
if (core_vmid->ptb_assigned_to_vmid[i] == ptb)
return i;
}
return -1;
}
// Expected to be called only when there's an available vmid
static int get_next_available_vmid(struct core_vmid *core_vmid)
{
int i;
for (i = 1; i < core_vmid->num_vmid; i++) {
if (core_vmid->ptb_assigned_to_vmid[i] == 0)
return i;
}
return -1;
}
uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
{
struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
int vmid = 0;
// Physical address gets vmid 0
if (ptb == 0)
return 0;
vmid = get_existing_vmid_for_ptb(core_vmid, ptb);
if (vmid == -1) {
struct dc_virtual_addr_space_config va_config = core_vmid->base_config;
va_config.page_table_base_addr = ptb;
if (core_vmid->num_vmids_available == 0)
evict_vmids(core_vmid);
vmid = get_next_available_vmid(core_vmid);
if (vmid != -1) {
add_ptb_to_table(core_vmid, vmid, ptb);
dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
} else
ASSERT(0);
}
return vmid;
}
void mod_vmid_reset(struct mod_vmid *mod_vmid)
{
struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
core_vmid->num_vmids_available = core_vmid->num_vmid - 1;
memset(core_vmid->ptb_assigned_to_vmid, 0, sizeof(core_vmid->ptb_assigned_to_vmid[0]) * MAX_VMID);
}
struct mod_vmid *mod_vmid_create(
struct dc *dc,
unsigned int num_vmid,
struct dc_virtual_addr_space_config *va_config)
{
struct core_vmid *core_vmid;
if (num_vmid <= 1)
goto fail_no_vm_ctx;
if (dc == NULL)
goto fail_dc_null;
core_vmid = kzalloc(sizeof(struct core_vmid), GFP_KERNEL);
if (core_vmid == NULL)
goto fail_alloc_context;
core_vmid->dc = dc;
core_vmid->num_vmid = num_vmid;
core_vmid->num_vmids_available = num_vmid - 1;
core_vmid->base_config = *va_config;
memset(core_vmid->ptb_assigned_to_vmid, 0, sizeof(core_vmid->ptb_assigned_to_vmid[0]) * MAX_VMID);
return &core_vmid->public;
fail_no_vm_ctx:
fail_alloc_context:
fail_dc_null:
return NULL;
}
void mod_vmid_destroy(struct mod_vmid *mod_vmid)
{
if (mod_vmid != NULL) {
struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
kfree(core_vmid);
}
}
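/* Minimal usage sketch (illustrative only; 'dc', 'va_config', the vmid count
 * and the page table base 'ptb' are assumed to be provided by the caller):
 *
 *	struct mod_vmid *vmid_mgr = mod_vmid_create(dc, 16, &va_config);
 *
 *	uint8_t vmid = mod_vmid_get_for_ptb(vmid_mgr, ptb);
 *	// vmid 0 is reserved for physical addressing (ptb == 0); other page
 *	// table bases are assigned vmids 1..num_vmid-1, with unused entries
 *	// evicted when the table is full.
 *
 *	mod_vmid_destroy(vmid_mgr);
 */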
| linux-master | drivers/gpu/drm/amd/display/modules/vmid/vmid.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc.h"
#include "opp.h"
#include "color_gamma.h"
/* When calculating LUT values the first region and at least one subsequent
* region are calculated with full precision. These defines are a demarcation
* of where the second region starts and ends.
* These are hardcoded values to avoid recalculating them in loops.
*/
#define PRECISE_LUT_REGION_START 224
#define PRECISE_LUT_REGION_END 239
static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
// these are helpers for calculations to reduce stack usage
// do not depend on these being preserved across calls
/* Helper to optimize the gamma calculation; only used in translate_from_linear_space,
 * in particular for the dc_fixpt_pow function, which is very expensive.
 * The idea is that our regions for X points are exponential and currently they all use
 * the same number of points (NUM_PTS_IN_REGION), and in each region every point
 * is exactly 2x the one at the same index in the previous region. In other words
 * X[i] = 2 * X[i-NUM_PTS_IN_REGION] for i >= 16.
 * The other fact is that (2x)^gamma = 2^gamma * x^gamma.
 * So we compute and save x^gamma for the first region (16 points), and for every
 * subsequent region we just multiply by 2^gamma, which is computed once; saving each
 * result lets us derive all the remaining values recursively.
 */
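/* Worked example (illustrative, for a plain 2.2 regamma): translate_from_linear_space
 * below needs x^(1/2.2). Because X[i] = 2 * X[i-NUM_PTS_IN_REGION], we have
 * X[i]^(1/2.2) = 2^(1/2.2) * X[i-NUM_PTS_IN_REGION]^(1/2.2), so outside the
 * full-precision regions each new value is just the cached value from 16 points
 * earlier multiplied by the constant gamma_of_2 = 2^(1/2.2) ~= 1.37, replacing a
 * dc_fixpt_pow call with a single multiply.
 */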
/*
* Regamma coefficients are used for both regamma and degamma. Degamma
* coefficients are calculated in our formula using the regamma coefficients.
*/
/*sRGB 709 2.2 2.4 P3*/
static const int32_t numerator01[] = { 31308, 180000, 0, 0, 0};
static const int32_t numerator02[] = { 12920, 4500, 0, 0, 0};
static const int32_t numerator03[] = { 55, 99, 0, 0, 0};
static const int32_t numerator04[] = { 55, 99, 0, 0, 0};
static const int32_t numerator05[] = { 2400, 2222, 2200, 2400, 2600};
/* one-time setup of X points */
void setup_x_points_distribution(void)
{
struct fixed31_32 region_size = dc_fixpt_from_int(128);
int32_t segment;
uint32_t seg_offset;
uint32_t index;
struct fixed31_32 increment;
coordinates_x[MAX_HW_POINTS].x = region_size;
coordinates_x[MAX_HW_POINTS + 1].x = region_size;
for (segment = 6; segment > (6 - NUM_REGIONS); segment--) {
region_size = dc_fixpt_div_int(region_size, 2);
increment = dc_fixpt_div_int(region_size,
NUM_PTS_IN_REGION);
seg_offset = (segment + (NUM_REGIONS - 7)) * NUM_PTS_IN_REGION;
coordinates_x[seg_offset].x = region_size;
for (index = seg_offset + 1;
index < seg_offset + NUM_PTS_IN_REGION;
index++) {
coordinates_x[index].x = dc_fixpt_add
(coordinates_x[index-1].x, increment);
}
}
}
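/* Illustrative layout (assuming NUM_REGIONS == 32 and NUM_PTS_IN_REGION == 16,
 * consistent with the 2^-25..2^7 span noted in precompute_de_pq below): the
 * top region covers [64, 128) in 16 uniform steps of 4, the next one down
 * covers [32, 64) in steps of 2, and so on. Each region is one octave, and
 * every point is exactly twice the point 16 entries earlier.
 */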
void log_x_points_distribution(struct dal_logger *logger)
{
int i = 0;
if (logger != NULL) {
LOG_GAMMA_WRITE("Log X Distribution\n");
for (i = 0; i < MAX_HW_POINTS; i++)
LOG_GAMMA_WRITE("%llu\n", coordinates_x[i].x.value);
}
}
static void compute_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
{
/* consts for PQ gamma formula. */
const struct fixed31_32 m1 =
dc_fixpt_from_fraction(159301758, 1000000000);
const struct fixed31_32 m2 =
dc_fixpt_from_fraction(7884375, 100000);
const struct fixed31_32 c1 =
dc_fixpt_from_fraction(8359375, 10000000);
const struct fixed31_32 c2 =
dc_fixpt_from_fraction(188515625, 10000000);
const struct fixed31_32 c3 =
dc_fixpt_from_fraction(186875, 10000);
struct fixed31_32 l_pow_m1;
struct fixed31_32 base;
if (dc_fixpt_lt(in_x, dc_fixpt_zero))
in_x = dc_fixpt_zero;
l_pow_m1 = dc_fixpt_pow(in_x, m1);
base = dc_fixpt_div(
dc_fixpt_add(c1,
(dc_fixpt_mul(c2, l_pow_m1))),
dc_fixpt_add(dc_fixpt_one,
(dc_fixpt_mul(c3, l_pow_m1))));
*out_y = dc_fixpt_pow(base, m2);
}
static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
{
/* consts for dePQ gamma formula. */
const struct fixed31_32 m1 =
dc_fixpt_from_fraction(159301758, 1000000000);
const struct fixed31_32 m2 =
dc_fixpt_from_fraction(7884375, 100000);
const struct fixed31_32 c1 =
dc_fixpt_from_fraction(8359375, 10000000);
const struct fixed31_32 c2 =
dc_fixpt_from_fraction(188515625, 10000000);
const struct fixed31_32 c3 =
dc_fixpt_from_fraction(186875, 10000);
struct fixed31_32 l_pow_m1;
struct fixed31_32 base, div;
struct fixed31_32 base2;
if (dc_fixpt_lt(in_x, dc_fixpt_zero))
in_x = dc_fixpt_zero;
l_pow_m1 = dc_fixpt_pow(in_x,
dc_fixpt_div(dc_fixpt_one, m2));
base = dc_fixpt_sub(l_pow_m1, c1);
div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1));
base2 = dc_fixpt_div(base, div);
// avoid complex numbers
if (dc_fixpt_lt(base2, dc_fixpt_zero))
base2 = dc_fixpt_sub(dc_fixpt_zero, base2);
*out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1));
}
/* de gamma, non-linear to linear */
static void compute_hlg_eotf(struct fixed31_32 in_x,
struct fixed31_32 *out_y,
uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
struct fixed31_32 a;
struct fixed31_32 b;
struct fixed31_32 c;
struct fixed31_32 threshold;
struct fixed31_32 x;
struct fixed31_32 scaling_factor =
dc_fixpt_from_fraction(max_luminance_nits, sdr_white_level);
a = dc_fixpt_from_fraction(17883277, 100000000);
b = dc_fixpt_from_fraction(28466892, 100000000);
c = dc_fixpt_from_fraction(55991073, 100000000);
threshold = dc_fixpt_from_fraction(1, 2);
if (dc_fixpt_lt(in_x, threshold)) {
x = dc_fixpt_mul(in_x, in_x);
x = dc_fixpt_div_int(x, 3);
} else {
x = dc_fixpt_sub(in_x, c);
x = dc_fixpt_div(x, a);
x = dc_fixpt_exp(x);
x = dc_fixpt_add(x, b);
x = dc_fixpt_div_int(x, 12);
}
*out_y = dc_fixpt_mul(x, scaling_factor);
}
/* re gamma, linear to non-linear */
static void compute_hlg_oetf(struct fixed31_32 in_x, struct fixed31_32 *out_y,
uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
struct fixed31_32 a;
struct fixed31_32 b;
struct fixed31_32 c;
struct fixed31_32 threshold;
struct fixed31_32 x;
struct fixed31_32 scaling_factor =
dc_fixpt_from_fraction(sdr_white_level, max_luminance_nits);
a = dc_fixpt_from_fraction(17883277, 100000000);
b = dc_fixpt_from_fraction(28466892, 100000000);
c = dc_fixpt_from_fraction(55991073, 100000000);
threshold = dc_fixpt_from_fraction(1, 12);
x = dc_fixpt_mul(in_x, scaling_factor);
if (dc_fixpt_lt(x, threshold)) {
x = dc_fixpt_mul(x, dc_fixpt_from_fraction(3, 1));
*out_y = dc_fixpt_pow(x, dc_fixpt_half);
} else {
x = dc_fixpt_mul(x, dc_fixpt_from_fraction(12, 1));
x = dc_fixpt_sub(x, b);
x = dc_fixpt_log(x);
x = dc_fixpt_mul(a, x);
*out_y = dc_fixpt_add(x, c);
}
}
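/* Worked example (illustrative): the two HLG helpers above are inverses at the
 * breakpoint. The EOTF maps E' = 1/2 to (1/2)^2 / 3 = 1/12 (before the
 * luminance scaling factor), and the OETF maps L = 1/12 back through
 * sqrt(3 * 1/12) = 1/2, which is why their thresholds are 1/2 and 1/12
 * respectively.
 */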
/* one-time pre-compute PQ values - only for sdr_white_level 80 */
void precompute_pq(void)
{
int i;
struct fixed31_32 x;
const struct hw_x_point *coord_x = coordinates_x + 32;
struct fixed31_32 scaling_factor =
dc_fixpt_from_fraction(80, 10000);
struct fixed31_32 *pq_table = mod_color_get_table(type_pq_table);
/* pow function has problems with arguments too small */
for (i = 0; i < 32; i++)
pq_table[i] = dc_fixpt_zero;
for (i = 32; i <= MAX_HW_POINTS; i++) {
x = dc_fixpt_mul(coord_x->x, scaling_factor);
compute_pq(x, &pq_table[i]);
++coord_x;
}
}
/* one-time pre-compute dePQ values - only for max pixel value 125 FP16 */
void precompute_de_pq(void)
{
int i;
struct fixed31_32 y;
uint32_t begin_index, end_index;
struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
/* X points span 2^-25 to 2^7.
 * De-gamma X spans 2^-12 to 2^0, so we skip the first -12 - (-25) = 13 regions.
 */
begin_index = 13 * NUM_PTS_IN_REGION;
end_index = begin_index + 12 * NUM_PTS_IN_REGION;
for (i = 0; i <= begin_index; i++)
de_pq_table[i] = dc_fixpt_zero;
for (; i <= end_index; i++) {
compute_de_pq(coordinates_x[i].x, &y);
de_pq_table[i] = dc_fixpt_mul(y, scaling_factor);
}
for (; i <= MAX_HW_POINTS; i++)
de_pq_table[i] = de_pq_table[i-1];
}
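/* Illustrative index arithmetic (assuming NUM_PTS_IN_REGION == 16): the loops
 * above zero everything up to begin_index = 13 * 16 = 208 (the 13 skipped
 * regions below 2^-12), evaluate the de-PQ curve from there up to
 * end_index = 208 + 12 * 16 = 400 (the 12 regions spanning 2^-12..2^0), and
 * clamp every entry past index 400 to the last computed value.
 */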
struct dividers {
struct fixed31_32 divider1;
struct fixed31_32 divider2;
struct fixed31_32 divider3;
};
static bool build_coefficients(struct gamma_coefficients *coefficients,
enum dc_transfer_func_predefined type)
{
uint32_t i = 0;
uint32_t index = 0;
bool ret = true;
if (type == TRANSFER_FUNCTION_SRGB)
index = 0;
else if (type == TRANSFER_FUNCTION_BT709)
index = 1;
else if (type == TRANSFER_FUNCTION_GAMMA22)
index = 2;
else if (type == TRANSFER_FUNCTION_GAMMA24)
index = 3;
else if (type == TRANSFER_FUNCTION_GAMMA26)
index = 4;
else {
ret = false;
goto release;
}
do {
coefficients->a0[i] = dc_fixpt_from_fraction(
numerator01[index], 10000000);
coefficients->a1[i] = dc_fixpt_from_fraction(
numerator02[index], 1000);
coefficients->a2[i] = dc_fixpt_from_fraction(
numerator03[index], 1000);
coefficients->a3[i] = dc_fixpt_from_fraction(
numerator04[index], 1000);
coefficients->user_gamma[i] = dc_fixpt_from_fraction(
numerator05[index], 1000);
++i;
} while (i != ARRAY_SIZE(coefficients->a0));
release:
return ret;
}
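/* Worked example (illustrative): for TRANSFER_FUNCTION_SRGB (index 0) the loop
 * above produces the canonical sRGB constants a0 = 31308/10000000 = 0.0031308,
 * a1 = 12920/1000 = 12.92, a2 = a3 = 0.055 and user_gamma = 2.4, which
 * translate_from_linear_space() below applies as
 * (1 + a3) * x^(1/gamma) - a2 above the a0 breakpoint and a1 * x below it.
 */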
static struct fixed31_32 translate_from_linear_space(
struct translate_from_linear_space_args *args)
{
const struct fixed31_32 one = dc_fixpt_from_int(1);
struct fixed31_32 scratch_1, scratch_2;
struct calculate_buffer *cal_buffer = args->cal_buffer;
if (dc_fixpt_le(one, args->arg))
return one;
if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0))) {
scratch_1 = dc_fixpt_add(one, args->a3);
scratch_2 = dc_fixpt_pow(
dc_fixpt_neg(args->arg),
dc_fixpt_recip(args->gamma));
scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
scratch_1 = dc_fixpt_sub(args->a2, scratch_1);
return scratch_1;
} else if (dc_fixpt_le(args->a0, args->arg)) {
if (cal_buffer->buffer_index == 0) {
cal_buffer->gamma_of_2 = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_recip(args->gamma));
}
scratch_1 = dc_fixpt_add(one, args->a3);
/* In the first region (first 16 points) and in the
* region delimited by START/END we calculate with
* full precision to avoid error accumulation.
*/
if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START &&
cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) ||
(cal_buffer->buffer_index < 16))
scratch_2 = dc_fixpt_pow(args->arg,
dc_fixpt_recip(args->gamma));
else
scratch_2 = dc_fixpt_mul(cal_buffer->gamma_of_2,
cal_buffer->buffer[cal_buffer->buffer_index%16]);
if (cal_buffer->buffer_index != -1) {
cal_buffer->buffer[cal_buffer->buffer_index%16] = scratch_2;
cal_buffer->buffer_index++;
}
scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
scratch_1 = dc_fixpt_sub(scratch_1, args->a2);
return scratch_1;
} else
return dc_fixpt_mul(args->arg, args->a1);
}
static struct fixed31_32 translate_from_linear_space_long(
struct translate_from_linear_space_args *args)
{
const struct fixed31_32 one = dc_fixpt_from_int(1);
if (dc_fixpt_lt(one, args->arg))
return one;
if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0)))
return dc_fixpt_sub(
args->a2,
dc_fixpt_mul(
dc_fixpt_add(
one,
args->a3),
dc_fixpt_pow(
dc_fixpt_neg(args->arg),
dc_fixpt_recip(args->gamma))));
else if (dc_fixpt_le(args->a0, args->arg))
return dc_fixpt_sub(
dc_fixpt_mul(
dc_fixpt_add(
one,
args->a3),
dc_fixpt_pow(
args->arg,
dc_fixpt_recip(args->gamma))),
args->a2);
else
return dc_fixpt_mul(args->arg, args->a1);
}
static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf, struct calculate_buffer *cal_buffer)
{
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
struct translate_from_linear_space_args scratch_gamma_args;
scratch_gamma_args.arg = arg;
scratch_gamma_args.a0 = dc_fixpt_zero;
scratch_gamma_args.a1 = dc_fixpt_zero;
scratch_gamma_args.a2 = dc_fixpt_zero;
scratch_gamma_args.a3 = dc_fixpt_zero;
scratch_gamma_args.cal_buffer = cal_buffer;
scratch_gamma_args.gamma = gamma;
if (use_eetf)
return translate_from_linear_space_long(&scratch_gamma_args);
return translate_from_linear_space(&scratch_gamma_args);
}
static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
struct fixed31_32 a1,
struct fixed31_32 a2,
struct fixed31_32 a3,
struct fixed31_32 gamma)
{
struct fixed31_32 linear;
a0 = dc_fixpt_mul(a0, a1);
if (dc_fixpt_le(arg, dc_fixpt_neg(a0)))
linear = dc_fixpt_neg(
dc_fixpt_pow(
dc_fixpt_div(
dc_fixpt_sub(a2, arg),
dc_fixpt_add(
dc_fixpt_one, a3)), gamma));
else if (dc_fixpt_le(dc_fixpt_neg(a0), arg) &&
dc_fixpt_le(arg, a0))
linear = dc_fixpt_div(arg, a1);
else
linear = dc_fixpt_pow(
dc_fixpt_div(
dc_fixpt_add(a2, arg),
dc_fixpt_add(
dc_fixpt_one, a3)), gamma);
return linear;
}
static struct fixed31_32 translate_from_linear_space_ex(
struct fixed31_32 arg,
struct gamma_coefficients *coeff,
uint32_t color_index,
struct calculate_buffer *cal_buffer)
{
struct translate_from_linear_space_args scratch_gamma_args;
scratch_gamma_args.arg = arg;
scratch_gamma_args.a0 = coeff->a0[color_index];
scratch_gamma_args.a1 = coeff->a1[color_index];
scratch_gamma_args.a2 = coeff->a2[color_index];
scratch_gamma_args.a3 = coeff->a3[color_index];
scratch_gamma_args.gamma = coeff->user_gamma[color_index];
scratch_gamma_args.cal_buffer = cal_buffer;
return translate_from_linear_space(&scratch_gamma_args);
}
static inline struct fixed31_32 translate_to_linear_space_ex(
struct fixed31_32 arg,
struct gamma_coefficients *coeff,
uint32_t color_index)
{
return translate_to_linear_space(
arg,
coeff->a0[color_index],
coeff->a1[color_index],
coeff->a2[color_index],
coeff->a3[color_index],
coeff->user_gamma[color_index]);
}
static bool find_software_points(
const struct dc_gamma *ramp,
const struct gamma_pixel *axis_x,
struct fixed31_32 hw_point,
enum channel_name channel,
uint32_t *index_to_start,
uint32_t *index_left,
uint32_t *index_right,
enum hw_point_position *pos)
{
const uint32_t max_number = ramp->num_entries + 3;
struct fixed31_32 left, right;
uint32_t i = *index_to_start;
while (i < max_number) {
if (channel == CHANNEL_NAME_RED) {
left = axis_x[i].r;
if (i < max_number - 1)
right = axis_x[i + 1].r;
else
right = axis_x[max_number - 1].r;
} else if (channel == CHANNEL_NAME_GREEN) {
left = axis_x[i].g;
if (i < max_number - 1)
right = axis_x[i + 1].g;
else
right = axis_x[max_number - 1].g;
} else {
left = axis_x[i].b;
if (i < max_number - 1)
right = axis_x[i + 1].b;
else
right = axis_x[max_number - 1].b;
}
if (dc_fixpt_le(left, hw_point) &&
dc_fixpt_le(hw_point, right)) {
*index_to_start = i;
*index_left = i;
if (i < max_number - 1)
*index_right = i + 1;
else
*index_right = max_number - 1;
*pos = HW_POINT_POSITION_MIDDLE;
return true;
} else if ((i == *index_to_start) &&
dc_fixpt_le(hw_point, left)) {
*index_to_start = i;
*index_left = i;
*index_right = i;
*pos = HW_POINT_POSITION_LEFT;
return true;
} else if ((i == max_number - 1) &&
dc_fixpt_le(right, hw_point)) {
*index_to_start = i;
*index_left = i;
*index_right = i;
*pos = HW_POINT_POSITION_RIGHT;
return true;
}
++i;
}
return false;
}
static bool build_custom_gamma_mapping_coefficients_worker(
const struct dc_gamma *ramp,
struct pixel_gamma_point *coeff,
const struct hw_x_point *coordinates_x,
const struct gamma_pixel *axis_x,
enum channel_name channel,
uint32_t number_of_points)
{
uint32_t i = 0;
while (i <= number_of_points) {
struct fixed31_32 coord_x;
uint32_t index_to_start = 0;
uint32_t index_left = 0;
uint32_t index_right = 0;
enum hw_point_position hw_pos;
struct gamma_point *point;
struct fixed31_32 left_pos;
struct fixed31_32 right_pos;
if (channel == CHANNEL_NAME_RED)
coord_x = coordinates_x[i].regamma_y_red;
else if (channel == CHANNEL_NAME_GREEN)
coord_x = coordinates_x[i].regamma_y_green;
else
coord_x = coordinates_x[i].regamma_y_blue;
if (!find_software_points(
ramp, axis_x, coord_x, channel,
&index_to_start, &index_left, &index_right, &hw_pos)) {
BREAK_TO_DEBUGGER();
return false;
}
if (index_left >= ramp->num_entries + 3) {
BREAK_TO_DEBUGGER();
return false;
}
if (index_right >= ramp->num_entries + 3) {
BREAK_TO_DEBUGGER();
return false;
}
if (channel == CHANNEL_NAME_RED) {
point = &coeff[i].r;
left_pos = axis_x[index_left].r;
right_pos = axis_x[index_right].r;
} else if (channel == CHANNEL_NAME_GREEN) {
point = &coeff[i].g;
left_pos = axis_x[index_left].g;
right_pos = axis_x[index_right].g;
} else {
point = &coeff[i].b;
left_pos = axis_x[index_left].b;
right_pos = axis_x[index_right].b;
}
if (hw_pos == HW_POINT_POSITION_MIDDLE)
point->coeff = dc_fixpt_div(
dc_fixpt_sub(
coord_x,
left_pos),
dc_fixpt_sub(
right_pos,
left_pos));
else if (hw_pos == HW_POINT_POSITION_LEFT)
point->coeff = dc_fixpt_zero;
else if (hw_pos == HW_POINT_POSITION_RIGHT)
point->coeff = dc_fixpt_from_int(2);
else {
BREAK_TO_DEBUGGER();
return false;
}
point->left_index = index_left;
point->right_index = index_right;
point->pos = hw_pos;
++i;
}
return true;
}
static struct fixed31_32 calculate_mapped_value(
struct pwl_float_data *rgb,
const struct pixel_gamma_point *coeff,
enum channel_name channel,
uint32_t max_index)
{
const struct gamma_point *point;
struct fixed31_32 result;
if (channel == CHANNEL_NAME_RED)
point = &coeff->r;
else if (channel == CHANNEL_NAME_GREEN)
point = &coeff->g;
else
point = &coeff->b;
if ((point->left_index < 0) || (point->left_index > max_index)) {
BREAK_TO_DEBUGGER();
return dc_fixpt_zero;
}
if ((point->right_index < 0) || (point->right_index > max_index)) {
BREAK_TO_DEBUGGER();
return dc_fixpt_zero;
}
if (point->pos == HW_POINT_POSITION_MIDDLE)
if (channel == CHANNEL_NAME_RED)
result = dc_fixpt_add(
dc_fixpt_mul(
point->coeff,
dc_fixpt_sub(
rgb[point->right_index].r,
rgb[point->left_index].r)),
rgb[point->left_index].r);
else if (channel == CHANNEL_NAME_GREEN)
result = dc_fixpt_add(
dc_fixpt_mul(
point->coeff,
dc_fixpt_sub(
rgb[point->right_index].g,
rgb[point->left_index].g)),
rgb[point->left_index].g);
else
result = dc_fixpt_add(
dc_fixpt_mul(
point->coeff,
dc_fixpt_sub(
rgb[point->right_index].b,
rgb[point->left_index].b)),
rgb[point->left_index].b);
else if (point->pos == HW_POINT_POSITION_LEFT) {
BREAK_TO_DEBUGGER();
result = dc_fixpt_zero;
} else {
result = dc_fixpt_one;
}
return result;
}
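/* Build the PQ (SMPTE ST 2084) regamma curve. For the default 80 nit SDR
* white level a precomputed table is used; otherwise each X coordinate is
* rescaled and run through compute_pq(). The first 32 points are skipped
* because they are too small for the fixed point power math.
*/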
static void build_pq(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x,
uint32_t sdr_white_level)
{
uint32_t i, start_index;
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinate_x;
struct fixed31_32 x;
struct fixed31_32 output;
struct fixed31_32 scaling_factor =
dc_fixpt_from_fraction(sdr_white_level, 10000);
struct fixed31_32 *pq_table = mod_color_get_table(type_pq_table);
if (!mod_color_is_table_init(type_pq_table) && sdr_white_level == 80) {
precompute_pq();
mod_color_set_table_init_state(type_pq_table, true);
}
/* TODO: start index is from segment 2^-24, skipping first segment
* due to x values too small for power calculations
*/
start_index = 32;
rgb += start_index;
coord_x += start_index;
for (i = start_index; i <= hw_points_num; i++) {
/* Multiply 0.008 as regamma is 0-1 and FP16 input is 0-125.
* FP 1.0 = 80nits
*/
if (sdr_white_level == 80) {
output = pq_table[i];
} else {
x = dc_fixpt_mul(coord_x->x, scaling_factor);
compute_pq(x, &output);
}
/* should really not happen? */
if (dc_fixpt_lt(output, dc_fixpt_zero))
output = dc_fixpt_zero;
else if (dc_fixpt_lt(dc_fixpt_one, output))
output = dc_fixpt_one;
rgb->r = output;
rgb->g = output;
rgb->b = output;
++coord_x;
++rgb;
}
}
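/* Build the PQ degamma curve from the precomputed de-PQ table, clamping the
* output to the 0..125 range (FP16 scale where 1.0 = 80 nits).
*/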
static void build_de_pq(struct pwl_float_data_ex *de_pq,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x)
{
uint32_t i;
struct fixed31_32 output;
struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
if (!mod_color_is_table_init(type_de_pq_table)) {
precompute_de_pq();
mod_color_set_table_init_state(type_de_pq_table, true);
}
for (i = 0; i <= hw_points_num; i++) {
output = de_pq_table[i];
/* should really not happen? */
if (dc_fixpt_lt(output, dc_fixpt_zero))
output = dc_fixpt_zero;
else if (dc_fixpt_lt(scaling_factor, output))
output = scaling_factor;
de_pq[i].r = output;
de_pq[i].g = output;
de_pq[i].b = output;
}
}
static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x,
enum dc_transfer_func_predefined type,
struct calculate_buffer *cal_buffer)
{
uint32_t i;
bool ret = false;
struct gamma_coefficients *coeff;
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinate_x;
coeff = kvzalloc(sizeof(*coeff), GFP_KERNEL);
if (!coeff)
goto release;
if (!build_coefficients(coeff, type))
goto release;
memset(cal_buffer->buffer, 0, NUM_PTS_IN_REGION * sizeof(struct fixed31_32));
cal_buffer->buffer_index = 0; // see variable definition for more info
i = 0;
while (i <= hw_points_num) {
/* TODO use y vs r,g,b */
rgb->r = translate_from_linear_space_ex(
coord_x->x, coeff, 0, cal_buffer);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
++rgb;
++i;
}
cal_buffer->buffer_index = -1;
ret = true;
release:
kvfree(coeff);
return ret;
}
static void hermite_spline_eetf(struct fixed31_32 input_x,
struct fixed31_32 max_display,
struct fixed31_32 min_display,
struct fixed31_32 max_content,
struct fixed31_32 *out_x)
{
struct fixed31_32 min_lum_pq;
struct fixed31_32 max_lum_pq;
struct fixed31_32 max_content_pq;
struct fixed31_32 ks;
struct fixed31_32 E1;
struct fixed31_32 E2;
struct fixed31_32 E3;
struct fixed31_32 t;
struct fixed31_32 t2;
struct fixed31_32 t3;
struct fixed31_32 two;
struct fixed31_32 three;
struct fixed31_32 temp1;
struct fixed31_32 temp2;
struct fixed31_32 a = dc_fixpt_from_fraction(15, 10);
struct fixed31_32 b = dc_fixpt_from_fraction(5, 10);
struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small
if (dc_fixpt_eq(max_content, dc_fixpt_zero)) {
*out_x = dc_fixpt_zero;
return;
}
compute_pq(input_x, &E1);
compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq);
compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq);
compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird
a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent
ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b
if (dc_fixpt_lt(E1, ks))
E2 = E1;
else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) {
if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks)))
// t = (E1 - ks) / (1 - ks)
t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
dc_fixpt_sub(dc_fixpt_one, ks));
else
t = dc_fixpt_zero;
two = dc_fixpt_from_int(2);
three = dc_fixpt_from_int(3);
t2 = dc_fixpt_mul(t, t);
t3 = dc_fixpt_mul(t2, t);
temp1 = dc_fixpt_mul(two, t3);
temp2 = dc_fixpt_mul(three, t2);
// (2t^3 - 3t^2 + 1) * ks
E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one,
dc_fixpt_sub(temp1, temp2)));
// (-2t^3 + 3t^2) * max_lum_pq
E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq,
dc_fixpt_sub(temp2, temp1)));
temp1 = dc_fixpt_mul(two, t2);
temp2 = dc_fixpt_sub(dc_fixpt_one, ks);
// (t^3 - 2t^2 + t) * (1-ks)
E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
} else
E2 = dc_fixpt_one;
temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
temp2 = dc_fixpt_mul(temp1, temp1);
temp2 = dc_fixpt_mul(temp2, temp2);
// temp2 = (1-E2)^4
E3 = dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2));
compute_de_pq(E3, out_x);
*out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content));
}
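/* Build the FreeSync HDR regamma curve (gamma 2.2 based). When the content
* peak exceeds the display peak, the hermite spline EETF above is applied
* for tone mapping; otherwise X is simply scaled by the display peak. The
* first point past the clip threshold is interpolated so the curve hits 1.0
* at max_display/sdr_white_level, and the remaining points are held at the
* clip value.
*/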
static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x,
const struct hdr_tm_params *fs_params,
struct calculate_buffer *cal_buffer)
{
uint32_t i;
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinate_x;
const struct hw_x_point *prv_coord_x = coord_x;
struct fixed31_32 scaledX = dc_fixpt_zero;
struct fixed31_32 scaledX1 = dc_fixpt_zero;
struct fixed31_32 max_display;
struct fixed31_32 min_display;
struct fixed31_32 max_content;
struct fixed31_32 clip = dc_fixpt_one;
struct fixed31_32 output;
bool use_eetf = false;
bool is_clipped = false;
struct fixed31_32 sdr_white_level;
struct fixed31_32 coordX_diff;
struct fixed31_32 out_dist_max;
struct fixed31_32 bright_norm;
if (fs_params->max_content == 0 ||
fs_params->max_display == 0)
return false;
max_display = dc_fixpt_from_int(fs_params->max_display);
min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
max_content = dc_fixpt_from_int(fs_params->max_content);
sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
min_display = dc_fixpt_from_fraction(1, 10);
if (fs_params->max_display < 100) // cap at 100 at the top
max_display = dc_fixpt_from_int(100);
// only max used, we don't adjust min luminance
if (fs_params->max_content > fs_params->max_display)
use_eetf = true;
else
max_content = max_display;
if (!use_eetf)
cal_buffer->buffer_index = 0; // see var definition for more info
rgb += 32; // first 32 points have problems with fixed point, too small
coord_x += 32;
for (i = 32; i <= hw_points_num; i++) {
if (!is_clipped) {
if (use_eetf) {
/* max content is equal to 1 */
scaledX1 = dc_fixpt_div(coord_x->x,
dc_fixpt_div(max_content, sdr_white_level));
hermite_spline_eetf(scaledX1, max_display, min_display,
max_content, &scaledX);
} else
scaledX = dc_fixpt_div(coord_x->x,
dc_fixpt_div(max_display, sdr_white_level));
if (dc_fixpt_lt(scaledX, clip)) {
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
output = calculate_gamma22(scaledX, use_eetf, cal_buffer);
// Ensure output respects reasonable boundaries
output = dc_fixpt_clamp(output, dc_fixpt_zero, dc_fixpt_one);
rgb->r = output;
rgb->g = output;
rgb->b = output;
} else {
/* Here clipping happens for the first time */
is_clipped = true;
/* The next few lines implement the equation
* output = prev_out +
* (coord_x->x - prev_coord_x->x) *
* (1.0 - prev_out) /
* (maxDisp/sdr_white_level - prevCoordX)
*
* This equation interpolates the first point
* after max_display/80 so that the slope from
* hw_x_before_max and hw_x_after_max is such
* that we hit Y=1.0 at max_display/80.
*/
coordX_diff = dc_fixpt_sub(coord_x->x, prv_coord_x->x);
out_dist_max = dc_fixpt_sub(dc_fixpt_one, output);
bright_norm = dc_fixpt_div(max_display, sdr_white_level);
output = dc_fixpt_add(
output, dc_fixpt_mul(
coordX_diff, dc_fixpt_div(
out_dist_max,
dc_fixpt_sub(bright_norm, prv_coord_x->x)
)
)
);
/* Relaxing the maximum boundary to 1.07 (instead of 1.0)
* because the last point in the curve must be such that
* the maximum display pixel brightness interpolates to
* exactly 1.0. The worst case scenario was calculated
* around 1.057, so the limit of 1.07 leaves some safety
* margin.
*/
output = dc_fixpt_clamp(output, dc_fixpt_zero,
dc_fixpt_from_fraction(107, 100));
rgb->r = output;
rgb->g = output;
rgb->b = output;
}
} else {
/* Every other clipping after the first
* one is dealt with here
*/
rgb->r = clip;
rgb->g = clip;
rgb->b = clip;
}
prv_coord_x = coord_x;
++coord_x;
++rgb;
}
cal_buffer->buffer_index = -1;
return true;
}
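/* Build a predefined degamma curve: points below the 2^-12 region are forced
* to zero, points in the 2^-12..2^0 regions go through the inverse transfer
* function, and everything above is clamped to one.
*/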
static bool build_degamma(struct pwl_float_data_ex *curve,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x, enum dc_transfer_func_predefined type)
{
uint32_t i;
struct gamma_coefficients coeff;
uint32_t begin_index, end_index;
bool ret = false;
if (!build_coefficients(&coeff, type))
goto release;
i = 0;
/* X points are 2^-25 to 2^7
* De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions
*/
begin_index = 13 * NUM_PTS_IN_REGION;
end_index = begin_index + 12 * NUM_PTS_IN_REGION;
while (i != begin_index) {
curve[i].r = dc_fixpt_zero;
curve[i].g = dc_fixpt_zero;
curve[i].b = dc_fixpt_zero;
i++;
}
while (i != end_index) {
curve[i].r = translate_to_linear_space_ex(
coordinate_x[i].x, &coeff, 0);
curve[i].g = curve[i].r;
curve[i].b = curve[i].r;
i++;
}
while (i != hw_points_num + 1) {
curve[i].r = dc_fixpt_one;
curve[i].g = dc_fixpt_one;
curve[i].b = dc_fixpt_one;
i++;
}
ret = true;
release:
return ret;
}
static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x,
uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
uint32_t i;
struct pwl_float_data_ex *rgb = degamma;
const struct hw_x_point *coord_x = coordinate_x;
i = 0;
// check when i == 434
while (i != hw_points_num + 1) {
compute_hlg_eotf(coord_x->x, &rgb->r, sdr_white_level, max_luminance_nits);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
++rgb;
++i;
}
}
static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x,
uint32_t sdr_white_level, uint32_t max_luminance_nits)
{
uint32_t i;
struct pwl_float_data_ex *rgb = regamma;
const struct hw_x_point *coord_x = coordinate_x;
i = 0;
// when i == 471
while (i != hw_points_num + 1) {
compute_hlg_oetf(coord_x->x, &rgb->r, sdr_white_level, max_luminance_nits);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
++rgb;
++i;
}
}
static void scale_gamma(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
{
const struct fixed31_32 max_driver = dc_fixpt_from_int(0xFFFF);
const struct fixed31_32 max_os = dc_fixpt_from_int(0xFF00);
struct fixed31_32 scaler = max_os;
uint32_t i;
struct pwl_float_data *rgb = pwl_rgb;
struct pwl_float_data *rgb_last = rgb + ramp->num_entries - 1;
i = 0;
do {
if (dc_fixpt_lt(max_os, ramp->entries.red[i]) ||
dc_fixpt_lt(max_os, ramp->entries.green[i]) ||
dc_fixpt_lt(max_os, ramp->entries.blue[i])) {
scaler = max_driver;
break;
}
++i;
} while (i != ramp->num_entries);
i = 0;
do {
rgb->r = dc_fixpt_div(
ramp->entries.red[i], scaler);
rgb->g = dc_fixpt_div(
ramp->entries.green[i], scaler);
rgb->b = dc_fixpt_div(
ramp->entries.blue[i], scaler);
++rgb;
++i;
} while (i != ramp->num_entries);
rgb->r = dc_fixpt_mul(rgb_last->r,
dividers.divider1);
rgb->g = dc_fixpt_mul(rgb_last->g,
dividers.divider1);
rgb->b = dc_fixpt_mul(rgb_last->b,
dividers.divider1);
++rgb;
rgb->r = dc_fixpt_mul(rgb_last->r,
dividers.divider2);
rgb->g = dc_fixpt_mul(rgb_last->g,
dividers.divider2);
rgb->b = dc_fixpt_mul(rgb_last->b,
dividers.divider2);
++rgb;
rgb->r = dc_fixpt_mul(rgb_last->r,
dividers.divider3);
rgb->g = dc_fixpt_mul(rgb_last->g,
dividers.divider3);
rgb->b = dc_fixpt_mul(rgb_last->b,
dividers.divider3);
}
static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
{
uint32_t i;
struct fixed31_32 min = dc_fixpt_zero;
struct fixed31_32 max = dc_fixpt_one;
struct fixed31_32 delta = dc_fixpt_zero;
struct fixed31_32 offset = dc_fixpt_zero;
for (i = 0 ; i < ramp->num_entries; i++) {
if (dc_fixpt_lt(ramp->entries.red[i], min))
min = ramp->entries.red[i];
if (dc_fixpt_lt(ramp->entries.green[i], min))
min = ramp->entries.green[i];
if (dc_fixpt_lt(ramp->entries.blue[i], min))
min = ramp->entries.blue[i];
if (dc_fixpt_lt(max, ramp->entries.red[i]))
max = ramp->entries.red[i];
if (dc_fixpt_lt(max, ramp->entries.green[i]))
max = ramp->entries.green[i];
if (dc_fixpt_lt(max, ramp->entries.blue[i]))
max = ramp->entries.blue[i];
}
if (dc_fixpt_lt(min, dc_fixpt_zero))
delta = dc_fixpt_neg(min);
offset = dc_fixpt_add(min, max);
for (i = 0 ; i < ramp->num_entries; i++) {
pwl_rgb[i].r = dc_fixpt_div(
dc_fixpt_add(
ramp->entries.red[i], delta), offset);
pwl_rgb[i].g = dc_fixpt_div(
dc_fixpt_add(
ramp->entries.green[i], delta), offset);
pwl_rgb[i].b = dc_fixpt_div(
dc_fixpt_add(
ramp->entries.blue[i], delta), offset);
}
pwl_rgb[i].r = dc_fixpt_sub(dc_fixpt_mul_int(
pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
pwl_rgb[i].g = dc_fixpt_sub(dc_fixpt_mul_int(
pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
pwl_rgb[i].b = dc_fixpt_sub(dc_fixpt_mul_int(
pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
++i;
pwl_rgb[i].r = dc_fixpt_sub(dc_fixpt_mul_int(
pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
pwl_rgb[i].g = dc_fixpt_sub(dc_fixpt_mul_int(
pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
pwl_rgb[i].b = dc_fixpt_sub(dc_fixpt_mul_int(
pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
}
/* todo: all these scale_gamma functions are inherently the same but
* take different structures as params or different format for ramp
* values. We could probably implement it in a more generic fashion
*/
static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
const struct regamma_ramp *ramp,
struct dividers dividers)
{
unsigned short max_driver = 0xFFFF;
unsigned short max_os = 0xFF00;
unsigned short scaler = max_os;
uint32_t i;
struct pwl_float_data *rgb = pwl_rgb;
struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1;
i = 0;
do {
if (ramp->gamma[i] > max_os ||
ramp->gamma[i + 256] > max_os ||
ramp->gamma[i + 512] > max_os) {
scaler = max_driver;
break;
}
i++;
} while (i != GAMMA_RGB_256_ENTRIES);
i = 0;
do {
rgb->r = dc_fixpt_from_fraction(
ramp->gamma[i], scaler);
rgb->g = dc_fixpt_from_fraction(
ramp->gamma[i + 256], scaler);
rgb->b = dc_fixpt_from_fraction(
ramp->gamma[i + 512], scaler);
++rgb;
++i;
} while (i != GAMMA_RGB_256_ENTRIES);
rgb->r = dc_fixpt_mul(rgb_last->r,
dividers.divider1);
rgb->g = dc_fixpt_mul(rgb_last->g,
dividers.divider1);
rgb->b = dc_fixpt_mul(rgb_last->b,
dividers.divider1);
++rgb;
rgb->r = dc_fixpt_mul(rgb_last->r,
dividers.divider2);
rgb->g = dc_fixpt_mul(rgb_last->g,
dividers.divider2);
rgb->b = dc_fixpt_mul(rgb_last->b,
dividers.divider2);
++rgb;
rgb->r = dc_fixpt_mul(rgb_last->r,
dividers.divider3);
rgb->g = dc_fixpt_mul(rgb_last->g,
dividers.divider3);
rgb->b = dc_fixpt_mul(rgb_last->b,
dividers.divider3);
}
/*
* RS3+ color transform DDI - 1D LUT adjustment is composed with regamma here
* Input is evenly distributed in the output color space as specified in
* SetTimings
*
* Interpolation details:
* 1D LUT has 4096 values which give curve correction in 0-1 float range
* for evenly spaced points in 0-1 range. lut1D[index] gives correction
* for index/4095.
* First we find index for which:
* index/4095 < regamma_y < (index+1)/4095 =>
* index < 4095*regamma_y < index + 1
* norm_y = 4095*regamma_y, and index is just truncating to nearest integer
* lut1 = lut1D[index], lut2 = lut1D[index+1]
*
* adjustedY is then linearly interpolating regamma Y between lut1 and lut2
*
* Custom degamma on Linux uses the same interpolation math, so is handled here
*/
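/* Illustrative example (values made up): for regamma_y = 0.25,
* norm_y = 4095 * 0.25 = 1023.75, so index = 1023, index_next = 1024 and
* adjustedY = lut1D[1023] + 0.75 * (lut1D[1024] - lut1D[1023]).
*/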
static void apply_lut_1d(
const struct dc_gamma *ramp,
uint32_t num_hw_points,
struct dc_transfer_func_distributed_points *tf_pts)
{
int i = 0;
int color = 0;
struct fixed31_32 *regamma_y;
struct fixed31_32 norm_y;
struct fixed31_32 lut1;
struct fixed31_32 lut2;
const int max_lut_index = 4095;
const struct fixed31_32 penult_lut_index_f =
dc_fixpt_from_int(max_lut_index-1);
const struct fixed31_32 max_lut_index_f =
dc_fixpt_from_int(max_lut_index);
int32_t index = 0, index_next = 0;
struct fixed31_32 index_f;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM)
return; // this is not expected
for (i = 0; i < num_hw_points; i++) {
for (color = 0; color < 3; color++) {
if (color == 0)
regamma_y = &tf_pts->red[i];
else if (color == 1)
regamma_y = &tf_pts->green[i];
else
regamma_y = &tf_pts->blue[i];
norm_y = dc_fixpt_mul(max_lut_index_f,
*regamma_y);
index = dc_fixpt_floor(norm_y);
index_f = dc_fixpt_from_int(index);
if (index < 0)
continue;
if (index <= max_lut_index)
index_next = (index == max_lut_index) ? index : index+1;
else {
/* Here we are dealing with the last point in the curve,
* which in some cases might exceed the range given by
* max_lut_index. So we interpolate the value using
* max_lut_index and max_lut_index - 1.
*/
index = max_lut_index - 1;
index_next = max_lut_index;
index_f = penult_lut_index_f;
}
if (color == 0) {
lut1 = ramp->entries.red[index];
lut2 = ramp->entries.red[index_next];
} else if (color == 1) {
lut1 = ramp->entries.green[index];
lut2 = ramp->entries.green[index_next];
} else {
lut1 = ramp->entries.blue[index];
lut2 = ramp->entries.blue[index_next];
}
// we have everything now, so interpolate
delta_lut = dc_fixpt_sub(lut2, lut1);
delta_index = dc_fixpt_sub(norm_y, index_f);
*regamma_y = dc_fixpt_add(lut1,
dc_fixpt_mul(delta_index, delta_lut));
}
}
}
static void build_evenly_distributed_points(
struct gamma_pixel *points,
uint32_t numberof_points,
struct dividers dividers)
{
struct gamma_pixel *p = points;
struct gamma_pixel *p_last;
uint32_t i = 0;
// This function should not get called with 0 as a parameter
ASSERT(numberof_points > 0);
p_last = p + numberof_points - 1;
do {
struct fixed31_32 value = dc_fixpt_from_fraction(i,
numberof_points - 1);
p->r = value;
p->g = value;
p->b = value;
++p;
++i;
} while (i < numberof_points);
p->r = dc_fixpt_div(p_last->r, dividers.divider1);
p->g = dc_fixpt_div(p_last->g, dividers.divider1);
p->b = dc_fixpt_div(p_last->b, dividers.divider1);
++p;
p->r = dc_fixpt_div(p_last->r, dividers.divider2);
p->g = dc_fixpt_div(p_last->g, dividers.divider2);
p->b = dc_fixpt_div(p_last->b, dividers.divider2);
++p;
p->r = dc_fixpt_div(p_last->r, dividers.divider3);
p->g = dc_fixpt_div(p_last->g, dividers.divider3);
p->b = dc_fixpt_div(p_last->b, dividers.divider3);
}
static inline void copy_rgb_regamma_to_coordinates_x(
struct hw_x_point *coordinates_x,
uint32_t hw_points_num,
const struct pwl_float_data_ex *rgb_ex)
{
struct hw_x_point *coords = coordinates_x;
uint32_t i = 0;
const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
while (i <= hw_points_num + 1) {
coords->regamma_y_red = rgb_regamma->r;
coords->regamma_y_green = rgb_regamma->g;
coords->regamma_y_blue = rgb_regamma->b;
++coords;
++rgb_regamma;
++i;
}
}
static bool calculate_interpolated_hardware_curve(
const struct dc_gamma *ramp,
struct pixel_gamma_point *coeff128,
struct pwl_float_data *rgb_user,
const struct hw_x_point *coordinates_x,
const struct gamma_pixel *axis_x,
uint32_t number_of_points,
struct dc_transfer_func_distributed_points *tf_pts)
{
const struct pixel_gamma_point *coeff = coeff128;
uint32_t max_entries = 3 - 1;
uint32_t i = 0;
for (i = 0; i < 3; i++) {
if (!build_custom_gamma_mapping_coefficients_worker(
ramp, coeff128, coordinates_x, axis_x, i,
number_of_points))
return false;
}
i = 0;
max_entries += ramp->num_entries;
/* TODO: float point case */
while (i <= number_of_points) {
tf_pts->red[i] = calculate_mapped_value(
rgb_user, coeff, CHANNEL_NAME_RED, max_entries);
tf_pts->green[i] = calculate_mapped_value(
rgb_user, coeff, CHANNEL_NAME_GREEN, max_entries);
tf_pts->blue[i] = calculate_mapped_value(
rgb_user, coeff, CHANNEL_NAME_BLUE, max_entries);
++coeff;
++i;
}
return true;
}
/* The "old" interpolation uses a complicated scheme to build an array of
* coefficients while also using an array of 0-255 normalized to 0-1
* Then there's another loop using both of the above + new scaled user ramp
* and we concatenate them. It also searches for points of interpolation and
* uses enums for positions.
*
* This function uses a different approach:
* user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255
* To find index for hwX , we notice the following:
* i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1
* See apply_lut_1d which is the same principle, but on 4K entry 1D LUT
*
* Once the index is known, combined Y is simply:
* user_ramp(index) + (hwX - index/255) * (user_ramp(index+1) - user_ramp(index))
*
* We should switch to this method in all cases, it's simpler and faster
* ToDo one day - for now this only applies to ADL regamma to avoid regression
* for regular use cases (sRGB and PQ)
*/
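/* Illustrative example (values made up): for hwX = 0.5, 255 * 0.5 = 127.5,
* so index = 127 and the combined value is
* user_ramp(127) + 0.5 * (user_ramp(128) - user_ramp(127)).
*/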
static void interpolate_user_regamma(uint32_t hw_points_num,
struct pwl_float_data *rgb_user,
bool apply_degamma,
struct dc_transfer_func_distributed_points *tf_pts)
{
uint32_t i;
uint32_t color = 0;
int32_t index;
int32_t index_next;
struct fixed31_32 *tf_point;
struct fixed31_32 hw_x;
struct fixed31_32 norm_factor =
dc_fixpt_from_int(255);
struct fixed31_32 norm_x;
struct fixed31_32 index_f;
struct fixed31_32 lut1;
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
while (i != 32) {
tf_pts->red[i] = dc_fixpt_zero;
tf_pts->green[i] = dc_fixpt_zero;
tf_pts->blue[i] = dc_fixpt_zero;
++i;
}
while (i <= hw_points_num + 1) {
for (color = 0; color < 3; color++) {
if (color == 0)
tf_point = &tf_pts->red[i];
else if (color == 1)
tf_point = &tf_pts->green[i];
else
tf_point = &tf_pts->blue[i];
if (apply_degamma) {
if (color == 0)
hw_x = coordinates_x[i].regamma_y_red;
else if (color == 1)
hw_x = coordinates_x[i].regamma_y_green;
else
hw_x = coordinates_x[i].regamma_y_blue;
} else
hw_x = coordinates_x[i].x;
if (dc_fixpt_le(one, hw_x))
hw_x = one;
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
continue;
index_f = dc_fixpt_from_int(index);
index_next = (index == 255) ? index : index + 1;
if (color == 0) {
lut1 = rgb_user[index].r;
lut2 = rgb_user[index_next].r;
} else if (color == 1) {
lut1 = rgb_user[index].g;
lut2 = rgb_user[index_next].g;
} else {
lut1 = rgb_user[index].b;
lut2 = rgb_user[index_next].b;
}
// we have everything now, so interpolate
delta_lut = dc_fixpt_sub(lut2, lut1);
delta_index = dc_fixpt_sub(norm_x, index_f);
*tf_point = dc_fixpt_add(lut1,
dc_fixpt_mul(delta_index, delta_lut));
}
++i;
}
}
static void build_new_custom_resulted_curve(
uint32_t hw_points_num,
struct dc_transfer_func_distributed_points *tf_pts)
{
uint32_t i = 0;
while (i != hw_points_num + 1) {
tf_pts->red[i] = dc_fixpt_clamp(
tf_pts->red[i], dc_fixpt_zero,
dc_fixpt_one);
tf_pts->green[i] = dc_fixpt_clamp(
tf_pts->green[i], dc_fixpt_zero,
dc_fixpt_one);
tf_pts->blue[i] = dc_fixpt_clamp(
tf_pts->blue[i], dc_fixpt_zero,
dc_fixpt_one);
++i;
}
}
static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num, struct calculate_buffer *cal_buffer)
{
uint32_t i;
struct gamma_coefficients coeff;
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
rgb->r = translate_from_linear_space_ex(
coord_x->x, &coeff, 0, cal_buffer);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
++rgb;
++i;
}
}
static bool map_regamma_hw_to_x_user(
const struct dc_gamma *ramp,
struct pixel_gamma_point *coeff128,
struct pwl_float_data *rgb_user,
struct hw_x_point *coords_x,
const struct gamma_pixel *axis_x,
const struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
struct dc_transfer_func_distributed_points *tf_pts,
bool map_user_ramp,
bool do_clamping)
{
/* setup to spare calculated ideal regamma values */
int i = 0;
struct hw_x_point *coords = coords_x;
const struct pwl_float_data_ex *regamma = rgb_regamma;
if (ramp && map_user_ramp) {
copy_rgb_regamma_to_coordinates_x(coords,
hw_points_num,
rgb_regamma);
calculate_interpolated_hardware_curve(
ramp, coeff128, rgb_user, coords, axis_x,
hw_points_num, tf_pts);
} else {
/* just copy current rgb_regamma into tf_pts */
while (i <= hw_points_num) {
tf_pts->red[i] = regamma->r;
tf_pts->green[i] = regamma->g;
tf_pts->blue[i] = regamma->b;
++regamma;
++i;
}
}
if (do_clamping) {
/* this should be named differently, all it does is clamp to 0-1 */
build_new_custom_resulted_curve(hw_points_num, tf_pts);
}
return true;
}
#define _EXTRA_POINTS 3
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
const struct regamma_lut *regamma,
struct calculate_buffer *cal_buffer,
const struct dc_gamma *ramp)
{
struct gamma_coefficients coeff;
const struct hw_x_point *coord_x = coordinates_x;
uint32_t i = 0;
do {
coeff.a0[i] = dc_fixpt_from_fraction(
regamma->coeff.A0[i], 10000000);
coeff.a1[i] = dc_fixpt_from_fraction(
regamma->coeff.A1[i], 1000);
coeff.a2[i] = dc_fixpt_from_fraction(
regamma->coeff.A2[i], 1000);
coeff.a3[i] = dc_fixpt_from_fraction(
regamma->coeff.A3[i], 1000);
coeff.user_gamma[i] = dc_fixpt_from_fraction(
regamma->coeff.gamma[i], 1000);
++i;
} while (i != 3);
i = 0;
/* fixed_pt library has problems handling too small values */
while (i != 32) {
output_tf->tf_pts.red[i] = dc_fixpt_zero;
output_tf->tf_pts.green[i] = dc_fixpt_zero;
output_tf->tf_pts.blue[i] = dc_fixpt_zero;
++coord_x;
++i;
}
while (i != MAX_HW_POINTS + 1) {
output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
coord_x->x, &coeff, 0, cal_buffer);
output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
coord_x->x, &coeff, 1, cal_buffer);
output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
coord_x->x, &coeff, 2, cal_buffer);
++coord_x;
++i;
}
if (ramp && ramp->type == GAMMA_CS_TFM_1D)
apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts);
// this function just clamps output to 0-1
build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts);
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
return true;
}
bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
const struct regamma_lut *regamma,
struct calculate_buffer *cal_buffer,
const struct dc_gamma *ramp)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
struct pwl_float_data *rgb_user = NULL;
struct pwl_float_data_ex *rgb_regamma = NULL;
bool ret = false;
if (regamma == NULL)
return false;
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
rgb_user = kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
dividers.divider1 = dc_fixpt_from_fraction(3, 2);
dividers.divider2 = dc_fixpt_from_int(2);
dividers.divider3 = dc_fixpt_from_fraction(5, 2);
scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
if (regamma->flags.bits.applyDegamma == 1) {
apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS, cal_buffer);
copy_rgb_regamma_to_coordinates_x(coordinates_x,
MAX_HW_POINTS, rgb_regamma);
}
interpolate_user_regamma(MAX_HW_POINTS, rgb_user,
regamma->flags.bits.applyDegamma, tf_pts);
// no custom HDR curves!
tf_pts->end_exponent = 0;
tf_pts->x_point_at_y1_red = 1;
tf_pts->x_point_at_y1_green = 1;
tf_pts->x_point_at_y1_blue = 1;
if (ramp && ramp->type == GAMMA_CS_TFM_1D)
apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts);
// this function just clamps output to 0-1
build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts);
ret = true;
kfree(rgb_regamma);
rgb_regamma_alloc_fail:
kfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *input_tf,
const struct dc_gamma *ramp, bool map_user_ramp)
{
struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
struct dividers dividers;
struct pwl_float_data *rgb_user = NULL;
struct pwl_float_data_ex *curve = NULL;
struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
enum dc_transfer_func_predefined tf;
uint32_t i;
bool ret = false;
if (input_tf->type == TF_TYPE_BYPASS)
return false;
/* we can use hardcoded curve for plain SRGB TF
* If linear, it's a bypass when there is no user ramp
*/
if (input_tf->type == TF_TYPE_PREDEFINED) {
if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
!map_user_ramp)
return true;
if (dc_caps != NULL &&
dc_caps->dpp.dcn_arch == 1) {
if (input_tf->tf == TRANSFER_FUNCTION_PQ &&
dc_caps->dpp.dgam_rom_caps.pq == 1)
return true;
if (input_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
dc_caps->dpp.dgam_rom_caps.gamma2_2 == 1)
return true;
// HLG OOTF not accounted for
if (input_tf->tf == TRANSFER_FUNCTION_HLG &&
dc_caps->dpp.dgam_rom_caps.hlg == 1)
return true;
}
}
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
GFP_KERNEL);
if (!axis_x)
goto axis_x_alloc_fail;
dividers.divider1 = dc_fixpt_from_fraction(3, 2);
dividers.divider2 = dc_fixpt_from_int(2);
dividers.divider3 = dc_fixpt_from_fraction(5, 2);
build_evenly_distributed_points(
axis_x,
ramp->num_entries,
dividers);
scale_gamma(rgb_user, ramp, dividers);
}
curve = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*curve),
GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
tf = input_tf->tf;
if (tf == TRANSFER_FUNCTION_PQ)
build_de_pq(curve,
MAX_HW_POINTS,
coordinates_x);
else if (tf == TRANSFER_FUNCTION_SRGB ||
tf == TRANSFER_FUNCTION_BT709 ||
tf == TRANSFER_FUNCTION_GAMMA22 ||
tf == TRANSFER_FUNCTION_GAMMA24 ||
tf == TRANSFER_FUNCTION_GAMMA26)
build_degamma(curve,
MAX_HW_POINTS,
coordinates_x,
tf);
else if (tf == TRANSFER_FUNCTION_HLG)
build_hlg_degamma(curve,
MAX_HW_POINTS,
coordinates_x,
80, 1000);
else if (tf == TRANSFER_FUNCTION_LINEAR) {
// just copy coordinates_x into curve
i = 0;
while (i != MAX_HW_POINTS + 1) {
curve[i].r = coordinates_x[i].x;
curve[i].g = curve[i].r;
curve[i].b = curve[i].r;
i++;
}
} else
goto invalid_tf_fail;
tf_pts->end_exponent = 0;
tf_pts->x_point_at_y1_red = 1;
tf_pts->x_point_at_y1_green = 1;
tf_pts->x_point_at_y1_blue = 1;
if (input_tf->tf == TRANSFER_FUNCTION_PQ) {
/* just copy current rgb_regamma into tf_pts */
struct pwl_float_data_ex *curvePt = curve;
int i = 0;
while (i <= MAX_HW_POINTS) {
tf_pts->red[i] = curvePt->r;
tf_pts->green[i] = curvePt->g;
tf_pts->blue[i] = curvePt->b;
++curvePt;
++i;
}
} else {
// clamps to 0-1
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
map_user_ramp && ramp && ramp->type == GAMMA_RGB_256,
true);
}
if (ramp && ramp->type == GAMMA_CUSTOM)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
invalid_tf_fail:
kvfree(coeff);
coeff_alloc_fail:
kvfree(curve);
curve_alloc_fail:
kvfree(axis_x);
axis_x_alloc_fail:
kvfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
static bool calculate_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points,
struct pwl_float_data_ex *rgb_regamma,
const struct hdr_tm_params *fs_params,
uint32_t sdr_ref_white_level,
struct calculate_buffer *cal_buffer)
{
uint32_t i;
bool ret = false;
if (trans == TRANSFER_FUNCTION_UNITY ||
trans == TRANSFER_FUNCTION_LINEAR) {
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
points->x_point_at_y1_green = 1;
points->x_point_at_y1_blue = 1;
for (i = 0; i <= MAX_HW_POINTS ; i++) {
rgb_regamma[i].r = coordinates_x[i].x;
rgb_regamma[i].g = coordinates_x[i].x;
rgb_regamma[i].b = coordinates_x[i].x;
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
points->end_exponent = 7;
points->x_point_at_y1_red = 125;
points->x_point_at_y1_green = 125;
points->x_point_at_y1_blue = 125;
build_pq(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
sdr_ref_white_level);
ret = true;
} else if (trans == TRANSFER_FUNCTION_GAMMA22 &&
fs_params != NULL && fs_params->skip_tm == 0) {
build_freesync_hdr(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
fs_params,
cal_buffer);
ret = true;
} else if (trans == TRANSFER_FUNCTION_HLG) {
points->end_exponent = 4;
points->x_point_at_y1_red = 12;
points->x_point_at_y1_green = 12;
points->x_point_at_y1_blue = 12;
build_hlg_regamma(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
80, 1000);
ret = true;
} else {
// trans == TRANSFER_FUNCTION_SRGB
// trans == TRANSFER_FUNCTION_BT709
// trans == TRANSFER_FUNCTION_GAMMA22
// trans == TRANSFER_FUNCTION_GAMMA24
// trans == TRANSFER_FUNCTION_GAMMA26
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
points->x_point_at_y1_green = 1;
points->x_point_at_y1_blue = 1;
build_regamma(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
trans,
cal_buffer);
ret = true;
}
return ret;
}
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp,
bool map_user_ramp,
bool can_rom_be_used,
const struct hdr_tm_params *fs_params,
struct calculate_buffer *cal_buffer)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
struct pwl_float_data *rgb_user = NULL;
struct pwl_float_data_ex *rgb_regamma = NULL;
struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
enum dc_transfer_func_predefined tf;
bool do_clamping = true;
bool ret = false;
if (output_tf->type == TF_TYPE_BYPASS)
return false;
/* we can use hardcoded curve for plain SRGB TF */
if (output_tf->type == TF_TYPE_PREDEFINED && can_rom_be_used == true &&
output_tf->tf == TRANSFER_FUNCTION_SRGB) {
if (ramp == NULL)
return true;
if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
(!map_user_ramp && ramp->type == GAMMA_RGB_256))
return true;
}
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
(map_user_ramp || ramp->type != GAMMA_RGB_256)) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
GFP_KERNEL);
if (!axis_x)
goto axis_x_alloc_fail;
dividers.divider1 = dc_fixpt_from_fraction(3, 2);
dividers.divider2 = dc_fixpt_from_int(2);
dividers.divider3 = dc_fixpt_from_fraction(5, 2);
build_evenly_distributed_points(
axis_x,
ramp->num_entries,
dividers);
if (ramp->type == GAMMA_RGB_256 && map_user_ramp)
scale_gamma(rgb_user, ramp, dividers);
else if (ramp->type == GAMMA_RGB_FLOAT_1024)
scale_gamma_dx(rgb_user, ramp, dividers);
}
rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
tf = output_tf->tf;
ret = calculate_curve(tf,
tf_pts,
rgb_regamma,
fs_params,
output_tf->sdr_ref_white_level,
cal_buffer);
if (ret) {
do_clamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
fs_params != NULL && fs_params->skip_tm == 0);
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axis_x, rgb_regamma,
MAX_HW_POINTS, tf_pts,
(map_user_ramp || (ramp && ramp->type != GAMMA_RGB_256)) &&
(ramp && ramp->type != GAMMA_CS_TFM_1D),
do_clamping);
if (ramp && ramp->type == GAMMA_CS_TFM_1D)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
}
kvfree(coeff);
coeff_alloc_fail:
kvfree(rgb_regamma);
rgb_regamma_alloc_fail:
kvfree(axis_x);
axis_x_alloc_fail:
kvfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
| linux-master | drivers/gpu/drm/amd/display/modules/color/color_gamma.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "color_table.h"
static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
static bool pq_initialized;
static bool de_pg_initialized;
bool mod_color_is_table_init(enum table_type type)
{
bool ret = false;
if (type == type_pq_table)
ret = pq_initialized;
if (type == type_de_pq_table)
ret = de_pg_initialized;
return ret;
}
struct fixed31_32 *mod_color_get_table(enum table_type type)
{
struct fixed31_32 *table = NULL;
if (type == type_pq_table)
table = pq_table;
if (type == type_de_pq_table)
table = de_pq_table;
return table;
}
void mod_color_set_table_init_state(enum table_type type, bool state)
{
if (type == type_pq_table)
pq_initialized = state;
if (type == type_de_pq_table)
de_pg_initialized = state;
}
| linux-master | drivers/gpu/drm/amd/display/modules/color/color_table.c |
/*
* Copyright 2016-2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dc.h"
#include "mod_freesync.h"
#include "core_types.h"
#define MOD_FREESYNC_MAX_CONCURRENT_STREAMS 32
#define MIN_REFRESH_RANGE 10
/* Refresh rate ramp at a fixed rate of 65 Hz/second */
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/* Threshold to exit fixed refresh rate */
#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 1
/* Number of consecutive frames to check before entering/exiting fixed refresh */
#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
#define FIXED_REFRESH_EXIT_FRAME_COUNT 10
/* Flip interval workaround constants */
#define VSYNCS_BETWEEN_FLIP_THRESHOLD 2
#define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5
#define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500
struct core_freesync {
struct mod_freesync public;
struct dc *dc;
};
#define MOD_FREESYNC_TO_CORE(mod_freesync)\
container_of(mod_freesync, struct core_freesync, public)
struct mod_freesync *mod_freesync_create(struct dc *dc)
{
struct core_freesync *core_freesync =
kzalloc(sizeof(struct core_freesync), GFP_KERNEL);
if (core_freesync == NULL)
goto fail_alloc_context;
if (dc == NULL)
goto fail_construct;
core_freesync->dc = dc;
return &core_freesync->public;
fail_construct:
kfree(core_freesync);
fail_alloc_context:
return NULL;
}
void mod_freesync_destroy(struct mod_freesync *mod_freesync)
{
struct core_freesync *core_freesync = NULL;
if (mod_freesync == NULL)
return;
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
kfree(core_freesync);
}
#if 0 /* Unused currently */
static unsigned int calc_refresh_in_uhz_from_duration(
unsigned int duration_in_ns)
{
unsigned int refresh_in_uhz =
((unsigned int)(div64_u64((1000000000ULL * 1000000),
duration_in_ns)));
return refresh_in_uhz;
}
#endif
static unsigned int calc_duration_in_us_from_refresh_in_uhz(
unsigned int refresh_in_uhz)
{
unsigned int duration_in_us =
((unsigned int)(div64_u64((1000000000ULL * 1000),
refresh_in_uhz)));
return duration_in_us;
}
static unsigned int calc_duration_in_us_from_v_total(
const struct dc_stream_state *stream,
const struct mod_vrr_params *in_vrr,
unsigned int v_total)
{
unsigned int duration_in_us =
(unsigned int)(div64_u64(((unsigned long long)(v_total)
* 10000) * stream->timing.h_total,
stream->timing.pix_clk_100hz));
return duration_in_us;
}
unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
{
unsigned int v_total;
unsigned int frame_duration_in_ns;
frame_duration_in_ns =
((unsigned int)(div64_u64((1000000000ULL * 1000000),
refresh_in_uhz)));
v_total = div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000000);
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
ASSERT(v_total < stream->timing.v_total);
v_total = stream->timing.v_total;
}
return v_total;
}
static unsigned int calc_v_total_from_duration(
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
unsigned int duration_in_us)
{
unsigned int v_total = 0;
if (duration_in_us < vrr->min_duration_in_us)
duration_in_us = vrr->min_duration_in_us;
if (duration_in_us > vrr->max_duration_in_us)
duration_in_us = vrr->max_duration_in_us;
if (dc_is_hdmi_signal(stream->signal)) {
uint32_t h_total_up_scaled;
h_total_up_scaled = stream->timing.h_total * 10000;
v_total = div_u64((unsigned long long)duration_in_us
* stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
h_total_up_scaled);
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
}
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
ASSERT(v_total < stream->timing.v_total);
v_total = stream->timing.v_total;
}
return v_total;
}
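/* Ramp the frame duration toward fixed.target_refresh_in_uhz, one step per
* call, and program the result into both v_total_min and v_total_max. When
* the target is reached, ramping_active is cleared and ramping_done is set.
*/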
static void update_v_total_for_static_ramp(
struct core_freesync *core_freesync,
const struct dc_stream_state *stream,
struct mod_vrr_params *in_out_vrr)
{
unsigned int v_total = 0;
unsigned int current_duration_in_us =
calc_duration_in_us_from_v_total(
stream, in_out_vrr,
in_out_vrr->adjust.v_total_max);
unsigned int target_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
in_out_vrr->fixed.target_refresh_in_uhz);
bool ramp_direction_is_up = (current_duration_in_us >
target_duration_in_us) ? true : false;
/* Calculate ratio between new and current frame duration with 3 digit precision */
unsigned int frame_duration_ratio = div64_u64(1000000,
(1000 + div64_u64(((unsigned long long)(
STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) *
current_duration_in_us),
1000000)));
/* Calculate delta between new and current frame duration in us */
unsigned int frame_duration_delta = div64_u64(((unsigned long long)(
current_duration_in_us) *
(1000 - frame_duration_ratio)), 1000);
/* Adjust frame duration delta based on ratio between current and
* standard frame duration (frame duration at 60 Hz refresh rate).
*/
unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)(
frame_duration_delta) * current_duration_in_us), 16666);
/* Going to a higher refresh rate (lower frame duration) */
if (ramp_direction_is_up) {
/* Reduce frame duration */
current_duration_in_us -= ramp_rate_interpolated;
/* Adjust for frame duration below min */
if (current_duration_in_us <= target_duration_in_us) {
in_out_vrr->fixed.ramping_active = false;
in_out_vrr->fixed.ramping_done = true;
current_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
in_out_vrr->fixed.target_refresh_in_uhz);
}
/* Going to a lower refresh rate (larger frame duration) */
} else {
/* Increase frame duration */
current_duration_in_us += ramp_rate_interpolated;
/* Adjust for frame duration above max */
if (current_duration_in_us >= target_duration_in_us) {
in_out_vrr->fixed.ramping_active = false;
in_out_vrr->fixed.ramping_done = true;
current_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
in_out_vrr->fixed.target_refresh_in_uhz);
}
}
v_total = div64_u64(div64_u64(((unsigned long long)(
current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total)
v_total = stream->timing.v_total;
in_out_vrr->adjust.v_total_min = v_total;
in_out_vrr->adjust.v_total_max = v_total;
}
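/* Below the Range (LFC) handling: when frames render slower than the VRR
* maximum frame time, choose a frame multiplier whose per-frame duration
* lands as close as possible to the midpoint of the VRR range, and cache the
* result in the BTR state (frames_to_insert, inserted_duration_in_us).
*/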
static void apply_below_the_range(struct core_freesync *core_freesync,
const struct dc_stream_state *stream,
unsigned int last_render_time_in_us,
struct mod_vrr_params *in_out_vrr)
{
unsigned int inserted_frame_duration_in_us = 0;
unsigned int mid_point_frames_ceil = 0;
unsigned int mid_point_frames_floor = 0;
unsigned int frame_time_in_us = 0;
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
unsigned int delta_from_mid_point_delta_in_us;
unsigned int max_render_time_in_us =
in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
} else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
if (!in_out_vrr->btr.btr_active) {
in_out_vrr->btr.btr_active = true;
}
}
/* BTR set to "not active" so disengage */
if (!in_out_vrr->btr.btr_active) {
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
/* Restore FreeSync */
in_out_vrr->adjust.v_total_min =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
/* BTR set to "active" so engage */
} else {
/* Calculate number of midPoint frames that could fit within
* the render time interval - take ceil of this value
*/
mid_point_frames_ceil = (last_render_time_in_us +
in_out_vrr->btr.mid_point_in_us - 1) /
in_out_vrr->btr.mid_point_in_us;
if (mid_point_frames_ceil > 0) {
frame_time_in_us = last_render_time_in_us /
mid_point_frames_ceil;
delta_from_mid_point_in_us_1 =
(in_out_vrr->btr.mid_point_in_us >
frame_time_in_us) ?
(in_out_vrr->btr.mid_point_in_us - frame_time_in_us) :
(frame_time_in_us - in_out_vrr->btr.mid_point_in_us);
}
/* Calculate number of midPoint frames that could fit within
* the render time interval - take floor of this value
*/
mid_point_frames_floor = last_render_time_in_us /
in_out_vrr->btr.mid_point_in_us;
if (mid_point_frames_floor > 0) {
frame_time_in_us = last_render_time_in_us /
mid_point_frames_floor;
delta_from_mid_point_in_us_2 =
(in_out_vrr->btr.mid_point_in_us >
frame_time_in_us) ?
(in_out_vrr->btr.mid_point_in_us - frame_time_in_us) :
(frame_time_in_us - in_out_vrr->btr.mid_point_in_us);
}
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
if (mid_point_frames_ceil &&
(last_render_time_in_us / mid_point_frames_ceil) <
in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
*/
frames_to_insert = mid_point_frames_floor;
} else if (mid_point_frames_floor < 2) {
/* Check if FLOOR would result in non-LFC. In this case
* choose to use CEIL
*/
frames_to_insert = mid_point_frames_ceil;
} else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
/* If choosing CEIL results in a frame duration that is
* closer to the mid point of the range.
* Choose CEIL
*/
frames_to_insert = mid_point_frames_ceil;
} else {
/* If choosing FLOOR results in a frame duration that is
* closer to the mid point of the range.
* Choose FLOOR
*/
frames_to_insert = mid_point_frames_floor;
}
/* Prefer current frame multiplier when BTR is enabled unless it drifts
* too far from the midpoint
*/
if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
} else {
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
delta_from_mid_point_in_us_2;
}
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
}
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
if (frames_to_insert &&
(last_render_time_in_us / frames_to_insert) <
in_out_vrr->min_duration_in_us){
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}
if (frames_to_insert > 0)
inserted_frame_duration_in_us = last_render_time_in_us /
frames_to_insert;
if (inserted_frame_duration_in_us < in_out_vrr->min_duration_in_us)
inserted_frame_duration_in_us = in_out_vrr->min_duration_in_us;
/* Cache the calculated variables */
in_out_vrr->btr.inserted_duration_in_us =
inserted_frame_duration_in_us;
in_out_vrr->btr.frames_to_insert = frames_to_insert;
in_out_vrr->btr.frame_counter = frames_to_insert;
}
}
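/* Switch to a fixed refresh rate (at the VRR maximum) after
* FIXED_REFRESH_ENTER_FRAME_COUNT consecutive frames render slower than the
* VRR maximum frame time, and restore the full VRR range after
* FIXED_REFRESH_EXIT_FRAME_COUNT consecutive fast frames, with a small
* hysteresis margin on the exit threshold.
*/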
static void apply_fixed_refresh(struct core_freesync *core_freesync,
const struct dc_stream_state *stream,
unsigned int last_render_time_in_us,
struct mod_vrr_params *in_out_vrr)
{
bool update = false;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
/* Compute the exit refresh rate and exit frame duration */
unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
+ (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
if (last_render_time_in_us < exit_frame_duration_in_us) {
/* Exit Fixed Refresh mode */
if (in_out_vrr->fixed.fixed_active) {
in_out_vrr->fixed.frame_counter++;
if (in_out_vrr->fixed.frame_counter >
FIXED_REFRESH_EXIT_FRAME_COUNT) {
in_out_vrr->fixed.frame_counter = 0;
in_out_vrr->fixed.fixed_active = false;
in_out_vrr->fixed.target_refresh_in_uhz = 0;
update = true;
}
} else
in_out_vrr->fixed.frame_counter = 0;
} else if (last_render_time_in_us > max_render_time_in_us) {
/* Enter Fixed Refresh mode */
if (!in_out_vrr->fixed.fixed_active) {
in_out_vrr->fixed.frame_counter++;
if (in_out_vrr->fixed.frame_counter >
FIXED_REFRESH_ENTER_FRAME_COUNT) {
in_out_vrr->fixed.frame_counter = 0;
in_out_vrr->fixed.fixed_active = true;
in_out_vrr->fixed.target_refresh_in_uhz =
in_out_vrr->max_refresh_in_uhz;
update = true;
}
} else
in_out_vrr->fixed.frame_counter = 0;
}
if (update) {
if (in_out_vrr->fixed.fixed_active) {
in_out_vrr->adjust.v_total_min =
mod_freesync_calc_v_total_from_refresh(
stream, in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
in_out_vrr->adjust.v_total_min;
} else {
in_out_vrr->adjust.v_total_min =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
}
}
}
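/* Track how soon after v_update each flip arrives. When flips repeatedly land
 * within FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US of the vsync while at least
 * VSYNCS_BETWEEN_FLIP_THRESHOLD vsyncs pass between flips, arm the flip
 * interval workaround after FREESYNC_CONSEC_FLIP_AFTER_VSYNC consecutive
 * detections; disarm it once the flip cadence recovers.
 */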
static void determine_flip_interval_workaround_req(struct mod_vrr_params *in_vrr,
unsigned int curr_time_stamp_in_us)
{
in_vrr->flip_interval.vsync_to_flip_in_us = curr_time_stamp_in_us -
in_vrr->flip_interval.v_update_timestamp_in_us;
/* Determine conditions for stopping workaround */
if (in_vrr->flip_interval.flip_interval_workaround_active &&
in_vrr->flip_interval.vsyncs_between_flip < VSYNCS_BETWEEN_FLIP_THRESHOLD &&
in_vrr->flip_interval.vsync_to_flip_in_us > FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US) {
in_vrr->flip_interval.flip_interval_detect_counter = 0;
in_vrr->flip_interval.program_flip_interval_workaround = true;
in_vrr->flip_interval.flip_interval_workaround_active = false;
} else {
/* Determine conditions for starting workaround */
if (in_vrr->flip_interval.vsyncs_between_flip >= VSYNCS_BETWEEN_FLIP_THRESHOLD &&
in_vrr->flip_interval.vsync_to_flip_in_us < FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US) {
			/* Increase the flip interval counter: we have 2 vsyncs between
			 * flips and the vsync to flip interval is less than 500us
			 */
in_vrr->flip_interval.flip_interval_detect_counter++;
if (in_vrr->flip_interval.flip_interval_detect_counter > FREESYNC_CONSEC_FLIP_AFTER_VSYNC) {
/* Start workaround if we detect 5 consecutive instances of the above case */
in_vrr->flip_interval.program_flip_interval_workaround = true;
in_vrr->flip_interval.flip_interval_workaround_active = true;
}
} else {
			/* Reset the flip interval counter if the condition is no longer met */
in_vrr->flip_interval.flip_interval_detect_counter = 0;
}
}
in_vrr->flip_interval.vsyncs_between_flip = 0;
}
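/* Return true when the requested state, fixed refresh target or min/max
 * refresh range differs from the currently cached VRR parameters.
 */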
static bool vrr_settings_require_update(struct core_freesync *core_freesync,
struct mod_freesync_config *in_config,
unsigned int min_refresh_in_uhz,
unsigned int max_refresh_in_uhz,
struct mod_vrr_params *in_vrr)
{
if (in_vrr->state != in_config->state) {
return true;
} else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
in_vrr->fixed.target_refresh_in_uhz !=
in_config->fixed_refresh_in_uhz) {
return true;
} else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) {
return true;
} else if (in_vrr->max_refresh_in_uhz != max_refresh_in_uhz) {
return true;
}
return false;
}
bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
unsigned int *vmin,
unsigned int *vmax)
{
*vmin = stream->adjust.v_total_min;
*vmax = stream->adjust.v_total_max;
return true;
}
bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
struct dc_stream_state *stream,
unsigned int *nom_v_pos,
unsigned int *v_pos)
{
struct core_freesync *core_freesync = NULL;
struct crtc_position position;
if (mod_freesync == NULL)
return false;
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
if (dc_stream_get_crtc_position(core_freesync->dc, &stream, 1,
&position.vertical_count,
&position.nominal_vcount)) {
*nom_v_pos = position.nominal_vcount;
*v_pos = position.vertical_count;
return true;
}
return false;
}
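/* Fill the FreeSync SPD payload: AMD IEEE OUI in PB1-PB3, the
 * supported/enabled/active flags in PB6, and the min/max refresh rates
 * in Hz in PB7/PB8.
 */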
static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket,
bool freesync_on_desktop)
{
/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
infopacket->sb[1] = 0x1A;
/* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
infopacket->sb[2] = 0x00;
/* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
infopacket->sb[3] = 0x00;
/* PB4 = Reserved */
/* PB5 = Reserved */
/* PB6 = [Bits 7:3 = Reserved] */
/* PB6 = [Bit 0 = FreeSync Supported] */
if (vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x01;
/* PB6 = [Bit 1 = FreeSync Enabled] */
if (vrr->state != VRR_STATE_DISABLED &&
vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x02;
if (freesync_on_desktop) {
/* PB6 = [Bit 2 = FreeSync Active] */
if (vrr->state != VRR_STATE_DISABLED &&
vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x04;
} else {
if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
vrr->state == VRR_STATE_ACTIVE_FIXED)
infopacket->sb[6] |= 0x04;
}
	// For v1 & v2 infoframes, program nominal refresh in non-FS mode, otherwise the full range
/* PB7 = FreeSync Minimum refresh rate (Hz) */
if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
vrr->state == VRR_STATE_ACTIVE_FIXED) {
infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
} else {
infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
}
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
}
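/* v3 payload: PB1-PB6 as in v1; PB7/PB8 carry the low 8 bits of the
 * programmed min/max refresh rate, PB11/PB12 carry bits 9:8, and PB16
 * bit 0 flags fixed-rate operation.
 */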
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket,
bool freesync_on_desktop)
{
unsigned int min_refresh;
unsigned int max_refresh;
unsigned int fixed_refresh;
unsigned int min_programmed;
unsigned int max_programmed;
/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
infopacket->sb[1] = 0x1A;
/* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
infopacket->sb[2] = 0x00;
/* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
infopacket->sb[3] = 0x00;
/* PB4 = Reserved */
/* PB5 = Reserved */
/* PB6 = [Bits 7:3 = Reserved] */
/* PB6 = [Bit 0 = FreeSync Supported] */
if (vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x01;
/* PB6 = [Bit 1 = FreeSync Enabled] */
if (vrr->state != VRR_STATE_DISABLED &&
vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x02;
/* PB6 = [Bit 2 = FreeSync Active] */
if (freesync_on_desktop) {
if (vrr->state != VRR_STATE_DISABLED &&
vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x04;
} else {
if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
vrr->state == VRR_STATE_ACTIVE_FIXED)
infopacket->sb[6] |= 0x04;
}
min_refresh = (vrr->min_refresh_in_uhz + 500000) / 1000000;
max_refresh = (vrr->max_refresh_in_uhz + 500000) / 1000000;
fixed_refresh = (vrr->fixed_refresh_in_uhz + 500000) / 1000000;
min_programmed = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? fixed_refresh :
(vrr->state == VRR_STATE_ACTIVE_VARIABLE) ? min_refresh :
(vrr->state == VRR_STATE_INACTIVE) ? min_refresh :
max_refresh; // Non-fs case, program nominal range
max_programmed = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? fixed_refresh :
(vrr->state == VRR_STATE_ACTIVE_VARIABLE) ? max_refresh :
max_refresh;// Non-fs case, program nominal range
/* PB7 = FreeSync Minimum refresh rate (Hz) */
infopacket->sb[7] = min_programmed & 0xFF;
/* PB8 = FreeSync Maximum refresh rate (Hz) */
infopacket->sb[8] = max_programmed & 0xFF;
/* PB11 : MSB FreeSync Minimum refresh rate [Hz] - bits 9:8 */
infopacket->sb[11] = (min_programmed >> 8) & 0x03;
/* PB12 : MSB FreeSync Maximum refresh rate [Hz] - bits 9:8 */
infopacket->sb[12] = (max_programmed >> 8) & 0x03;
/* PB16 : Reserved bits 7:1, FixedRate bit 0 */
infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
}
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
if (app_tf != TRANSFER_FUNC_UNKNOWN) {
infopacket->valid = true;
if (app_tf != TRANSFER_FUNC_PQ2084) {
infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
if (app_tf == TRANSFER_FUNC_GAMMA_22)
infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
}
}
}
static void build_vrr_infopacket_header_v1(enum signal_type signal,
struct dc_info_packet *infopacket,
unsigned int *payload_size)
{
if (dc_is_hdmi_signal(signal)) {
/* HEADER */
/* HB0 = Packet Type = 0x83 (Source Product
* Descriptor InfoFrame)
*/
infopacket->hb0 = DC_HDMI_INFOFRAME_TYPE_SPD;
/* HB1 = Version = 0x01 */
infopacket->hb1 = 0x01;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
infopacket->hb2 = 0x08;
*payload_size = 0x08;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
/* HB0 = Secondary-data Packet ID = 0 - Only non-zero
* when used to associate audio related info packets
*/
infopacket->hb0 = 0x00;
/* HB1 = Packet Type = 0x83 (Source Product
* Descriptor InfoFrame)
*/
infopacket->hb1 = DC_HDMI_INFOFRAME_TYPE_SPD;
/* HB2 = [Bits 7:0 = Least significant eight bits -
* For INFOFRAME, the value must be 1Bh]
*/
infopacket->hb2 = 0x1B;
/* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
* [Bits 1:0 = Most significant two bits = 0x00]
*/
infopacket->hb3 = 0x04;
*payload_size = 0x1B;
}
}
static void build_vrr_infopacket_header_v2(enum signal_type signal,
struct dc_info_packet *infopacket,
unsigned int *payload_size)
{
if (dc_is_hdmi_signal(signal)) {
/* HEADER */
/* HB0 = Packet Type = 0x83 (Source Product
* Descriptor InfoFrame)
*/
infopacket->hb0 = DC_HDMI_INFOFRAME_TYPE_SPD;
/* HB1 = Version = 0x02 */
infopacket->hb1 = 0x02;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
infopacket->hb2 = 0x09;
*payload_size = 0x09;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
/* HB0 = Secondary-data Packet ID = 0 - Only non-zero
* when used to associate audio related info packets
*/
infopacket->hb0 = 0x00;
/* HB1 = Packet Type = 0x83 (Source Product
* Descriptor InfoFrame)
*/
infopacket->hb1 = DC_HDMI_INFOFRAME_TYPE_SPD;
/* HB2 = [Bits 7:0 = Least significant eight bits -
* For INFOFRAME, the value must be 1Bh]
*/
infopacket->hb2 = 0x1B;
/* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x2]
* [Bits 1:0 = Most significant two bits = 0x00]
*/
infopacket->hb3 = 0x08;
*payload_size = 0x1B;
}
}
static void build_vrr_infopacket_header_v3(enum signal_type signal,
struct dc_info_packet *infopacket,
unsigned int *payload_size)
{
unsigned char version;
version = 3;
if (dc_is_hdmi_signal(signal)) {
/* HEADER */
/* HB0 = Packet Type = 0x83 (Source Product
* Descriptor InfoFrame)
*/
infopacket->hb0 = DC_HDMI_INFOFRAME_TYPE_SPD;
/* HB1 = Version = 0x03 */
infopacket->hb1 = version;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */
infopacket->hb2 = 0x10;
*payload_size = 0x10;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
/* HB0 = Secondary-data Packet ID = 0 - Only non-zero
* when used to associate audio related info packets
*/
infopacket->hb0 = 0x00;
/* HB1 = Packet Type = 0x83 (Source Product
* Descriptor InfoFrame)
*/
infopacket->hb1 = DC_HDMI_INFOFRAME_TYPE_SPD;
/* HB2 = [Bits 7:0 = Least significant eight bits -
* For INFOFRAME, the value must be 1Bh]
*/
infopacket->hb2 = 0x1B;
		/* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x3]
* [Bits 1:0 = Most significant two bits = 0x00]
*/
infopacket->hb3 = (version & 0x3F) << 2;
*payload_size = 0x1B;
}
}
static void build_vrr_infopacket_checksum(unsigned int *payload_size,
struct dc_info_packet *infopacket)
{
/* Calculate checksum */
unsigned int idx = 0;
unsigned char checksum = 0;
checksum += infopacket->hb0;
checksum += infopacket->hb1;
checksum += infopacket->hb2;
checksum += infopacket->hb3;
for (idx = 1; idx <= *payload_size; idx++)
checksum += infopacket->sb[idx];
	/* PB0 = Checksum (so that header plus payload bytes sum to zero mod 256) */
infopacket->sb[0] = (unsigned char)(0x100 - checksum);
infopacket->valid = true;
}
static void build_vrr_infopacket_v1(enum signal_type signal,
const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket,
bool freesync_on_desktop)
{
/* SPD info packet for FreeSync */
unsigned int payload_size = 0;
build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
build_vrr_infopacket_data_v1(vrr, infopacket, freesync_on_desktop);
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
}
static void build_vrr_infopacket_v2(enum signal_type signal,
const struct mod_vrr_params *vrr,
enum color_transfer_func app_tf,
struct dc_info_packet *infopacket,
bool freesync_on_desktop)
{
unsigned int payload_size = 0;
build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
build_vrr_infopacket_data_v1(vrr, infopacket, freesync_on_desktop);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
}
static void build_vrr_infopacket_v3(enum signal_type signal,
const struct mod_vrr_params *vrr,
enum color_transfer_func app_tf,
struct dc_info_packet *infopacket,
bool freesync_on_desktop)
{
unsigned int payload_size = 0;
build_vrr_infopacket_header_v3(signal, infopacket, &payload_size);
build_vrr_infopacket_data_v3(vrr, infopacket, freesync_on_desktop);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
}
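/* Repack an already built SPD payload into DP SDP revision 1.3 framing:
 * shift the payload up by one byte so SB0/SB1 can carry the infoframe
 * version and length, then rewrite HB2/HB3 with the SDP 1.3 header values.
 */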
static void build_vrr_infopacket_sdp_v1_3(enum vrr_packet_type packet_type,
struct dc_info_packet *infopacket)
{
uint8_t idx = 0, size = 0;
size = ((packet_type == PACKET_TYPE_FS_V1) ? 0x08 :
(packet_type == PACKET_TYPE_FS_V3) ? 0x10 :
0x09);
for (idx = infopacket->hb2; idx > 1; idx--) // Data Byte Count: 0x1B
infopacket->sb[idx] = infopacket->sb[idx-1];
infopacket->sb[1] = size; // Length
infopacket->sb[0] = (infopacket->hb3 >> 2) & 0x3F;//Version
infopacket->hb3 = (0x13 << 2); // Header,SDP 1.3
infopacket->hb2 = 0x1D;
}
void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
enum vrr_packet_type packet_type,
enum color_transfer_func app_tf,
struct dc_info_packet *infopacket,
bool pack_sdp_v1_3)
{
/* SPD info packet for FreeSync
* VTEM info packet for HdmiVRR
* Check if Freesync is supported. Return if false. If true,
* set the corresponding bit in the info packet
*/
if (!vrr->send_info_frame)
return;
switch (packet_type) {
case PACKET_TYPE_FS_V3:
build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_FS_V2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_VRR:
case PACKET_TYPE_FS_V1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop);
}
	if (pack_sdp_v1_3 &&
			dc_is_dp_signal(stream->signal) &&
			packet_type != PACKET_TYPE_VRR &&
			packet_type != PACKET_TYPE_VTEM)
build_vrr_infopacket_sdp_v1_3(packet_type, infopacket);
}
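/* Translate the requested freesync config into VRR parameters for the stream:
 * clamp the refresh range to the timing and hardware limits, derive the
 * min/max frame durations and BTR midpoint, and program v_total_min/max for
 * the requested state.
 */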
void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
struct mod_freesync_config *in_config,
struct mod_vrr_params *in_out_vrr)
{
struct core_freesync *core_freesync = NULL;
unsigned long long nominal_field_rate_in_uhz = 0;
unsigned long long rounded_nominal_in_uhz = 0;
unsigned int refresh_range = 0;
unsigned long long min_refresh_in_uhz = 0;
unsigned long long max_refresh_in_uhz = 0;
unsigned long long min_hardware_refresh_in_uhz = 0;
if (mod_freesync == NULL)
return;
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
/* Calculate nominal field rate for stream */
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) {
min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
(stream->timing.h_total * stream->ctx->dc->caps.max_v_total));
}
/* Limit minimum refresh rate to what can be supported by hardware */
min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ?
min_hardware_refresh_in_uhz : in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
/* Full range may be larger than current video timing, so cap at nominal */
if (max_refresh_in_uhz > nominal_field_rate_in_uhz)
max_refresh_in_uhz = nominal_field_rate_in_uhz;
/* Full range may be larger than current video timing, so cap at nominal */
if (min_refresh_in_uhz > max_refresh_in_uhz)
min_refresh_in_uhz = max_refresh_in_uhz;
/* If a monitor reports exactly max refresh of 2x of min, enforce it on nominal */
rounded_nominal_in_uhz =
div_u64(nominal_field_rate_in_uhz + 50000, 100000) * 100000;
if (in_config->max_refresh_in_uhz == (2 * in_config->min_refresh_in_uhz) &&
in_config->max_refresh_in_uhz == rounded_nominal_in_uhz)
min_refresh_in_uhz = div_u64(nominal_field_rate_in_uhz, 2);
if (!vrr_settings_require_update(core_freesync,
in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
in_out_vrr))
return;
in_out_vrr->state = in_config->state;
in_out_vrr->send_info_frame = in_config->vsif_supported;
if (in_config->state == VRR_STATE_UNSUPPORTED) {
in_out_vrr->state = VRR_STATE_UNSUPPORTED;
in_out_vrr->supported = false;
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
return;
} else {
in_out_vrr->min_refresh_in_uhz = (unsigned int)min_refresh_in_uhz;
in_out_vrr->max_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
(unsigned int)min_refresh_in_uhz);
in_out_vrr->max_refresh_in_uhz = (unsigned int)max_refresh_in_uhz;
in_out_vrr->min_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
(unsigned int)max_refresh_in_uhz);
if (in_config->state == VRR_STATE_ACTIVE_FIXED)
in_out_vrr->fixed_refresh_in_uhz = in_config->fixed_refresh_in_uhz;
else
in_out_vrr->fixed_refresh_in_uhz = 0;
		refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) -
				div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
in_out_vrr->supported = true;
}
in_out_vrr->fixed.ramping_active = in_config->ramping;
in_out_vrr->btr.btr_enabled = in_config->btr;
if (in_out_vrr->max_refresh_in_uhz < (2 * in_out_vrr->min_refresh_in_uhz))
in_out_vrr->btr.btr_enabled = false;
else {
in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
2 * in_out_vrr->min_duration_in_us;
if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
}
in_out_vrr->btr.btr_active = false;
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->fixed.fixed_active = false;
in_out_vrr->fixed.target_refresh_in_uhz = 0;
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
if (in_out_vrr->state == VRR_STATE_UNSUPPORTED) {
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
} else if (in_out_vrr->state == VRR_STATE_DISABLED) {
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
} else if (in_out_vrr->state == VRR_STATE_INACTIVE) {
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
refresh_range >= MIN_REFRESH_RANGE) {
in_out_vrr->adjust.v_total_min =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
in_out_vrr->fixed.target_refresh_in_uhz =
in_out_vrr->fixed_refresh_in_uhz;
if (in_out_vrr->fixed.ramping_active &&
in_out_vrr->fixed.fixed_active) {
/* Do not update vtotals if ramping is already active
* in order to continue ramp from current refresh.
*/
in_out_vrr->fixed.fixed_active = true;
} else {
in_out_vrr->fixed.fixed_active = true;
in_out_vrr->adjust.v_total_min =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->fixed.target_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
in_out_vrr->adjust.v_total_min;
}
} else {
in_out_vrr->state = VRR_STATE_INACTIVE;
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
}
}
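/* Called before a flip is programmed: measure the render time since the
 * previous plane update and, while VRR is active in variable mode, run the
 * BTR logic (or the fixed refresh fallback when BTR is disabled) and update
 * the flip interval workaround state.
 */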
void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
const struct dc_plane_state *plane,
const struct dc_stream_state *stream,
unsigned int curr_time_stamp_in_us,
struct mod_vrr_params *in_out_vrr)
{
struct core_freesync *core_freesync = NULL;
unsigned int last_render_time_in_us = 0;
if (mod_freesync == NULL)
return;
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
if (in_out_vrr->supported &&
in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
last_render_time_in_us = curr_time_stamp_in_us -
plane->time.prev_update_time_in_us;
if (in_out_vrr->btr.btr_enabled) {
apply_below_the_range(core_freesync,
stream,
last_render_time_in_us,
in_out_vrr);
} else {
apply_fixed_refresh(core_freesync,
stream,
last_render_time_in_us,
in_out_vrr);
}
determine_flip_interval_workaround_req(in_out_vrr,
curr_time_stamp_in_us);
}
}
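/* Called on v_update: timestamp the vsync for the flip interval workaround,
 * force nominal v_total while the workaround is armed, program the BTR
 * inserted frame duration while frames remain to be inserted and restore the
 * full range afterwards, and step the static screen ramp when fixed refresh
 * with ramping is active.
 */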
void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
struct mod_vrr_params *in_out_vrr)
{
struct core_freesync *core_freesync = NULL;
unsigned int cur_timestamp_in_us;
unsigned long long cur_tick;
if ((mod_freesync == NULL) || (stream == NULL) || (in_out_vrr == NULL))
return;
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
if (in_out_vrr->supported == false)
return;
cur_tick = dm_get_timestamp(core_freesync->dc->ctx);
cur_timestamp_in_us = (unsigned int)
div_u64(dm_get_elapse_time_in_ns(core_freesync->dc->ctx, cur_tick, 0), 1000);
in_out_vrr->flip_interval.vsyncs_between_flip++;
in_out_vrr->flip_interval.v_update_timestamp_in_us = cur_timestamp_in_us;
if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
(in_out_vrr->flip_interval.flip_interval_workaround_active ||
(!in_out_vrr->flip_interval.flip_interval_workaround_active &&
in_out_vrr->flip_interval.program_flip_interval_workaround))) {
// set freesync vmin vmax to nominal for workaround
in_out_vrr->adjust.v_total_min =
mod_freesync_calc_v_total_from_refresh(
stream, in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
in_out_vrr->adjust.v_total_min;
in_out_vrr->flip_interval.program_flip_interval_workaround = false;
in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup = true;
return;
}
if (in_out_vrr->state != VRR_STATE_ACTIVE_VARIABLE &&
in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup) {
in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup = false;
in_out_vrr->flip_interval.flip_interval_detect_counter = 0;
in_out_vrr->flip_interval.vsyncs_between_flip = 0;
in_out_vrr->flip_interval.vsync_to_flip_in_us = 0;
}
/* Below the Range Logic */
/* Only execute if in fullscreen mode */
if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
in_out_vrr->btr.btr_active) {
		/* TODO: pass in flag for Pre-DCE12 ASIC
		 * in order for frame variable duration to take effect,
		 * it needs to be done one VSYNC early, which is at
		 * frameCounter == 1.
		 * For DCE12 and newer, updates to V_TOTAL_MIN/MAX
		 * will take effect on the current frame
		 */
if (in_out_vrr->btr.frames_to_insert ==
in_out_vrr->btr.frame_counter) {
in_out_vrr->adjust.v_total_min =
calc_v_total_from_duration(stream,
in_out_vrr,
in_out_vrr->btr.inserted_duration_in_us);
in_out_vrr->adjust.v_total_max =
in_out_vrr->adjust.v_total_min;
}
if (in_out_vrr->btr.frame_counter > 0)
in_out_vrr->btr.frame_counter--;
/* Restore FreeSync */
if (in_out_vrr->btr.frame_counter == 0) {
in_out_vrr->adjust.v_total_min =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
}
}
/* If in fullscreen freesync mode or in video, do not program
* static screen ramp values
*/
if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE)
in_out_vrr->fixed.ramping_active = false;
/* Gradual Static Screen Ramping Logic
* Execute if ramp is active and user enabled freesync static screen
*/
if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED &&
in_out_vrr->fixed.ramping_active) {
update_v_total_for_static_ramp(
core_freesync, stream, in_out_vrr);
}
}
void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
const struct mod_vrr_params *vrr,
unsigned int *v_total_min, unsigned int *v_total_max,
unsigned int *event_triggers,
unsigned int *window_min, unsigned int *window_max,
unsigned int *lfc_mid_point_in_us,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us)
{
if (mod_freesync == NULL)
return;
if (vrr->supported) {
*v_total_min = vrr->adjust.v_total_min;
*v_total_max = vrr->adjust.v_total_max;
*event_triggers = 0;
*lfc_mid_point_in_us = vrr->btr.mid_point_in_us;
*inserted_frames = vrr->btr.frames_to_insert;
*inserted_duration_in_us = vrr->btr.inserted_duration_in_us;
}
}
unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream)
{
unsigned long long nominal_field_rate_in_uhz = 0;
unsigned int total = stream->timing.h_total * stream->timing.v_total;
/* Calculate nominal field rate for stream, rounded up to nearest integer */
nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz;
nominal_field_rate_in_uhz *= 100000000ULL;
nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
return nominal_field_rate_in_uhz;
}
unsigned long long mod_freesync_calc_field_rate_from_timing(
unsigned int vtotal, unsigned int htotal, unsigned int pix_clk)
{
unsigned long long field_rate_in_uhz = 0;
unsigned int total = htotal * vtotal;
/* Calculate nominal field rate for stream, rounded up to nearest integer */
field_rate_in_uhz = pix_clk;
field_rate_in_uhz *= 1000000ULL;
field_rate_in_uhz = div_u64(field_rate_in_uhz, total);
return field_rate_in_uhz;
}
bool mod_freesync_get_freesync_enabled(struct mod_vrr_params *pVrr)
{
return (pVrr->state != VRR_STATE_UNSUPPORTED) && (pVrr->state != VRR_STATE_DISABLED);
}
bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
uint32_t max_refresh_cap_in_uhz,
uint32_t nominal_field_rate_in_uhz)
{
	/* Typically the calculated nominal refresh rate can have a fractional part.
* Allow for some rounding error of actual video timing by taking floor
* of caps and request. Round the nominal refresh rate.
*
* Dividing will convert everything to units in Hz although input
* variable name is in uHz!
*
	 * Also note, this takes care of rounding error on the nominal refresh,
	 * which we only expect to be off by a small amount,
	 * such as < 0.1 Hz, i.e. 143.9xxx or 144.1xxx.
*
* Example 1. Caps Min = 40 Hz, Max = 144 Hz
* Request Min = 40 Hz, Max = 144 Hz
* Nominal = 143.5x Hz rounded to 144 Hz
* This function should allow this as valid request
*
* Example 2. Caps Min = 40 Hz, Max = 144 Hz
* Request Min = 40 Hz, Max = 144 Hz
* Nominal = 144.4x Hz rounded to 144 Hz
* This function should allow this as valid request
*
* Example 3. Caps Min = 40 Hz, Max = 144 Hz
* Request Min = 40 Hz, Max = 144 Hz
* Nominal = 120.xx Hz rounded to 120 Hz
* This function should return NOT valid since the requested
* max is greater than current timing's nominal
*
* Example 4. Caps Min = 40 Hz, Max = 120 Hz
* Request Min = 40 Hz, Max = 120 Hz
* Nominal = 144.xx Hz rounded to 144 Hz
* This function should return NOT valid since the nominal
* is greater than the capability's max refresh
*/
nominal_field_rate_in_uhz =
div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
min_refresh_cap_in_uhz /= 1000000;
max_refresh_cap_in_uhz /= 1000000;
/* Check nominal is within range */
if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz ||
nominal_field_rate_in_uhz < min_refresh_cap_in_uhz)
return false;
/* If nominal is less than max, limit the max allowed refresh rate */
if (nominal_field_rate_in_uhz < max_refresh_cap_in_uhz)
max_refresh_cap_in_uhz = nominal_field_rate_in_uhz;
/* Check min is within range */
if (min_refresh_cap_in_uhz > max_refresh_cap_in_uhz)
return false;
/* For variable range, check for at least 10 Hz range */
if (nominal_field_rate_in_uhz - min_refresh_cap_in_uhz < 10)
return false;
return true;
}
| linux-master | drivers/gpu/drm/amd/display/modules/freesync/freesync.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hdcp.h"
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define HDCP_I2C_ADDR 0x3a	/* 0x74 >> 1 */
#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
#define DP_CP_IRQ (1 << 2)
enum mod_hdcp_ddc_message_id {
MOD_HDCP_MESSAGE_ID_INVALID = -1,
/* HDCP 1.4 */
MOD_HDCP_MESSAGE_ID_READ_BKSV = 0,
MOD_HDCP_MESSAGE_ID_READ_RI_R0,
MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
MOD_HDCP_MESSAGE_ID_WRITE_AN,
MOD_HDCP_MESSAGE_ID_READ_VH_X,
MOD_HDCP_MESSAGE_ID_READ_VH_0,
MOD_HDCP_MESSAGE_ID_READ_VH_1,
MOD_HDCP_MESSAGE_ID_READ_VH_2,
MOD_HDCP_MESSAGE_ID_READ_VH_3,
MOD_HDCP_MESSAGE_ID_READ_VH_4,
MOD_HDCP_MESSAGE_ID_READ_BCAPS,
MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
MOD_HDCP_MESSAGE_ID_READ_BINFO,
/* HDCP 2.2 */
MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
MOD_HDCP_MESSAGE_ID_RX_CAPS,
MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
MOD_HDCP_MESSAGE_ID_MAX
};
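/* Per-message addressing used by read()/write() below: HDMI HDCP registers
 * are accessed via an I2C offset at HDCP_I2C_ADDR, DP HDCP registers via a
 * DPCD address.
 */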
static const uint8_t hdcp_i2c_offsets[] = {
[MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
[MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
[MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
[MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
[MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
[MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
[MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
[MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
[MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
[MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
[MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
[MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
[MOD_HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
[MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
[MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
[MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0
};
static const uint32_t hdcp_dpcd_addrs[] = {
[MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
[MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
[MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
[MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
[MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
[MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
[MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
[MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
[MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
[MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
[MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
[MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
[MOD_HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
[MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
[MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
[MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
[MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
};
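/* Read an HDCP message from the receiver: for DP, issue DPCD reads in chunks
 * of at most HDCP_MAX_AUX_TRANSACTION_SIZE bytes starting at the message's
 * DPCD address; for HDMI, issue a single I2C read from the message's offset.
 */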
static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
enum mod_hdcp_ddc_message_id msg_id,
uint8_t *buf,
uint32_t buf_len)
{
bool success = true;
uint32_t cur_size = 0;
uint32_t data_offset = 0;
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
hdcp_dpcd_addrs[msg_id] + data_offset,
buf + data_offset,
cur_size);
if (!success)
break;
buf_len -= cur_size;
data_offset += cur_size;
}
} else {
success = hdcp->config.ddc.funcs.read_i2c(
hdcp->config.ddc.handle,
HDCP_I2C_ADDR,
hdcp_i2c_offsets[msg_id],
buf,
(uint32_t)buf_len);
}
return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
}
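/* Like read(), but split the transfer into read_size chunks so each chunk
 * restarts at the message's base address; used for FIFO-style registers such
 * as the DP KSV FIFO.
 */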
static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp,
enum mod_hdcp_ddc_message_id msg_id,
uint8_t *buf,
uint32_t buf_len,
uint8_t read_size)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE;
uint32_t cur_size = 0;
uint32_t data_offset = 0;
while (buf_len > 0) {
cur_size = MIN(buf_len, read_size);
status = read(hdcp, msg_id, buf + data_offset, cur_size);
if (status != MOD_HDCP_STATUS_SUCCESS)
break;
buf_len -= cur_size;
data_offset += cur_size;
}
return status;
}
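/* Write an HDCP message to the receiver: for DP, issue chunked DPCD writes;
 * for HDMI, prepend the I2C register offset in hdcp->buf before sending the
 * payload in a single I2C transaction.
 */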
static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
enum mod_hdcp_ddc_message_id msg_id,
uint8_t *buf,
uint32_t buf_len)
{
bool success = true;
uint32_t cur_size = 0;
uint32_t data_offset = 0;
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
success = hdcp->config.ddc.funcs.write_dpcd(
hdcp->config.ddc.handle,
hdcp_dpcd_addrs[msg_id] + data_offset,
buf + data_offset,
cur_size);
if (!success)
break;
buf_len -= cur_size;
data_offset += cur_size;
}
} else {
hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
memmove(&hdcp->buf[1], buf, buf_len);
success = hdcp->config.ddc.funcs.write_i2c(
hdcp->config.ddc.handle,
HDCP_I2C_ADDR,
hdcp->buf,
(uint32_t)(buf_len+1));
}
return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
}
enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp)
{
return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV,
hdcp->auth.msg.hdcp1.bksv,
sizeof(hdcp->auth.msg.hdcp1.bksv));
}
enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp)
{
return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS,
&hdcp->auth.msg.hdcp1.bcaps,
sizeof(hdcp->auth.msg.hdcp1.bcaps));
}
enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
1);
else
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
sizeof(hdcp->auth.msg.hdcp1.bstatus));
return status;
}
enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp)
{
return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0,
(uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
sizeof(hdcp->auth.msg.hdcp1.r0p));
}
/* special case, reading repeatedly at the same address, don't use read() */
enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
hdcp->auth.msg.hdcp1.ksvlist,
hdcp->auth.msg.hdcp1.ksvlist_size,
KSV_READ_SIZE);
else
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
(uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist,
hdcp->auth.msg.hdcp1.ksvlist_size);
return status;
}
enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0,
&hdcp->auth.msg.hdcp1.vp[0], 4);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1,
&hdcp->auth.msg.hdcp1.vp[4], 4);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2,
&hdcp->auth.msg.hdcp1.vp[8], 4);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3,
&hdcp->auth.msg.hdcp1.vp[12], 4);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4,
&hdcp->auth.msg.hdcp1.vp[16], 4);
out:
return status;
}
enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO,
(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
else
status = MOD_HDCP_STATUS_INVALID_OPERATION;
return status;
}
enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp)
{
return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
hdcp->auth.msg.hdcp1.aksv,
sizeof(hdcp->auth.msg.hdcp1.aksv));
}
enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp)
{
return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
&hdcp->auth.msg.hdcp1.ainfo,
sizeof(hdcp->auth.msg.hdcp1.ainfo));
}
enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
{
return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN,
hdcp->auth.msg.hdcp1.an,
sizeof(hdcp->auth.msg.hdcp1.an));
}
enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = MOD_HDCP_STATUS_INVALID_OPERATION;
else
status = read(hdcp, MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
&hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
return status;
}
enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (!is_dp_hdcp(hdcp))
status = MOD_HDCP_STATUS_INVALID_OPERATION;
else
status = read(hdcp, MOD_HDCP_MESSAGE_ID_RX_CAPS,
hdcp->auth.msg.hdcp2.rxcaps_dp,
sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
return status;
}
enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
&hdcp->auth.msg.hdcp2.rxstatus_dp,
1);
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
sizeof(hdcp->auth.msg.hdcp2.rxstatus));
}
return status;
}
enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
hdcp->auth.msg.hdcp2.ake_cert+1,
sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
hdcp->auth.msg.hdcp2.ake_cert,
sizeof(hdcp->auth.msg.hdcp2.ake_cert));
}
return status;
}
enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
hdcp->auth.msg.hdcp2.ake_h_prime+1,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
hdcp->auth.msg.hdcp2.ake_h_prime,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
}
return status;
}
enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
hdcp->auth.msg.hdcp2.ake_pairing_info+1,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
hdcp->auth.msg.hdcp2.ake_pairing_info,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
}
return status;
}
enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
hdcp->auth.msg.hdcp2.lc_l_prime+1,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
hdcp->auth.msg.hdcp2.lc_l_prime,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
}
return status;
}
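/* For DP the receiver ID list length is not known up front: read the first
 * HDCP_MAX_AUX_TRANSACTION_SIZE bytes to extract the device count from the
 * message header bytes, derive the list size (21 + 5 * device_count, capped
 * to the buffer), then fetch the remainder from the PART2 DPCD offset.
 * HDMI reads the whole list in one transaction.
 */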
enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_dp_hdcp(hdcp)) {
uint32_t device_count = 0;
uint32_t rx_id_list_size = 0;
uint32_t bytes_read = 0;
hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
hdcp->auth.msg.hdcp2.rx_id_list+1,
HDCP_MAX_AUX_TRANSACTION_SIZE);
if (status == MOD_HDCP_STATUS_SUCCESS) {
bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE;
device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
(HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
rx_id_list_size = MIN((21 + 5 * device_count),
(sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1));
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read,
(rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE);
}
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
hdcp->auth.msg.hdcp2.rx_id_list,
hdcp->auth.msg.hdcp2.rx_id_list_size);
}
return status;
}
enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
}
return status;
}
enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
hdcp->auth.msg.hdcp2.ake_init+1,
sizeof(hdcp->auth.msg.hdcp2.ake_init)-1);
else
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
hdcp->auth.msg.hdcp2.ake_init,
sizeof(hdcp->auth.msg.hdcp2.ake_init));
return status;
}
enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
hdcp->auth.msg.hdcp2.ake_no_stored_km+1,
sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)-1);
else
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
hdcp->auth.msg.hdcp2.ake_no_stored_km,
sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
return status;
}
enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
hdcp->auth.msg.hdcp2.ake_stored_km+1,
sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)-1);
else
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
hdcp->auth.msg.hdcp2.ake_stored_km,
sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
return status;
}
enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
hdcp->auth.msg.hdcp2.lc_init+1,
sizeof(hdcp->auth.msg.hdcp2.lc_init)-1);
else
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
hdcp->auth.msg.hdcp2.lc_init,
sizeof(hdcp->auth.msg.hdcp2.lc_init));
return status;
}
enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp,
MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
hdcp->auth.msg.hdcp2.ske_eks+1,
sizeof(hdcp->auth.msg.hdcp2.ske_eks)-1);
else
status = write(hdcp,
MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
hdcp->auth.msg.hdcp2.ske_eks,
sizeof(hdcp->auth.msg.hdcp2.ske_eks));
return status;
}
enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
hdcp->auth.msg.hdcp2.repeater_auth_ack+1,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)-1);
else
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
hdcp->auth.msg.hdcp2.repeater_auth_ack,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
return status;
}
enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
hdcp->auth.msg.hdcp2.repeater_auth_stream_manage+1,
hdcp->auth.msg.hdcp2.stream_manage_size-1);
else
status = write(hdcp,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
hdcp->auth.msg.hdcp2.stream_manage_size);
return status;
}
enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
hdcp->auth.msg.hdcp2.content_stream_type_dp+1,
sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)-1);
else
status = MOD_HDCP_STATUS_INVALID_OPERATION;
return status;
}
enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
{
uint8_t clear_cp_irq_bit = DP_CP_IRQ;
uint32_t size = 1;
if (is_dp_hdcp(hdcp)) {
		uint32_t cp_irq_addrs = (hdcp->connection.link.dp.rev >= 0x14)
				? DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 : DP_DEVICE_SERVICE_IRQ_VECTOR;
return hdcp->config.ddc.funcs.write_dpcd(hdcp->config.ddc.handle, cp_irq_addrs,
&clear_cp_irq_bit, size) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
}
return MOD_HDCP_STATUS_INVALID_OPERATION;
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hdcp.h"
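/* HDCP 2.x authentication state machine for HDMI (non-DP) links. Each state
 * inspects the transition inputs gathered for the current event and either
 * advances to the next state, schedules a retry/poll via callback_in_ms()
 * and set_watchdog_in_ms(), or restarts authentication with
 * fail_and_restart_in_ms().
 */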
enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_connection *conn = &hdcp->connection;
struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
switch (current_state(hdcp)) {
case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
if (input->hdcp2version_read != PASS ||
input->hdcp2_capable_check != PASS) {
adjust->hdcp2.disable = 1;
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A1_SEND_AKE_INIT);
}
break;
case H2_A1_SEND_AKE_INIT:
if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ake_init_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_watchdog_in_ms(hdcp, 100, output);
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A1_VALIDATE_AKE_CERT);
break;
case H2_A1_VALIDATE_AKE_CERT:
if (input->ake_cert_available != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1A-08: consider ake timeout a failure */
/* some hdmi receivers are not ready for HDCP
* immediately after video becomes active,
* delay 1s before retry on first HDCP message
* timeout.
*/
fail_and_restart_in_ms(1000, &status, output);
} else {
/* continue ake cert polling*/
callback_in_ms(10, output);
increment_stay_counter(hdcp);
}
break;
} else if (input->ake_cert_read != PASS ||
input->ake_cert_validation != PASS) {
/*
* 1A-09: consider invalid ake cert a failure
* 1A-10: consider receiver id listed in SRM a failure
*/
fail_and_restart_in_ms(0, &status, output);
break;
}
if (conn->is_km_stored &&
!adjust->hdcp2.force_no_stored_km) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A1_SEND_STORED_KM);
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A1_SEND_NO_STORED_KM);
}
break;
case H2_A1_SEND_NO_STORED_KM:
if (input->no_stored_km_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
if (adjust->hdcp2.increase_h_prime_timeout)
set_watchdog_in_ms(hdcp, 2000, output);
else
set_watchdog_in_ms(hdcp, 1000, output);
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A1_READ_H_PRIME);
break;
case H2_A1_READ_H_PRIME:
if (input->h_prime_available != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1A-11-3: consider h' timeout a failure */
fail_and_restart_in_ms(1000, &status, output);
} else {
/* continue h' polling */
callback_in_ms(100, output);
increment_stay_counter(hdcp);
}
break;
} else if (input->h_prime_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_watchdog_in_ms(hdcp, 200, output);
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
break;
case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
if (input->pairing_available != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1A-12: consider pairing info timeout
* a failure
*/
fail_and_restart_in_ms(0, &status, output);
} else {
/* continue pairing info polling */
callback_in_ms(20, output);
increment_stay_counter(hdcp);
}
break;
} else if (input->pairing_info_read != PASS ||
input->h_prime_validation != PASS) {
/* 1A-11-1: consider invalid h' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
case H2_A1_SEND_STORED_KM:
if (input->stored_km_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_watchdog_in_ms(hdcp, 200, output);
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A1_VALIDATE_H_PRIME);
break;
case H2_A1_VALIDATE_H_PRIME:
if (input->h_prime_available != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1A-11-2: consider h' timeout a failure */
fail_and_restart_in_ms(1000, &status, output);
} else {
/* continue h' polling */
callback_in_ms(20, output);
increment_stay_counter(hdcp);
}
break;
} else if (input->h_prime_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->h_prime_validation != PASS) {
/* 1A-11-1: consider invalid h' a failure */
adjust->hdcp2.force_no_stored_km = 1;
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
case H2_A2_LOCALITY_CHECK:
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
input->lc_init_write != PASS ||
input->l_prime_available_poll != PASS ||
input->l_prime_read != PASS) {
/*
* 1A-05: consider disconnection after LC init a failure
* 1A-13-1: consider invalid l' a failure
* 1A-13-2: consider l' timeout a failure
*/
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
if (conn->is_repeater) {
set_watchdog_in_ms(hdcp, 3000, output);
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A6_WAIT_FOR_RX_ID_LIST);
} else {
/* some CTS equipment requires a delay GREATER than
* 200 ms, so delay 210 ms instead of 200 ms
*/
callback_in_ms(210, output);
set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
}
break;
case H2_ENABLE_ENCRYPTION:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS) {
/*
* 1A-07: restart hdcp on REAUTH_REQ
* 1B-08: restart hdcp on REAUTH_REQ
*/
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
} else if (input->enable_encryption != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A5_AUTHENTICATED);
set_auth_complete(hdcp, output);
break;
case H2_A5_AUTHENTICATED:
if (input->rxstatus_read == FAIL ||
input->reauth_request_check == FAIL) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
}
callback_in_ms(500, output);
increment_stay_counter(hdcp);
break;
case H2_A6_WAIT_FOR_RX_ID_LIST:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (!event_ctx->rx_id_list_ready) {
if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1B-02: consider rx id list timeout a failure */
/* some CTS equipment's actual timeout
* measurement is slightly greater than 3000 ms.
				 * Delay 100 ms to ensure it has fully timed out
* before re-authentication.
*/
fail_and_restart_in_ms(100, &status, output);
} else {
callback_in_ms(300, output);
increment_stay_counter(hdcp);
}
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS ||
input->rx_id_list_read != PASS ||
input->device_count_check != PASS ||
input->rx_id_list_validation != PASS ||
input->repeater_auth_ack_write != PASS) {
/* 1B-03: consider invalid v' a failure
* 1B-04: consider MAX_DEVS_EXCEEDED a failure
* 1B-05: consider MAX_CASCADE_EXCEEDED a failure
* 1B-06: consider invalid seq_num_V a failure
* 1B-09: consider seq_num_V rollover a failure
*/
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
break;
case H2_A9_SEND_STREAM_MANAGEMENT:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
} else if (input->prepare_stream_manage != PASS ||
input->stream_manage_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_watchdog_in_ms(hdcp, 100, output);
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A9_VALIDATE_STREAM_READY);
break;
case H2_A9_VALIDATE_STREAM_READY:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
} else if (input->stream_ready_available != PASS) {
if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1B-10-2: restart content stream management on
* stream ready timeout
*/
hdcp->auth.count.stream_management_retry_count++;
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
} else {
callback_in_ms(10, output);
increment_stay_counter(hdcp);
}
break;
} else if (input->stream_ready_read != PASS ||
input->stream_ready_validation != PASS) {
/*
* 1B-10-1: restart content stream management
* on invalid M'
*/
if (hdcp->auth.count.stream_management_retry_count > 10) {
fail_and_restart_in_ms(0, &status, output);
} else {
hdcp->auth.count.stream_management_retry_count++;
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
}
break;
}
callback_in_ms(200, output);
set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
fail_and_restart_in_ms(0, &status, output);
break;
}
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_connection *conn = &hdcp->connection;
struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
switch (current_state(hdcp)) {
case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
if (input->rx_caps_read_dp != PASS ||
input->hdcp2_capable_check != PASS) {
adjust->hdcp2.disable = 1;
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A1_SEND_AKE_INIT);
}
break;
case D2_A1_SEND_AKE_INIT:
if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ake_init_write != PASS) {
/* possibly display not ready */
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(100, output);
set_state_id(hdcp, output, D2_A1_VALIDATE_AKE_CERT);
break;
case D2_A1_VALIDATE_AKE_CERT:
if (input->ake_cert_read != PASS ||
input->ake_cert_validation != PASS) {
/*
* 1A-08: consider invalid ake cert a failure
* 1A-09: consider receiver id listed in SRM a failure
*/
fail_and_restart_in_ms(0, &status, output);
break;
}
if (conn->is_km_stored &&
!adjust->hdcp2.force_no_stored_km) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A1_SEND_STORED_KM);
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A1_SEND_NO_STORED_KM);
}
break;
case D2_A1_SEND_NO_STORED_KM:
if (input->no_stored_km_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
if (adjust->hdcp2.increase_h_prime_timeout)
set_watchdog_in_ms(hdcp, 2000, output);
else
set_watchdog_in_ms(hdcp, 1000, output);
set_state_id(hdcp, output, D2_A1_READ_H_PRIME);
break;
case D2_A1_READ_H_PRIME:
if (input->h_prime_available != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
/* 1A-10-3: consider h' timeout a failure */
fail_and_restart_in_ms(1000, &status, output);
else
increment_stay_counter(hdcp);
break;
} else if (input->h_prime_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_watchdog_in_ms(hdcp, 200, output);
set_state_id(hdcp, output, D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
break;
case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
if (input->pairing_available != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
/*
* 1A-11: consider pairing info timeout
* a failure
*/
fail_and_restart_in_ms(0, &status, output);
else
increment_stay_counter(hdcp);
break;
} else if (input->pairing_info_read != PASS ||
input->h_prime_validation != PASS) {
/* 1A-10-1: consider invalid h' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
case D2_A1_SEND_STORED_KM:
if (input->stored_km_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_watchdog_in_ms(hdcp, 200, output);
set_state_id(hdcp, output, D2_A1_VALIDATE_H_PRIME);
break;
case D2_A1_VALIDATE_H_PRIME:
if (input->h_prime_available != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
/* 1A-10-2: consider h' timeout a failure */
fail_and_restart_in_ms(1000, &status, output);
else
increment_stay_counter(hdcp);
break;
} else if (input->h_prime_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->h_prime_validation != PASS) {
/* 1A-10-1: consider invalid h' a failure */
adjust->hdcp2.force_no_stored_km = 1;
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
case D2_A2_LOCALITY_CHECK:
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
input->lc_init_write != PASS ||
input->l_prime_read != PASS) {
/* 1A-12: consider invalid l' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
if (conn->is_repeater) {
set_watchdog_in_ms(hdcp, 3000, output);
set_state_id(hdcp, output, D2_A6_WAIT_FOR_RX_ID_LIST);
} else {
callback_in_ms(1, output);
set_state_id(hdcp, output, D2_SEND_CONTENT_STREAM_TYPE);
}
break;
case D2_SEND_CONTENT_STREAM_TYPE:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS ||
input->link_integrity_check_dp != PASS ||
input->content_stream_type_write != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(210, output);
set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
break;
case D2_ENABLE_ENCRYPTION:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS ||
input->link_integrity_check_dp != PASS) {
/*
* 1A-07: restart hdcp on REAUTH_REQ
* 1B-08: restart hdcp on REAUTH_REQ
*/
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
} else if (input->enable_encryption != PASS ||
(is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_state_id(hdcp, output, D2_A5_AUTHENTICATED);
set_auth_complete(hdcp, output);
break;
case D2_A5_AUTHENTICATED:
if (input->rxstatus_read == FAIL ||
input->reauth_request_check == FAIL) {
fail_and_restart_in_ms(100, &status, output);
break;
} else if (input->link_integrity_check_dp == FAIL) {
if (hdcp->connection.hdcp2_retry_count >= 1)
adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
}
increment_stay_counter(hdcp);
break;
case D2_A6_WAIT_FOR_RX_ID_LIST:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS ||
input->link_integrity_check_dp != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (!event_ctx->rx_id_list_ready) {
if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
/* 1B-02: consider rx id list timeout a failure */
fail_and_restart_in_ms(0, &status, output);
else
increment_stay_counter(hdcp);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS ||
input->link_integrity_check_dp != PASS ||
input->rx_id_list_read != PASS ||
input->device_count_check != PASS ||
input->rx_id_list_validation != PASS ||
input->repeater_auth_ack_write != PASS) {
/*
* 1B-03: consider invalid v' a failure
* 1B-04: consider MAX_DEVS_EXCEEDED a failure
* 1B-05: consider MAX_CASCADE_EXCEEDED a failure
* 1B-06: consider invalid seq_num_V a failure
* 1B-09: consider seq_num_V rollover a failure
*/
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
break;
case D2_A9_SEND_STREAM_MANAGEMENT:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS ||
input->link_integrity_check_dp != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
} else if (input->prepare_stream_manage != PASS ||
input->stream_manage_write != PASS) {
if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK)
fail_and_restart_in_ms(0, &status, output);
else
increment_stay_counter(hdcp);
break;
}
callback_in_ms(100, output);
set_state_id(hdcp, output, D2_A9_VALIDATE_STREAM_READY);
break;
case D2_A9_VALIDATE_STREAM_READY:
if (input->rxstatus_read != PASS ||
input->reauth_request_check != PASS ||
input->link_integrity_check_dp != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
break;
} else if (input->stream_ready_read != PASS ||
input->stream_ready_validation != PASS) {
/*
* 1B-10-1: restart content stream management
* on invalid M'
* 1B-10-2: consider stream ready timeout a failure
*/
if (hdcp->auth.count.stream_management_retry_count > 10) {
fail_and_restart_in_ms(0, &status, output);
} else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) {
hdcp->auth.count.stream_management_retry_count++;
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
} else {
increment_stay_counter(hdcp);
}
break;
}
callback_in_ms(200, output);
set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
fail_and_restart_in_ms(0, &status, output);
break;
}
return status;
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c |
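A minimal, self-contained sketch of the transition pattern used in the state machine above: every case label either schedules the next callback, arms a watchdog, or fails and restarts, and then selects the next state. All names here (demo_*, demo_transition) are illustrative only and are not part of the mod_hdcp API.

#include <stdio.h>

enum demo_state { DEMO_INIT, DEMO_WAIT_CERT, DEMO_AUTHENTICATED, DEMO_FAILED };

struct demo_output {
	unsigned int callback_delay_ms;  /* 0 means "run the handler again immediately" */
	unsigned int watchdog_delay_ms;  /* 0 means "no watchdog armed" */
	enum demo_state next_state;
};

/* One transition step: on success advance, otherwise fail and restart. */
static void demo_transition(enum demo_state cur, int step_passed,
			    struct demo_output *out)
{
	switch (cur) {
	case DEMO_INIT:
		out->callback_delay_ms = 100;  /* like callback_in_ms(100, output) */
		out->watchdog_delay_ms = 0;
		out->next_state = step_passed ? DEMO_WAIT_CERT : DEMO_FAILED;
		break;
	case DEMO_WAIT_CERT:
		out->callback_delay_ms = 0;
		out->watchdog_delay_ms = 3000; /* like set_watchdog_in_ms(hdcp, 3000, output) */
		out->next_state = step_passed ? DEMO_AUTHENTICATED : DEMO_FAILED;
		break;
	default:
		out->callback_delay_ms = 0;
		out->watchdog_delay_ms = 0;
		out->next_state = DEMO_FAILED; /* like fail_and_restart_in_ms(0, ...) */
		break;
	}
}

int main(void)
{
	struct demo_output out = { 0 };

	demo_transition(DEMO_INIT, 1, &out);
	printf("next=%d callback=%ums watchdog=%ums\n",
	       (int)out.next_state, out.callback_delay_ms, out.watchdog_delay_ms);
	return 0;
}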
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hdcp.h"
enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_connection *conn = &hdcp->connection;
struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
switch (current_state(hdcp)) {
case H1_A0_WAIT_FOR_ACTIVE_RX:
if (input->bksv_read != PASS || input->bcaps_read != PASS) {
/* 1A-04: repeatedly attempts on port access failure */
callback_in_ms(500, output);
increment_stay_counter(hdcp);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
break;
case H1_A1_EXCHANGE_KSVS:
if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->an_write != PASS ||
input->aksv_write != PASS ||
input->bksv_read != PASS ||
input->bksv_validation != PASS ||
input->ainfo_write == FAIL) {
/* 1A-05: consider invalid bksv a failure */
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(300, output);
set_state_id(hdcp, output,
H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER);
break;
case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
if (input->bcaps_read != PASS ||
input->r0p_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->rx_validation != PASS) {
/* 1A-06: consider invalid r0' a failure */
/* 1A-08: consider bksv listed in SRM a failure */
/*
* some slow RX devices fail rx validation when they are
* not yet ready. give them more time to react before retrying.
*/
fail_and_restart_in_ms(1000, &status, output);
break;
} else if (!conn->is_repeater && input->encryption != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
if (conn->is_repeater) {
callback_in_ms(0, output);
set_watchdog_in_ms(hdcp, 5000, output);
set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY);
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
set_auth_complete(hdcp, output);
}
break;
case H1_A45_AUTHENTICATED:
if (input->link_maintenance == FAIL) {
/* 1A-07: consider invalid ri' a failure */
/* 1A-07a: consider read ri' not returned a failure */
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(500, output);
increment_stay_counter(hdcp);
break;
case H1_A8_WAIT_FOR_READY:
if (input->ready_check != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1B-03: fail hdcp on ksv list READY timeout */
/* prevent black screen in next attempt */
adjust->hdcp1.postpone_encryption = 1;
fail_and_restart_in_ms(0, &status, output);
} else {
/* continue ksv list READY polling */
callback_in_ms(500, output);
increment_stay_counter(hdcp);
}
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A9_READ_KSV_LIST);
break;
case H1_A9_READ_KSV_LIST:
if (input->bstatus_read != PASS ||
input->max_cascade_check != PASS ||
input->max_devs_check != PASS ||
input->device_count_check != PASS ||
input->ksvlist_read != PASS ||
input->vp_read != PASS ||
input->ksvlist_vp_validation != PASS ||
input->encryption != PASS) {
/* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */
/* 1B-05: consider MAX_DEVS_EXCEEDED a failure */
/* 1B-04: consider invalid v' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
set_auth_complete(hdcp, output);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
fail_and_restart_in_ms(0, &status, output);
break;
}
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_connection *conn = &hdcp->connection;
struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
switch (current_state(hdcp)) {
case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
if (input->bcaps_read != PASS) {
/* 1A-04: no authentication on bcaps read failure */
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->hdcp_capable_dp != PASS) {
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
break;
case D1_A1_EXCHANGE_KSVS:
if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->an_write != PASS ||
input->aksv_write != PASS ||
input->bksv_read != PASS ||
input->bksv_validation != PASS ||
input->ainfo_write == FAIL) {
/* 1A-05: consider invalid bksv a failure */
fail_and_restart_in_ms(0, &status, output);
break;
}
set_watchdog_in_ms(hdcp, 100, output);
set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME);
break;
case D1_A23_WAIT_FOR_R0_PRIME:
if (input->bstatus_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->r0p_available_dp != PASS) {
if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
fail_and_restart_in_ms(0, &status, output);
else
increment_stay_counter(hdcp);
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER);
break;
case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
if (input->r0p_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->rx_validation != PASS) {
if (hdcp->state.stay_count < 2 &&
!hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
} else {
/*
* 1A-06: consider invalid r0' a failure
* after 3 attempts.
* 1A-08: consider bksv listed in SRM a failure
*/
/*
* some slow RX devices fail rx validation when they are
* not yet ready. give them more time to react before retrying.
*/
fail_and_restart_in_ms(1000, &status, output);
}
break;
} else if ((!conn->is_repeater && input->encryption != PASS) ||
(!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) {
fail_and_restart_in_ms(200, &status, output);
break;
}
if (conn->is_repeater) {
set_watchdog_in_ms(hdcp, 5000, output);
set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
} else {
set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
set_auth_complete(hdcp, output);
}
break;
case D1_A4_AUTHENTICATED:
if (input->link_integrity_check == FAIL ||
input->reauth_request_check == FAIL) {
/* 1A-07: restart hdcp on a link integrity failure */
fail_and_restart_in_ms(0, &status, output);
break;
}
break;
case D1_A6_WAIT_FOR_READY:
if (input->link_integrity_check == FAIL ||
input->reauth_request_check == FAIL) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ready_check != PASS) {
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1B-04: fail hdcp on ksv list READY timeout */
/* prevent black screen in next attempt */
adjust->hdcp1.postpone_encryption = 1;
fail_and_restart_in_ms(0, &status, output);
} else {
increment_stay_counter(hdcp);
}
break;
}
callback_in_ms(0, output);
set_state_id(hdcp, output, D1_A7_READ_KSV_LIST);
break;
case D1_A7_READ_KSV_LIST:
if (input->binfo_read_dp != PASS ||
input->max_cascade_check != PASS ||
input->max_devs_check != PASS) {
/* 1B-06: consider MAX_DEVS_EXCEEDED a failure */
/* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->device_count_check != PASS) {
/*
* some slow dongles do not update the device count
* as soon as downstream devices are connected.
* give them more time to react.
*/
adjust->hdcp1.postpone_encryption = 1;
fail_and_restart_in_ms(1000, &status, output);
break;
} else if (input->ksvlist_read != PASS ||
input->vp_read != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ksvlist_vp_validation != PASS) {
if (hdcp->state.stay_count < 2 &&
!hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
} else {
/*
* 1B-05: consider invalid v' a failure
* after 3 attempts.
*/
fail_and_restart_in_ms(0, &status, output);
}
break;
} else if (input->encryption != PASS ||
(is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
}
set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
set_auth_complete(hdcp, output);
break;
default:
fail_and_restart_in_ms(0, &status, output);
break;
}
return status;
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c |
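A minimal sketch of the bounded-retry pattern used by the DP HDCP 1.x states above (for example "allow 2 additional retries" on R0'/V' validation): the stay counter is bumped on each soft retry, and the step is only failed hard once the counter reaches its limit. Names are illustrative only.

#include <stdio.h>

struct retry_ctx {
	unsigned int stay_count;   /* retries already spent in this state */
	unsigned int max_retries;  /* extra attempts allowed before a hard fail */
};

/* Returns 1 to retry in place, 0 to fail and restart authentication. */
static int soft_retry_or_fail(struct retry_ctx *ctx)
{
	if (ctx->stay_count < ctx->max_retries) {
		ctx->stay_count++; /* like increment_stay_counter(hdcp) */
		return 1;
	}
	return 0;                  /* like fail_and_restart_in_ms(...) */
}

int main(void)
{
	struct retry_ctx ctx = { .stay_count = 0, .max_retries = 2 };
	int attempt;

	for (attempt = 1; attempt <= 4; attempt++)
		printf("attempt %d: %s\n", attempt,
		       soft_retry_or_fail(&ctx) ? "retry" : "fail and restart");
	return 0;
}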
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hdcp.h"
static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
u8 bksv[sizeof(n)] = { };
memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
n = *(uint64_t *)bksv;
while (n) {
count++;
n &= (n - 1);
}
return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
}
static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
{
if (is_dp_hdcp(hdcp))
return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_READY) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
return (hdcp->auth.msg.hdcp1.bcaps & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
}
static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
{
return (hdcp->auth.msg.hdcp1.bcaps & DP_BCAPS_HDCP_CAPABLE) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
}
static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
status = (hdcp->auth.msg.hdcp1.bstatus &
DP_BSTATUS_R0_PRIME_READY) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
} else {
status = MOD_HDCP_STATUS_INVALID_OPERATION;
}
return status;
}
static inline enum mod_hdcp_status check_link_integrity_dp(
struct mod_hdcp *hdcp)
{
return (hdcp->auth.msg.hdcp1.bstatus &
DP_BSTATUS_LINK_FAILURE) ?
MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
struct mod_hdcp *hdcp)
{
return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_REAUTH_REQ) ?
MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
MOD_HDCP_STATUS_SUCCESS;
}
static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp >> 8)
? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
: MOD_HDCP_STATUS_SUCCESS;
else
status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus >> 8)
? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
: MOD_HDCP_STATUS_SUCCESS;
return status;
}
static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp) ?
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
else
status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus) ?
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
return status;
}
static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
{
return is_dp_hdcp(hdcp) ?
DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.binfo_dp) :
DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.bstatus);
}
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* do not attempt authentication when the reported device count is 0 */
if (0 == get_device_count(hdcp)) {
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
}
/* Some MST displays may report the immediate repeater's internal panel as an
 * HDCP RX even though that panel is possibly not included in DEVICE_COUNT,
 * so allow for one extra device: 1 + get_device_count(hdcp).
 * The adjusted count must be greater than or equal to the tracked hdcp displays.
*/
return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
&input->bksv_read, &status,
hdcp, "bksv_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
&input->bcaps_read, &status,
hdcp, "bcaps_read"))
goto out;
out:
return status;
}
static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
&input->create_session, &status,
hdcp, "create_session"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_an,
&input->an_write, &status,
hdcp, "an_write"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv,
&input->aksv_write, &status,
hdcp, "aksv_write"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
&input->bksv_read, &status,
hdcp, "bksv_read"))
goto out;
if (!mod_hdcp_execute_and_set(validate_bksv,
&input->bksv_validation, &status,
hdcp, "bksv_validation"))
goto out;
if (hdcp->auth.msg.hdcp1.ainfo) {
if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo,
&input->ainfo_write, &status,
hdcp, "ainfo_write"))
goto out;
}
out:
return status;
}
static enum mod_hdcp_status computations_validate_rx_test_for_repeater(
struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p,
&input->r0p_read, &status,
hdcp, "r0p_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx,
&input->rx_validation, &status,
hdcp, "rx_validation"))
goto out;
if (hdcp->connection.is_repeater) {
if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption)
if (!mod_hdcp_execute_and_set(
mod_hdcp_hdcp1_enable_encryption,
&input->encryption, &status,
hdcp, "encryption"))
goto out;
} else {
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
&input->encryption, &status,
hdcp, "encryption"))
goto out;
if (is_dp_mst_hdcp(hdcp))
if (!mod_hdcp_execute_and_set(
mod_hdcp_hdcp1_enable_dp_stream_encryption,
&input->stream_encryption_dp, &status,
hdcp, "stream_encryption_dp"))
goto out;
}
out:
return status;
}
static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
&input->link_maintenance, &status,
hdcp, "link_maintenance");
out:
return status;
}
static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (is_dp_hdcp(hdcp)) {
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
&input->bstatus_read, &status,
hdcp, "bstatus_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
&input->link_integrity_check, &status,
hdcp, "link_integrity_check"))
goto out;
if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
&input->reauth_request_check, &status,
hdcp, "reauth_request_check"))
goto out;
} else {
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
&input->bcaps_read, &status,
hdcp, "bcaps_read"))
goto out;
}
if (!mod_hdcp_execute_and_set(check_ksv_ready,
&input->ready_check, &status,
hdcp, "ready_check"))
goto out;
out:
return status;
}
static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
uint8_t device_count;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (is_dp_hdcp(hdcp)) {
if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo,
&input->binfo_read_dp, &status,
hdcp, "binfo_read_dp"))
goto out;
} else {
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
&input->bstatus_read, &status,
hdcp, "bstatus_read"))
goto out;
}
if (!mod_hdcp_execute_and_set(check_no_max_cascade,
&input->max_cascade_check, &status,
hdcp, "max_cascade_check"))
goto out;
if (!mod_hdcp_execute_and_set(check_no_max_devs,
&input->max_devs_check, &status,
hdcp, "max_devs_check"))
goto out;
if (!mod_hdcp_execute_and_set(check_device_count,
&input->device_count_check, &status,
hdcp, "device_count_check"))
goto out;
device_count = get_device_count(hdcp);
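/* each KSV entry in the downstream list is 40 bits (5 bytes) */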
hdcp->auth.msg.hdcp1.ksvlist_size = device_count*5;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist,
&input->ksvlist_read, &status,
hdcp, "ksvlist_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp,
&input->vp_read, &status,
hdcp, "vp_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp,
&input->ksvlist_vp_validation, &status,
hdcp, "ksvlist_vp_validation"))
goto out;
if (input->encryption != PASS)
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
&input->encryption, &status,
hdcp, "encryption"))
goto out;
if (is_dp_mst_hdcp(hdcp))
if (!mod_hdcp_execute_and_set(
mod_hdcp_hdcp1_enable_dp_stream_encryption,
&input->stream_encryption_dp, &status,
hdcp, "stream_encryption_dp"))
goto out;
out:
return status;
}
static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
&input->bcaps_read, &status,
hdcp, "bcaps_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp,
&input->hdcp_capable_dp, &status,
hdcp, "hdcp_capable_dp"))
goto out;
out:
return status;
}
static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
&input->bstatus_read, &status,
hdcp, "bstatus_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_r0p_available_dp,
&input->r0p_available_dp, &status,
hdcp, "r0p_available_dp"))
goto out;
out:
return status;
}
static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
event_ctx->unexpected_event = 1;
goto out;
}
if (status == MOD_HDCP_STATUS_SUCCESS)
mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
&input->bstatus_read, &status,
hdcp, "bstatus_read");
if (status == MOD_HDCP_STATUS_SUCCESS)
mod_hdcp_execute_and_set(check_link_integrity_dp,
&input->link_integrity_check, &status,
hdcp, "link_integrity_check");
if (status == MOD_HDCP_STATUS_SUCCESS)
mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
&input->reauth_request_check, &status,
hdcp, "reauth_request_check");
out:
return status;
}
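/*
 * Run one authentication step, record its PASS/FAIL result in the transition
 * input (tracing only when the flag changes), and return nonzero on success
 * so callers can chain steps with an early-exit goto.
 */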
uint8_t mod_hdcp_execute_and_set(
mod_hdcp_action func, uint8_t *flag,
enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str)
{
*status = func(hdcp);
if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) {
HDCP_INPUT_PASS_TRACE(hdcp, str);
*flag = PASS;
} else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) {
HDCP_INPUT_FAIL_TRACE(hdcp, str);
*flag = FAIL;
}
return (*status == MOD_HDCP_STATUS_SUCCESS);
}
enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
switch (current_state(hdcp)) {
case H1_A0_WAIT_FOR_ACTIVE_RX:
status = wait_for_active_rx(hdcp, event_ctx, input);
break;
case H1_A1_EXCHANGE_KSVS:
status = exchange_ksvs(hdcp, event_ctx, input);
break;
case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
status = computations_validate_rx_test_for_repeater(hdcp,
event_ctx, input);
break;
case H1_A45_AUTHENTICATED:
status = authenticated(hdcp, event_ctx, input);
break;
case H1_A8_WAIT_FOR_READY:
status = wait_for_ready(hdcp, event_ctx, input);
break;
case H1_A9_READ_KSV_LIST:
status = read_ksv_list(hdcp, event_ctx, input);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
break;
}
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
switch (current_state(hdcp)) {
case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
break;
case D1_A1_EXCHANGE_KSVS:
status = exchange_ksvs(hdcp, event_ctx, input);
break;
case D1_A23_WAIT_FOR_R0_PRIME:
status = wait_for_r0_prime_dp(hdcp, event_ctx, input);
break;
case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
status = computations_validate_rx_test_for_repeater(
hdcp, event_ctx, input);
break;
case D1_A4_AUTHENTICATED:
status = authenticated_dp(hdcp, event_ctx, input);
break;
case D1_A6_WAIT_FOR_READY:
status = wait_for_ready(hdcp, event_ctx, input);
break;
case D1_A7_READ_KSV_LIST:
status = read_ksv_list(hdcp, event_ctx, input);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
break;
}
return status;
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c |
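A minimal sketch of the execute-and-record chaining used throughout the execution handlers above: each step runs, its PASS/FAIL flag is stored for the transition logic, and the chain stops at the first failure. The demo_* names and the two stub actions are hypothetical stand-ins, not driver functions.

#include <stdio.h>

enum demo_status { DEMO_OK, DEMO_ERR };
enum demo_flag { DEMO_UNKNOWN, DEMO_PASS, DEMO_FAIL };

typedef enum demo_status (*demo_action)(void);

static enum demo_status read_caps(void)   { return DEMO_OK;  }
static enum demo_status validate_rx(void) { return DEMO_ERR; }

/* Record the step result and report whether the caller may continue. */
static int execute_and_set(demo_action func, enum demo_flag *flag,
			   enum demo_status *status, const char *name)
{
	*status = func();
	*flag = (*status == DEMO_OK) ? DEMO_PASS : DEMO_FAIL;
	printf("%s: %s\n", name, (*flag == DEMO_PASS) ? "PASS" : "FAIL");
	return *status == DEMO_OK;
}

int main(void)
{
	enum demo_status status = DEMO_OK;
	enum demo_flag caps = DEMO_UNKNOWN, rx = DEMO_UNKNOWN;

	/* Chain steps like the execution handlers above: stop at the first
	 * failure and leave the per-step flags for the state machine. */
	if (!execute_and_set(read_caps, &caps, &status, "read_caps"))
		goto out;
	if (!execute_and_set(validate_rx, &rx, &status, "validate_rx"))
		goto out;
out:
	return status == DEMO_OK ? 0 : 1;
}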
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hdcp.h"
void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
uint8_t *buf, uint32_t buf_size)
{
const uint8_t bytes_per_line = 16,
byte_size = 3,
newline_size = 1,
terminator_size = 1;
uint32_t line_count = msg_size / bytes_per_line,
trailing_bytes = msg_size % bytes_per_line;
uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count +
byte_size * trailing_bytes + newline_size + terminator_size;
uint32_t buf_pos = 0;
uint32_t i = 0;
if (buf_size >= target_size) {
for (i = 0; i < msg_size; i++) {
if (i % bytes_per_line == 0)
buf[buf_pos++] = '\n';
sprintf(&buf[buf_pos], "%02X ", msg[i]);
buf_pos += byte_size;
}
buf[buf_pos++] = '\0';
}
}
void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp)
{
if (is_hdcp1(hdcp)) {
HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv,
sizeof(hdcp->auth.msg.hdcp1.bksv));
HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps,
sizeof(hdcp->auth.msg.hdcp1.bcaps));
HDCP_DDC_READ_TRACE(hdcp, "BSTATUS",
(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
sizeof(hdcp->auth.msg.hdcp1.bstatus));
HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an,
sizeof(hdcp->auth.msg.hdcp1.an));
HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv,
sizeof(hdcp->auth.msg.hdcp1.aksv));
HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo,
sizeof(hdcp->auth.msg.hdcp1.ainfo));
HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'",
(uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
sizeof(hdcp->auth.msg.hdcp1.r0p));
HDCP_DDC_READ_TRACE(hdcp, "BINFO",
(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist,
hdcp->auth.msg.hdcp1.ksvlist_size);
HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp,
sizeof(hdcp->auth.msg.hdcp1.vp));
} else if (is_hdcp2(hdcp)) {
HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version",
&hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp,
sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init,
sizeof(hdcp->auth.msg.hdcp2.ake_init));
HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert,
sizeof(hdcp->auth.msg.hdcp2.ake_cert));
HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM",
hdcp->auth.msg.hdcp2.ake_stored_km,
sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM",
hdcp->auth.msg.hdcp2.ake_no_stored_km,
sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
HDCP_DDC_READ_TRACE(hdcp, "Pairing Info",
hdcp->auth.msg.hdcp2.ake_pairing_info,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init,
sizeof(hdcp->auth.msg.hdcp2.lc_init));
HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks,
sizeof(hdcp->auth.msg.hdcp2.ske_eks));
HDCP_DDC_READ_TRACE(hdcp, "Rx Status",
(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
sizeof(hdcp->auth.msg.hdcp2.rxstatus));
HDCP_DDC_READ_TRACE(hdcp, "Rx Id List",
hdcp->auth.msg.hdcp2.rx_id_list,
hdcp->auth.msg.hdcp2.rx_id_list_size);
HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack",
hdcp->auth.msg.hdcp2.repeater_auth_ack,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management",
hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
hdcp->auth.msg.hdcp2.stream_manage_size);
HDCP_DDC_READ_TRACE(hdcp, "Stream Ready",
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type",
hdcp->auth.msg.hdcp2.content_stream_type_dp,
sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
}
}
char *mod_hdcp_status_to_str(int32_t status)
{
switch (status) {
case MOD_HDCP_STATUS_SUCCESS:
return "MOD_HDCP_STATUS_SUCCESS";
case MOD_HDCP_STATUS_FAILURE:
return "MOD_HDCP_STATUS_FAILURE";
case MOD_HDCP_STATUS_RESET_NEEDED:
return "MOD_HDCP_STATUS_RESET_NEEDED";
case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
case MOD_HDCP_STATUS_INVALID_STATE:
return "MOD_HDCP_STATUS_INVALID_STATE";
case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
case MOD_HDCP_STATUS_DDC_FAILURE:
return "MOD_HDCP_STATUS_DDC_FAILURE";
case MOD_HDCP_STATUS_INVALID_OPERATION:
return "MOD_HDCP_STATUS_INVALID_OPERATION";
case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE:
return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE";
case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE";
case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE";
case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE";
case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING:
return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING";
case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING:
return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING";
case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING:
return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE";
case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED:
return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE";
case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE";
case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING:
return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE";
case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE";
case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE";
case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED:
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE";
case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE";
case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST:
return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST";
case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
case MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE:
return "MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE";
default:
return "MOD_HDCP_STATUS_UNKNOWN";
}
}
char *mod_hdcp_state_id_to_str(int32_t id)
{
switch (id) {
case HDCP_UNINITIALIZED:
return "HDCP_UNINITIALIZED";
case HDCP_INITIALIZED:
return "HDCP_INITIALIZED";
case HDCP_CP_NOT_DESIRED:
return "HDCP_CP_NOT_DESIRED";
case H1_A0_WAIT_FOR_ACTIVE_RX:
return "H1_A0_WAIT_FOR_ACTIVE_RX";
case H1_A1_EXCHANGE_KSVS:
return "H1_A1_EXCHANGE_KSVS";
case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER";
case H1_A45_AUTHENTICATED:
return "H1_A45_AUTHENTICATED";
case H1_A8_WAIT_FOR_READY:
return "H1_A8_WAIT_FOR_READY";
case H1_A9_READ_KSV_LIST:
return "H1_A9_READ_KSV_LIST";
case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
return "D1_A0_DETERMINE_RX_HDCP_CAPABLE";
case D1_A1_EXCHANGE_KSVS:
return "D1_A1_EXCHANGE_KSVS";
case D1_A23_WAIT_FOR_R0_PRIME:
return "D1_A23_WAIT_FOR_R0_PRIME";
case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER";
case D1_A4_AUTHENTICATED:
return "D1_A4_AUTHENTICATED";
case D1_A6_WAIT_FOR_READY:
return "D1_A6_WAIT_FOR_READY";
case D1_A7_READ_KSV_LIST:
return "D1_A7_READ_KSV_LIST";
case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
return "H2_A0_KNOWN_HDCP2_CAPABLE_RX";
case H2_A1_SEND_AKE_INIT:
return "H2_A1_SEND_AKE_INIT";
case H2_A1_VALIDATE_AKE_CERT:
return "H2_A1_VALIDATE_AKE_CERT";
case H2_A1_SEND_NO_STORED_KM:
return "H2_A1_SEND_NO_STORED_KM";
case H2_A1_READ_H_PRIME:
return "H2_A1_READ_H_PRIME";
case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
return "H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
case H2_A1_SEND_STORED_KM:
return "H2_A1_SEND_STORED_KM";
case H2_A1_VALIDATE_H_PRIME:
return "H2_A1_VALIDATE_H_PRIME";
case H2_A2_LOCALITY_CHECK:
return "H2_A2_LOCALITY_CHECK";
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
return "H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
case H2_ENABLE_ENCRYPTION:
return "H2_ENABLE_ENCRYPTION";
case H2_A5_AUTHENTICATED:
return "H2_A5_AUTHENTICATED";
case H2_A6_WAIT_FOR_RX_ID_LIST:
return "H2_A6_WAIT_FOR_RX_ID_LIST";
case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
return "H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
case H2_A9_SEND_STREAM_MANAGEMENT:
return "H2_A9_SEND_STREAM_MANAGEMENT";
case H2_A9_VALIDATE_STREAM_READY:
return "H2_A9_VALIDATE_STREAM_READY";
case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
return "D2_A0_DETERMINE_RX_HDCP_CAPABLE";
case D2_A1_SEND_AKE_INIT:
return "D2_A1_SEND_AKE_INIT";
case D2_A1_VALIDATE_AKE_CERT:
return "D2_A1_VALIDATE_AKE_CERT";
case D2_A1_SEND_NO_STORED_KM:
return "D2_A1_SEND_NO_STORED_KM";
case D2_A1_READ_H_PRIME:
return "D2_A1_READ_H_PRIME";
case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
return "D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
case D2_A1_SEND_STORED_KM:
return "D2_A1_SEND_STORED_KM";
case D2_A1_VALIDATE_H_PRIME:
return "D2_A1_VALIDATE_H_PRIME";
case D2_A2_LOCALITY_CHECK:
return "D2_A2_LOCALITY_CHECK";
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
return "D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
case D2_SEND_CONTENT_STREAM_TYPE:
return "D2_SEND_CONTENT_STREAM_TYPE";
case D2_ENABLE_ENCRYPTION:
return "D2_ENABLE_ENCRYPTION";
case D2_A5_AUTHENTICATED:
return "D2_A5_AUTHENTICATED";
case D2_A6_WAIT_FOR_RX_ID_LIST:
return "D2_A6_WAIT_FOR_RX_ID_LIST";
case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
return "D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
case D2_A9_SEND_STREAM_MANAGEMENT:
return "D2_A9_SEND_STREAM_MANAGEMENT";
case D2_A9_VALIDATE_STREAM_READY:
return "D2_A9_VALIDATE_STREAM_READY";
default:
return "UNKNOWN_STATE_ID";
}
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c |
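A small user-space sketch of the hex-dump layout produced by mod_hdcp_dump_binary_message() above: three characters per byte ("XX "), a newline before every 16-byte line, and a terminating NUL, so the destination buffer needs at least 3 * msg_size + lines + 1 bytes. The helper and buffer sizes here are illustrative only.

#include <stdio.h>
#include <stdint.h>

static void dump_hex(const uint8_t *msg, uint32_t msg_size,
		     char *buf, uint32_t buf_size)
{
	const uint32_t bytes_per_line = 16;
	uint32_t lines = (msg_size + bytes_per_line - 1) / bytes_per_line;
	uint32_t required = 3 * msg_size + lines + 1; /* "XX " + newlines + NUL */
	uint32_t pos = 0, i;

	if (buf_size < required)
		return;

	for (i = 0; i < msg_size; i++) {
		if (i % bytes_per_line == 0)
			buf[pos++] = '\n';
		pos += sprintf(&buf[pos], "%02X ", msg[i]);
	}
	buf[pos] = '\0';
}

int main(void)
{
	uint8_t msg[20];
	char buf[128];
	uint32_t i;

	for (i = 0; i < sizeof(msg); i++)
		msg[i] = (uint8_t)i;
	dump_hex(msg, (uint32_t)sizeof(msg), buf, (uint32_t)sizeof(buf));
	puts(buf);
	return 0;
}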
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "hdcp.h"
static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp)
{
uint8_t is_ready = 0;
if (is_dp_hdcp(hdcp))
is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
else
is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
return is_ready ? MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
}
static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[2]) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
return status;
}
static inline enum mod_hdcp_status check_reauthentication_request(
struct mod_hdcp *hdcp)
{
enum mod_hdcp_status ret = MOD_HDCP_STATUS_SUCCESS;
if (is_dp_hdcp(hdcp))
ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
else
ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
return ret;
}
static inline enum mod_hdcp_status check_link_integrity_failure_dp(
struct mod_hdcp *hdcp)
{
return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
uint16_t size;
if (is_dp_hdcp(hdcp)) {
status = MOD_HDCP_STATUS_SUCCESS;
} else {
status = mod_hdcp_read_rxstatus(hdcp);
if (status == MOD_HDCP_STATUS_SUCCESS) {
size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0];
status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING;
}
}
return status;
}
static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
uint8_t size;
status = mod_hdcp_read_rxstatus(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
if (is_dp_hdcp(hdcp)) {
status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
} else {
size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0];
status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
}
out:
return status;
}
static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
uint8_t size;
status = mod_hdcp_read_rxstatus(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
if (is_dp_hdcp(hdcp)) {
status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
} else {
size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0];
status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
}
out:
return status;
}
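/*
 * L' is only polled for on HDMI: the locality check expects L' shortly after
 * LC_Init (on the order of 20 ms), so this helper sleeps in ~4 ms slices
 * (max_wait / num_polls) and rechecks RxStatus up to five times.  On DP the
 * message is read directly, so calling this is an invalid operation.
 */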
static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
uint8_t size;
uint16_t max_wait = 20; // units of ms
uint16_t num_polls = 5;
uint16_t wait_time = max_wait / num_polls;
if (is_dp_hdcp(hdcp))
status = MOD_HDCP_STATUS_INVALID_OPERATION;
else
for (; num_polls; num_polls--) {
msleep(wait_time);
status = mod_hdcp_read_rxstatus(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
break;
size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0];
status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING;
if (status == MOD_HDCP_STATUS_SUCCESS)
break;
}
return status;
}
static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
uint8_t size;
if (is_dp_hdcp(hdcp)) {
status = MOD_HDCP_STATUS_INVALID_OPERATION;
} else {
status = mod_hdcp_read_rxstatus(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0];
status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING;
}
out:
return status;
}
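/*
 * DEVICE_COUNT in the HDCP 2.x receiver id list is a split field: the low
 * bits come from the third byte and the high bits from the second byte of
 * the message (RxInfo), so the two halves are recombined before the device
 * count checks below.
 */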
static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
{
return HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
(HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
}
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* do not attempt authentication when the reported device count is 0 */
if (0 == get_device_count(hdcp)) {
return MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE;
}
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
/* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */
/* Device count must be greater than or equal to tracked hdcp displays. */
return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
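/* Shared RxStatus handling: returns nonzero on success and reports the
 * detailed result through *status. Also latches rx_id_list_ready and the
 * receiver ID list size when a repeater signals that its list is ready.
 */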
static uint8_t process_rxstatus(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input,
enum mod_hdcp_status *status)
{
if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxstatus,
&input->rxstatus_read, status,
hdcp, "rxstatus_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_reauthentication_request,
&input->reauth_request_check, status,
hdcp, "reauth_request_check"))
goto out;
if (is_dp_hdcp(hdcp)) {
if (!mod_hdcp_execute_and_set(check_link_integrity_failure_dp,
&input->link_integrity_check_dp, status,
hdcp, "link_integrity_check_dp"))
goto out;
}
if (hdcp->connection.is_repeater)
if (check_receiver_id_list_ready(hdcp) ==
MOD_HDCP_STATUS_SUCCESS) {
HDCP_INPUT_PASS_TRACE(hdcp, "rx_id_list_ready");
event_ctx->rx_id_list_ready = 1;
if (is_dp_hdcp(hdcp))
hdcp->auth.msg.hdcp2.rx_id_list_size =
sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
else
hdcp->auth.msg.hdcp2.rx_id_list_size =
HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0];
}
out:
return (*status == MOD_HDCP_STATUS_SUCCESS);
}
static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
&input->hdcp2version_read, &status,
hdcp, "hdcp2version_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
&input->hdcp2_capable_check, &status,
hdcp, "hdcp2_capable"))
goto out;
out:
return status;
}
static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
&input->create_session, &status,
hdcp, "create_session"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_ake_init,
&input->ake_init_prepare, &status,
hdcp, "ake_init_prepare"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_ake_init,
&input->ake_init_write, &status,
hdcp, "ake_init_write"))
goto out;
out:
return status;
}
static enum mod_hdcp_status validate_ake_cert(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (is_hdmi_dvi_sl_hdcp(hdcp))
if (!mod_hdcp_execute_and_set(check_ake_cert_available,
&input->ake_cert_available, &status,
hdcp, "ake_cert_available"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_ake_cert,
&input->ake_cert_read, &status,
hdcp, "ake_cert_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_ake_cert,
&input->ake_cert_validation, &status,
hdcp, "ake_cert_validation"))
goto out;
out:
return status;
}
static enum mod_hdcp_status send_no_stored_km(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_write_no_stored_km,
&input->no_stored_km_write, &status,
hdcp, "no_stored_km_write"))
goto out;
out:
return status;
}
static enum mod_hdcp_status read_h_prime(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(check_h_prime_available,
&input->h_prime_available, &status,
hdcp, "h_prime_available"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
&input->h_prime_read, &status,
hdcp, "h_prime_read"))
goto out;
out:
return status;
}
static enum mod_hdcp_status read_pairing_info_and_validate_h_prime(
struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(check_pairing_info_available,
&input->pairing_available, &status,
hdcp, "pairing_available"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_pairing_info,
&input->pairing_info_read, &status,
hdcp, "pairing_info_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
&input->h_prime_validation, &status,
hdcp, "h_prime_validation"))
goto out;
out:
return status;
}
static enum mod_hdcp_status send_stored_km(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_write_stored_km,
&input->stored_km_write, &status,
hdcp, "stored_km_write"))
goto out;
out:
return status;
}
static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(check_h_prime_available,
&input->h_prime_available, &status,
hdcp, "h_prime_available"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
&input->h_prime_read, &status,
hdcp, "h_prime_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
&input->h_prime_validation, &status,
hdcp, "h_prime_validation"))
goto out;
out:
return status;
}
static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
&input->lc_init_prepare, &status,
hdcp, "lc_init_prepare"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
&input->lc_init_write, &status,
hdcp, "lc_init_write"))
goto out;
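/* DP: give the receiver a fixed 16 ms to compute L' before reading it.
 * HDMI: poll RxStatus until L' is reported available.
 */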
if (is_dp_hdcp(hdcp))
msleep(16);
else
if (!mod_hdcp_execute_and_set(poll_l_prime_available,
&input->l_prime_available_poll, &status,
hdcp, "l_prime_available_poll"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
&input->l_prime_read, &status,
hdcp, "l_prime_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
hdcp, "l_prime_validation"))
goto out;
out:
return status;
}
static enum mod_hdcp_status exchange_ks_and_test_for_repeater(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_eks,
&input->eks_prepare, &status,
hdcp, "eks_prepare"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_eks,
&input->eks_write, &status,
hdcp, "eks_write"))
goto out;
out:
return status;
}
static enum mod_hdcp_status enable_encryption(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
event_ctx->unexpected_event = 1;
goto out;
}
if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
process_rxstatus(hdcp, event_ctx, input, &status);
goto out;
}
if (is_hdmi_dvi_sl_hdcp(hdcp)) {
if (!process_rxstatus(hdcp, event_ctx, input, &status))
goto out;
if (event_ctx->rx_id_list_ready)
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_enable_encryption,
&input->enable_encryption, &status,
hdcp, "enable_encryption"))
goto out;
if (is_dp_mst_hdcp(hdcp)) {
if (!mod_hdcp_execute_and_set(
mod_hdcp_hdcp2_enable_dp_stream_encryption,
&input->stream_encryption_dp, &status,
hdcp, "stream_encryption_dp"))
goto out;
}
out:
return status;
}
static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
event_ctx->unexpected_event = 1;
goto out;
}
process_rxstatus(hdcp, event_ctx, input, &status);
out:
return status;
}
static enum mod_hdcp_status wait_for_rx_id_list(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!process_rxstatus(hdcp, event_ctx, input, &status))
goto out;
if (!event_ctx->rx_id_list_ready) {
status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
goto out;
}
out:
return status;
}
static enum mod_hdcp_status verify_rx_id_list_and_send_ack(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
event_ctx->unexpected_event = 1;
goto out;
}
if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
process_rxstatus(hdcp, event_ctx, input, &status);
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_read_rx_id_list,
&input->rx_id_list_read,
&status, hdcp, "receiver_id_list_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_device_count,
&input->device_count_check,
&status, hdcp, "device_count_check"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_rx_id_list,
&input->rx_id_list_validation,
&status, hdcp, "rx_id_list_validation"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_repeater_auth_ack,
&input->repeater_auth_ack_write,
&status, hdcp, "repeater_auth_ack_write"))
goto out;
out:
return status;
}
static enum mod_hdcp_status send_stream_management(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
event_ctx->unexpected_event = 1;
goto out;
}
if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
process_rxstatus(hdcp, event_ctx, input, &status);
goto out;
}
if (is_hdmi_dvi_sl_hdcp(hdcp)) {
if (!process_rxstatus(hdcp, event_ctx, input, &status))
goto out;
if (event_ctx->rx_id_list_ready)
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_stream_management,
&input->prepare_stream_manage,
&status, hdcp, "prepare_stream_manage"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_stream_manage,
&input->stream_manage_write,
&status, hdcp, "stream_manage_write"))
goto out;
out:
return status;
}
static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
event_ctx->unexpected_event = 1;
goto out;
}
if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
process_rxstatus(hdcp, event_ctx, input, &status);
goto out;
}
if (is_hdmi_dvi_sl_hdcp(hdcp)) {
if (!process_rxstatus(hdcp, event_ctx, input, &status))
goto out;
if (event_ctx->rx_id_list_ready) {
goto out;
}
}
if (is_hdmi_dvi_sl_hdcp(hdcp))
if (!mod_hdcp_execute_and_set(check_stream_ready_available,
&input->stream_ready_available,
&status, hdcp, "stream_ready_available"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_read_stream_ready,
&input->stream_ready_read,
&status, hdcp, "stream_ready_read"))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_stream_ready,
&input->stream_ready_validation,
&status, hdcp, "stream_ready_validation"))
goto out;
out:
return status;
}
static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxcaps,
&input->rx_caps_read_dp,
&status, hdcp, "rx_caps_read_dp"))
goto out;
if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
&input->hdcp2_capable_check, &status,
hdcp, "hdcp2_capable_check"))
goto out;
out:
return status;
}
static enum mod_hdcp_status send_content_stream_type_dp(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
event_ctx->unexpected_event = 1;
goto out;
}
if (!process_rxstatus(hdcp, event_ctx, input, &status))
goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_content_type,
&input->content_stream_type_write, &status,
hdcp, "content_stream_type_write"))
goto out;
out:
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
switch (current_state(hdcp)) {
case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
status = known_hdcp2_capable_rx(hdcp, event_ctx, input);
break;
case H2_A1_SEND_AKE_INIT:
status = send_ake_init(hdcp, event_ctx, input);
break;
case H2_A1_VALIDATE_AKE_CERT:
status = validate_ake_cert(hdcp, event_ctx, input);
break;
case H2_A1_SEND_NO_STORED_KM:
status = send_no_stored_km(hdcp, event_ctx, input);
break;
case H2_A1_READ_H_PRIME:
status = read_h_prime(hdcp, event_ctx, input);
break;
case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
status = read_pairing_info_and_validate_h_prime(hdcp,
event_ctx, input);
break;
case H2_A1_SEND_STORED_KM:
status = send_stored_km(hdcp, event_ctx, input);
break;
case H2_A1_VALIDATE_H_PRIME:
status = validate_h_prime(hdcp, event_ctx, input);
break;
case H2_A2_LOCALITY_CHECK:
status = locality_check(hdcp, event_ctx, input);
break;
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
status = exchange_ks_and_test_for_repeater(hdcp, event_ctx, input);
break;
case H2_ENABLE_ENCRYPTION:
status = enable_encryption(hdcp, event_ctx, input);
break;
case H2_A5_AUTHENTICATED:
status = authenticated(hdcp, event_ctx, input);
break;
case H2_A6_WAIT_FOR_RX_ID_LIST:
status = wait_for_rx_id_list(hdcp, event_ctx, input);
break;
case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
break;
case H2_A9_SEND_STREAM_MANAGEMENT:
status = send_stream_management(hdcp, event_ctx, input);
break;
case H2_A9_VALIDATE_STREAM_READY:
status = validate_stream_ready(hdcp, event_ctx, input);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
break;
}
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
switch (current_state(hdcp)) {
case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
break;
case D2_A1_SEND_AKE_INIT:
status = send_ake_init(hdcp, event_ctx, input);
break;
case D2_A1_VALIDATE_AKE_CERT:
status = validate_ake_cert(hdcp, event_ctx, input);
break;
case D2_A1_SEND_NO_STORED_KM:
status = send_no_stored_km(hdcp, event_ctx, input);
break;
case D2_A1_READ_H_PRIME:
status = read_h_prime(hdcp, event_ctx, input);
break;
case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
status = read_pairing_info_and_validate_h_prime(hdcp,
event_ctx, input);
break;
case D2_A1_SEND_STORED_KM:
status = send_stored_km(hdcp, event_ctx, input);
break;
case D2_A1_VALIDATE_H_PRIME:
status = validate_h_prime(hdcp, event_ctx, input);
break;
case D2_A2_LOCALITY_CHECK:
status = locality_check(hdcp, event_ctx, input);
break;
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
status = exchange_ks_and_test_for_repeater(hdcp,
event_ctx, input);
break;
case D2_SEND_CONTENT_STREAM_TYPE:
status = send_content_stream_type_dp(hdcp, event_ctx, input);
break;
case D2_ENABLE_ENCRYPTION:
status = enable_encryption(hdcp, event_ctx, input);
break;
case D2_A5_AUTHENTICATED:
status = authenticated(hdcp, event_ctx, input);
break;
case D2_A6_WAIT_FOR_RX_ID_LIST:
status = wait_for_rx_id_list(hdcp, event_ctx, input);
break;
case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
break;
case D2_A9_SEND_STREAM_MANAGEMENT:
status = send_stream_management(hdcp, event_ctx, input);
break;
case D2_A9_VALIDATE_STREAM_READY:
status = validate_stream_ready(hdcp, event_ctx, input);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;
break;
}
return status;
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "hdcp.h"
static void push_error_status(struct mod_hdcp *hdcp,
enum mod_hdcp_status status)
{
struct mod_hdcp_trace *trace = &hdcp->connection.trace;
if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
trace->errors[trace->error_count].status = status;
trace->errors[trace->error_count].state_id = hdcp->state.id;
trace->error_count++;
HDCP_ERROR_TRACE(hdcp, status);
}
if (is_hdcp1(hdcp)) {
hdcp->connection.hdcp1_retry_count++;
if (hdcp->connection.hdcp1_retry_count == MAX_NUM_OF_ATTEMPTS)
hdcp->connection.link.adjust.hdcp1.disable = 1;
} else if (is_hdcp2(hdcp)) {
hdcp->connection.hdcp2_retry_count++;
if (hdcp->connection.hdcp2_retry_count == MAX_NUM_OF_ATTEMPTS)
hdcp->connection.link.adjust.hdcp2.disable = 1;
}
}
static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
{
int i, is_auth_needed = 0;
/* if no display on the link needs authentication,
 * hdcp is not desired
 */
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
is_auth_needed = 1;
break;
}
}
return is_auth_needed &&
!hdcp->connection.link.adjust.hdcp1.disable &&
!hdcp->connection.is_hdcp1_revoked;
}
static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
{
int i, is_auth_needed = 0;
/* if no display on the link needs authentication,
 * hdcp is not desired
 */
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
is_auth_needed = 1;
break;
}
}
return is_auth_needed &&
!hdcp->connection.link.adjust.hdcp2.disable &&
!hdcp->connection.is_hdcp2_revoked;
}
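/* Event handling is split into two passes: execution() performs the work
 * for the current state and records pass/fail per step in the transition
 * input, then transition() uses that input to pick the next state and
 * schedule callbacks or watchdog timers.
 */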
static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
union mod_hdcp_transition_input *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_in_initialized_state(hdcp)) {
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
/* initialize transition input */
memset(input, 0, sizeof(union mod_hdcp_transition_input));
} else if (is_in_cp_not_desired_state(hdcp)) {
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
goto out;
}
} else if (is_in_hdcp1_states(hdcp)) {
status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
} else if (is_in_hdcp1_dp_states(hdcp)) {
status = mod_hdcp_hdcp1_dp_execution(hdcp,
event_ctx, &input->hdcp1);
} else if (is_in_hdcp2_states(hdcp)) {
status = mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2);
} else if (is_in_hdcp2_dp_states(hdcp)) {
status = mod_hdcp_hdcp2_dp_execution(hdcp,
event_ctx, &input->hdcp2);
} else {
event_ctx->unexpected_event = 1;
goto out;
}
out:
return status;
}
static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
union mod_hdcp_transition_input *input,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (event_ctx->unexpected_event)
goto out;
if (is_in_initialized_state(hdcp)) {
if (is_dp_hdcp(hdcp))
if (is_cp_desired_hdcp2(hdcp)) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A0_DETERMINE_RX_HDCP_CAPABLE);
} else if (is_cp_desired_hdcp1(hdcp)) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
set_auth_complete(hdcp, output);
}
else if (is_hdmi_dvi_sl_hdcp(hdcp))
if (is_cp_desired_hdcp2(hdcp)) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A0_KNOWN_HDCP2_CAPABLE_RX);
} else if (is_cp_desired_hdcp1(hdcp)) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
set_auth_complete(hdcp, output);
}
else {
callback_in_ms(0, output);
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
set_auth_complete(hdcp, output);
}
} else if (is_in_cp_not_desired_state(hdcp)) {
increment_stay_counter(hdcp);
} else if (is_in_hdcp1_states(hdcp)) {
status = mod_hdcp_hdcp1_transition(hdcp,
event_ctx, &input->hdcp1, output);
} else if (is_in_hdcp1_dp_states(hdcp)) {
status = mod_hdcp_hdcp1_dp_transition(hdcp,
event_ctx, &input->hdcp1, output);
} else if (is_in_hdcp2_states(hdcp)) {
status = mod_hdcp_hdcp2_transition(hdcp,
event_ctx, &input->hdcp2, output);
} else if (is_in_hdcp2_dp_states(hdcp)) {
status = mod_hdcp_hdcp2_dp_transition(hdcp,
event_ctx, &input->hdcp2, output);
} else {
status = MOD_HDCP_STATUS_INVALID_STATE;
}
out:
return status;
}
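/* Abort any in-progress authentication: destroy the PSP session if one was
 * created, clear the authentication and state data, and return the state
 * machine to HDCP_INITIALIZED. Pending callback and watchdog requests from
 * the previous attempt are always cancelled.
 */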
static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_hdcp1(hdcp)) {
if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) {
/* TODO - update psp to unify create session failure
* recovery between hdcp1 and 2.
*/
mod_hdcp_hdcp1_destroy_session(hdcp);
}
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else if (is_hdcp2(hdcp)) {
if (hdcp->auth.trans_input.hdcp2.create_session == PASS) {
status = mod_hdcp_hdcp2_destroy_session(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS) {
output->callback_needed = 0;
output->watchdog_timer_needed = 0;
goto out;
}
}
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else if (is_in_cp_not_desired_state(hdcp)) {
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
}
out:
/* stop callback and watchdog requests from previous authentication */
output->watchdog_timer_stop = 1;
output->callback_stop = 1;
return status;
}
static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
memset(output, 0, sizeof(struct mod_hdcp_output));
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
if (current_state(hdcp) != HDCP_UNINITIALIZED) {
HDCP_TOP_RESET_CONN_TRACE(hdcp);
set_state_id(hdcp, output, HDCP_UNINITIALIZED);
}
memset(&hdcp->connection, 0, sizeof(hdcp->connection));
out:
return status;
}
static enum mod_hdcp_status update_display_adjustments(struct mod_hdcp *hdcp,
struct mod_hdcp_display *display,
struct mod_hdcp_display_adjustment *adj)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_NOT_IMPLEMENTED;
if (is_in_authenticated_states(hdcp) &&
is_dp_mst_hdcp(hdcp) &&
display->adjust.disable == true &&
adj->disable == false) {
display->adjust.disable = false;
if (is_hdcp1(hdcp))
status = mod_hdcp_hdcp1_enable_dp_stream_encryption(hdcp);
else if (is_hdcp2(hdcp))
status = mod_hdcp_hdcp2_enable_dp_stream_encryption(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
display->adjust.disable = true;
}
if (status == MOD_HDCP_STATUS_SUCCESS &&
memcmp(adj, &display->adjust,
sizeof(struct mod_hdcp_display_adjustment)) != 0)
status = MOD_HDCP_STATUS_NOT_IMPLEMENTED;
return status;
}
/*
* Implementation of functions in mod_hdcp.h
*/
size_t mod_hdcp_get_memory_size(void)
{
return sizeof(struct mod_hdcp);
}
enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
struct mod_hdcp_config *config)
{
struct mod_hdcp_output output;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
memset(&output, 0, sizeof(output));
hdcp->config = *config;
HDCP_TOP_INTERFACE_TRACE(hdcp);
status = reset_connection(hdcp, &output);
if (status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, status);
return status;
}
enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_output output;
HDCP_TOP_INTERFACE_TRACE(hdcp);
memset(&output, 0, sizeof(output));
status = reset_connection(hdcp, &output);
if (status == MOD_HDCP_STATUS_SUCCESS)
memset(hdcp, 0, sizeof(struct mod_hdcp));
else
push_error_status(hdcp, status);
return status;
}
enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
struct mod_hdcp_link *link, struct mod_hdcp_display *display,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_display *display_container = NULL;
HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index);
memset(output, 0, sizeof(struct mod_hdcp_output));
/* skip inactive display */
if (display->state != MOD_HDCP_DISPLAY_ACTIVE) {
status = MOD_HDCP_STATUS_SUCCESS;
goto out;
}
/* check existing display container */
if (get_active_display_at_index(hdcp, display->index)) {
status = MOD_HDCP_STATUS_SUCCESS;
goto out;
}
/* find an empty display container */
display_container = get_empty_display_container(hdcp);
if (!display_container) {
status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND;
goto out;
}
/* reset existing authentication status */
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
/* reset retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* add display to connection */
hdcp->connection.link = *link;
*display_container = *display;
status = mod_hdcp_add_display_to_topology(hdcp, display_container);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
/* request authentication */
if (current_state(hdcp) != HDCP_INITIALIZED)
set_state_id(hdcp, output, HDCP_INITIALIZED);
callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output);
out:
if (status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, status);
return status;
}
enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
uint8_t index, struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_display *display = NULL;
HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index);
memset(output, 0, sizeof(struct mod_hdcp_output));
/* find display in connection */
display = get_active_display_at_index(hdcp, index);
if (!display) {
status = MOD_HDCP_STATUS_SUCCESS;
goto out;
}
/* stop current authentication */
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
/* clear retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* remove display */
status = mod_hdcp_remove_display_from_topology(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
memset(display, 0, sizeof(struct mod_hdcp_display));
/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)
callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
output);
out:
if (status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, status);
return status;
}
enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
uint8_t index,
struct mod_hdcp_link_adjustment *link_adjust,
struct mod_hdcp_display_adjustment *display_adjust,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_display *display = NULL;
HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index);
memset(output, 0, sizeof(struct mod_hdcp_output));
/* find display in connection */
display = get_active_display_at_index(hdcp, index);
if (!display) {
status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
goto out;
}
/* skip if no changes */
if (memcmp(link_adjust, &hdcp->connection.link.adjust,
sizeof(struct mod_hdcp_link_adjustment)) == 0 &&
memcmp(display_adjust, &display->adjust,
sizeof(struct mod_hdcp_display_adjustment)) == 0) {
status = MOD_HDCP_STATUS_SUCCESS;
goto out;
}
if (memcmp(link_adjust, &hdcp->connection.link.adjust,
sizeof(struct mod_hdcp_link_adjustment)) == 0 &&
memcmp(display_adjust, &display->adjust,
sizeof(struct mod_hdcp_display_adjustment)) != 0) {
status = update_display_adjustments(hdcp, display, display_adjust);
if (status != MOD_HDCP_STATUS_NOT_IMPLEMENTED)
goto out;
}
/* stop current authentication */
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
/* clear retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* set new adjustment */
hdcp->connection.link.adjust = *link_adjust;
display->adjust = *display_adjust;
/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)
/* wait 100ms to debounce simultaneous updates for different indices */
callback_in_ms(100, output);
out:
if (status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, status);
return status;
}
enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
uint8_t index, struct mod_hdcp_display_query *query)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
struct mod_hdcp_display *display = NULL;
/* find display in connection */
display = get_active_display_at_index(hdcp, index);
if (!display) {
status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
goto out;
}
/* populate query */
query->link = &hdcp->connection.link;
query->display = display;
query->trace = &hdcp->connection.trace;
query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
if (is_display_encryption_enabled(display)) {
if (is_hdcp1(hdcp)) {
query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
} else if (is_hdcp2(hdcp)) {
if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
else if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
else
query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON;
}
} else {
query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
}
out:
return status;
}
enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
struct mod_hdcp_output *output)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
HDCP_TOP_INTERFACE_TRACE(hdcp);
status = reset_connection(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, status);
return status;
}
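/* Main event entry point: run execution and transition for the event,
 * fold the two results into a single status, reset authentication when the
 * transition asks for it, and clear the DP CP_IRQ source if that is what
 * triggered the event.
 */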
enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
enum mod_hdcp_event event, struct mod_hdcp_output *output)
{
enum mod_hdcp_status exec_status, trans_status, reset_status, status;
struct mod_hdcp_event_context event_ctx;
HDCP_EVENT_TRACE(hdcp, event);
memset(output, 0, sizeof(struct mod_hdcp_output));
memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context));
event_ctx.event = event;
/* execute and transition */
exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input);
trans_status = transition(
hdcp, &event_ctx, &hdcp->auth.trans_input, output);
if (trans_status == MOD_HDCP_STATUS_SUCCESS) {
status = MOD_HDCP_STATUS_SUCCESS;
} else if (exec_status == MOD_HDCP_STATUS_SUCCESS) {
status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE;
push_error_status(hdcp, status);
} else {
status = exec_status;
push_error_status(hdcp, status);
}
/* reset authentication if needed */
if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
mod_hdcp_log_ddc_trace(hdcp);
reset_status = reset_authentication(hdcp, output);
if (reset_status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, reset_status);
}
/* Clear CP_IRQ status if needed */
if (event_ctx.event == MOD_HDCP_EVENT_CPIRQ) {
status = mod_hdcp_clear_cp_irq_status(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, status);
}
return status;
}
enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
enum signal_type signal)
{
enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF;
switch (signal) {
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_HDMI_TYPE_A:
mode = MOD_HDCP_MODE_DEFAULT;
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
case SIGNAL_TYPE_DISPLAY_PORT_MST:
mode = MOD_HDCP_MODE_DP;
break;
default:
break;
}
return mode;
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#define MAX_NUM_DISPLAYS 24
#include "hdcp.h"
#include "amdgpu.h"
#include "hdcp_psp.h"
static void hdcp2_message_init(struct mod_hdcp *hdcp,
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *in)
{
in->session_handle = hdcp->auth.id;
in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg1_desc.msg_size = 0;
in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg2_desc.msg_size = 0;
in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg3_desc.msg_size = 0;
}
static enum mod_hdcp_status remove_display_from_topology_v2(
struct mod_hdcp *hdcp, uint8_t index)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_display *display =
get_active_display_at_index(hdcp, index);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
} else {
display->state = MOD_HDCP_DISPLAY_ACTIVE;
HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
}
mutex_unlock(&psp->dtm_context.mutex);
return status;
}
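/* V3 topology update; if the TA rejects the V3 command, retry with the V2
 * layout so older TA firmware keeps working.
 */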
static enum mod_hdcp_status remove_display_from_topology_v3(
struct mod_hdcp *hdcp, uint8_t index)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_display *display =
get_active_display_at_index(hdcp, index);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V3;
dtm_cmd->dtm_in_message.topology_update_v3.display_handle = display->index;
dtm_cmd->dtm_in_message.topology_update_v3.is_active = 0;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
mutex_unlock(&psp->dtm_context.mutex);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = remove_display_from_topology_v2(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
display->state = MOD_HDCP_DISPLAY_INACTIVE;
} else {
display->state = MOD_HDCP_DISPLAY_ACTIVE;
HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
}
return status;
}
static enum mod_hdcp_status add_display_to_topology_v2(
struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_link *link = &hdcp->connection.link;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
if (is_dp_hdcp(hdcp))
dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_enabled;
dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
display->state = MOD_HDCP_DISPLAY_INACTIVE;
status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
} else {
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
}
mutex_unlock(&psp->dtm_context.mutex);
return status;
}
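/* The V3 topology update carries the extended fields (link/stream encoder
 * indices, PHY id, and the DIO output type used for USB4/DPIA links) and
 * falls back to the V2 command when the TA does not accept it.
 */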
static enum mod_hdcp_status add_display_to_topology_v3(
struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_link *link = &hdcp->connection.link;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V3;
dtm_cmd->dtm_in_message.topology_update_v3.display_handle = display->index;
dtm_cmd->dtm_in_message.topology_update_v3.is_active = 1;
dtm_cmd->dtm_in_message.topology_update_v3.controller = display->controller;
dtm_cmd->dtm_in_message.topology_update_v3.ddc_line = link->ddc_line;
dtm_cmd->dtm_in_message.topology_update_v3.link_enc = link->link_enc_idx;
dtm_cmd->dtm_in_message.topology_update_v3.stream_enc = display->stream_enc_idx;
if (is_dp_hdcp(hdcp))
dtm_cmd->dtm_in_message.topology_update_v3.is_assr = link->dp.assr_enabled;
dtm_cmd->dtm_in_message.topology_update_v3.dp_mst_vcid = display->vc_id;
dtm_cmd->dtm_in_message.topology_update_v3.max_hdcp_supported_version =
TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3;
dtm_cmd->dtm_in_message.topology_update_v3.encoder_type = TA_DTM_ENCODER_TYPE__DIG;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
dtm_cmd->dtm_in_message.topology_update_v3.phy_id = link->phy_idx;
dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap = link->hdcp_supported_informational;
dtm_cmd->dtm_in_message.topology_update_v3.dio_output_type = link->dp.usb4_enabled ?
TA_DTM_DIO_OUTPUT_TYPE__DPIA :
TA_DTM_DIO_OUTPUT_TYPE__DIRECT;
dtm_cmd->dtm_in_message.topology_update_v3.dio_output_id = link->dio_output_id;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
mutex_unlock(&psp->dtm_context.mutex);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = add_display_to_topology_v2(hdcp, display);
if (status != MOD_HDCP_STATUS_SUCCESS)
display->state = MOD_HDCP_DISPLAY_INACTIVE;
} else {
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
}
return status;
}
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
if (hdcp->config.psp.caps.dtm_v3_supported)
status = remove_display_from_topology_v3(hdcp, index);
else
status = remove_display_from_topology_v2(hdcp, index);
return status;
}
enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
struct mod_hdcp_display *display)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (hdcp->config.psp.caps.dtm_v3_supported)
status = add_display_to_topology_v3(hdcp, display);
else
status = add_display_to_topology_v2(hdcp, display);
return status;
}
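/* The PSP calls below share a single TA command buffer, so each one takes
 * the HDCP context mutex, clears the buffer, fills in the request, invokes
 * the TA, and reads the reply before releasing the lock.
 */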
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->hdcp_context.context.initialized) {
DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
return MOD_HDCP_STATUS_FAILURE;
}
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
mutex_lock(&psp->hdcp_context.mutex);
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
} else {
hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
sizeof(hdcp->auth.msg.hdcp1.aksv));
memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
sizeof(hdcp->auth.msg.hdcp1.an));
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
} else {
HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_encryption_enabled(&hdcp->displays[i])) {
hdcp->displays[i].state =
MOD_HDCP_DISPLAY_ACTIVE;
HDCP_HDCP1_DISABLED_TRACE(
hdcp, hdcp->displays[i].index);
}
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
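/* First part of HDCP 1.x authentication: the TA validates BKSV and R0' and
 * reports whether the receiver is a repeater (second part required) or the
 * BKSV is revoked.
 */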
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv,
TA_HDCP__HDCP1_KSV_SIZE);
hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p;
hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
/* needs second part of authentication */
hdcp->connection.is_repeater = 1;
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
hdcp->connection.is_repeater = 0;
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
hdcp->connection.is_hdcp1_revoked = 1;
status = MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE;
} else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size;
memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist,
hdcp->auth.msg.hdcp1.ksvlist_size);
memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp,
sizeof(hdcp->auth.msg.hdcp1.vp));
hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo =
is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
status = MOD_HDCP_STATUS_SUCCESS;
} else if (hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
hdcp->connection.is_hdcp1_revoked = 1;
status = MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED;
} else {
status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
int i = 0;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
break;
}
hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id;
hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level != 1)
status = MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
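/* HDCP 2.x session creation also tells the TA which content type to
 * negotiate: force Type 0, force Type 1, or the maximum the receiver
 * supports, based on the link adjustment.
 */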
enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->hdcp_context.context.initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
return MOD_HDCP_STATUS_FAILURE;
}
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0;
else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1;
else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_MAX)
hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
else
hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
} else {
HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_encryption_enabled(&hdcp->displays[i])) {
hdcp->displays[i].state =
MOD_HDCP_DISPLAY_ACTIVE;
HDCP_HDCP2_DISABLED_TRACE(
hdcp, hdcp->displays[i].index);
}
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_INIT;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
else
memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
sizeof(hdcp->auth.msg.hdcp2.ake_init));
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT;
msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT;
memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert,
sizeof(hdcp->auth.msg.hdcp2.ake_cert));
msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM;
msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM;
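/* The TA prepares both possible follow-up messages in a single call; the two
* memcpy()s below pull AKE_No_Stored_km and AKE_Stored_km out of
* transmitter_message back to back, so the caller can send whichever one
* matches is_km_stored.
*/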
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
} else {
memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km,
&msg_out->prepare.transmitter_message[0],
sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
&msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
if (msg_out->process.msg1_status ==
TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
hdcp->connection.is_km_stored =
msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater =
msg_out->process.is_repeater ? 1 : 0;
status = MOD_HDCP_STATUS_SUCCESS;
} else if (msg_out->process.msg1_status ==
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
} else {
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME;
msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME;
memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_h_prime,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
if (!hdcp->connection.is_km_stored) {
msg_in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO;
msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO;
memcpy(&msg_in->process.receiver_message[sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)],
hdcp->auth.msg.hdcp2.ake_pairing_info, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
}
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
else if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
else if (!hdcp->connection.is_km_stored &&
msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__LC_INIT;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
else
memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
sizeof(hdcp->auth.msg.hdcp2.lc_init));
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME;
msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME;
memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.lc_l_prime,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS;
if (is_dp_hdcp(hdcp))
msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
} else {
memcpy(hdcp->auth.msg.hdcp2.ske_eks,
&msg_out->prepare.transmitter_message[0],
sizeof(hdcp->auth.msg.hdcp2.ske_eks));
msg_out->prepare.msg1_desc.msg_size =
sizeof(hdcp->auth.msg.hdcp2.ske_eks);
if (is_dp_hdcp(hdcp)) {
memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
&msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
}
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
} else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST;
msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.rx_id_list,
sizeof(hdcp->auth.msg.hdcp2.rx_id_list));
msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
} else {
memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack,
&msg_out->prepare.transmitter_message[0],
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
if (msg_out->process.msg1_status ==
TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
status = MOD_HDCP_STATUS_SUCCESS;
} else if (msg_out->process.msg1_status ==
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
} else {
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
uint8_t i;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
continue;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
break;
hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_SUCCESS;
else
status = MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE;
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
status = MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
} else {
hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
&msg_out->prepare.transmitter_message[0],
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
}
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
hdcp2_message_init(hdcp, msg_in);
msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY;
msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready);
memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
status = MOD_HDCP_STATUS_SUCCESS;
else
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
| linux-master | drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "mod_info_packet.h"
#include "core_types.h"
#include "dc_types.h"
#include "mod_shared.h"
#include "mod_freesync.h"
#include "dc.h"
enum vsc_packet_revision {
vsc_packet_undefined = 0,
//01h = VSC SDP supports only 3D stereo.
vsc_packet_rev1 = 1,
//02h = 3D stereo + PSR.
vsc_packet_rev2 = 2,
//03h = 3D stereo + PSR2.
vsc_packet_rev3 = 3,
//04h = 3D stereo + PSR/PSR2 + Y-coordinate.
vsc_packet_rev4 = 4,
//05h = 3D stereo + PSR/PSR2 + Y-coordinate + Pixel Encoding/Colorimetry Format
vsc_packet_rev5 = 5,
};
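/* Note: the revision selected here is programmed into HB2 of the VSC SDP
* header in mod_build_vsc_infopacket() below (e.g. vsc_packet_rev4 -> hb2 = 0x04,
* vsc_packet_rev5 -> hb2 = 0x05).
*/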
#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
#define HF_VSIF_VERSION 1
// VTEM Byte Offset
#define VTEM_PB0 0
#define VTEM_PB1 1
#define VTEM_PB2 2
#define VTEM_PB3 3
#define VTEM_PB4 4
#define VTEM_PB5 5
#define VTEM_PB6 6
#define VTEM_MD0 7
#define VTEM_MD1 8
#define VTEM_MD2 9
#define VTEM_MD3 10
// VTEM Byte Masks
//PB0
#define MASK_VTEM_PB0__RESERVED0 0x01
#define MASK_VTEM_PB0__SYNC 0x02
#define MASK_VTEM_PB0__VFR 0x04
#define MASK_VTEM_PB0__AFR 0x08
#define MASK_VTEM_PB0__DS_TYPE 0x30
//0: Periodic pseudo-static EM Data Set
//1: Periodic dynamic EM Data Set
//2: Unique EM Data Set
//3: Reserved
#define MASK_VTEM_PB0__END 0x40
#define MASK_VTEM_PB0__NEW 0x80
//PB1
#define MASK_VTEM_PB1__RESERVED1 0xFF
//PB2
#define MASK_VTEM_PB2__ORGANIZATION_ID 0xFF
//0: This is a Vendor Specific EM Data Set
//1: This EM Data Set is defined by This Specification (HDMI 2.1 r102.clean)
//2: This EM Data Set is defined by CTA-861-G
//3: This EM Data Set is defined by VESA
//PB3
#define MASK_VTEM_PB3__DATA_SET_TAG_MSB 0xFF
//PB4
#define MASK_VTEM_PB4__DATA_SET_TAG_LSB 0xFF
//PB5
#define MASK_VTEM_PB5__DATA_SET_LENGTH_MSB 0xFF
//PB6
#define MASK_VTEM_PB6__DATA_SET_LENGTH_LSB 0xFF
//PB7-27 (20 bytes):
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
#define MASK_VTEM_MD0__QMS_EN 0x04
#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
#define MASK_VTEM_MD1__BASE_VFRONT 0xFF
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
enum ColorimetryRGBDP {
ColorimetryRGB_DP_sRGB = 0,
ColorimetryRGB_DP_AdobeRGB = 3,
ColorimetryRGB_DP_P3 = 4,
ColorimetryRGB_DP_CustomColorProfile = 5,
ColorimetryRGB_DP_ITU_R_BT2020RGB = 6,
};
enum ColorimetryYCCDP {
ColorimetryYCC_DP_ITU601 = 0,
ColorimetryYCC_DP_ITU709 = 1,
ColorimetryYCC_DP_AdobeYCC = 5,
ColorimetryYCC_DP_ITU2020YCC = 6,
ColorimetryYCC_DP_ITU2020YCbCr = 7,
};
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet,
enum dc_color_space cs,
enum color_transfer_func tf)
{
unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
unsigned int pixelEncoding = 0;
unsigned int colorimetryFormat = 0;
bool stereo3dSupport = false;
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
vsc_packet_revision = vsc_packet_rev1;
stereo3dSupport = true;
}
/* VSC SDP revision is set to 4 for PSR-SU and Panel Replay, or 2 for PSR1 */
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
vsc_packet_revision = vsc_packet_rev4;
else if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
vsc_packet_revision = vsc_packet_rev5;
/* VSC packet not needed based on the features
* supported by this DP display
*/
if (vsc_packet_revision == vsc_packet_undefined)
return;
if (vsc_packet_revision == vsc_packet_rev4) {
/* Secondary-data Packet ID = 0*/
info_packet->hb0 = 0x00;
/* 07h - Packet Type Value indicating Video
* Stream Configuration packet
*/
info_packet->hb1 = 0x07;
/* 04h = VSC SDP supporting 3D stereo + PSR/PSR2 + Y-coordinate
* (applies to eDP v1.4 or higher).
*/
info_packet->hb2 = 0x04;
/* 0Eh = VSC SDP supporting 3D stereo + PSR2
* (HB2 = 04h), with Y-coordinate of first scan
* line of the SU region
*/
info_packet->hb3 = 0x0E;
for (i = 0; i < 28; i++)
info_packet->sb[i] = 0;
info_packet->valid = true;
}
if (vsc_packet_revision == vsc_packet_rev2) {
/* Secondary-data Packet ID = 0*/
info_packet->hb0 = 0x00;
/* 07h - Packet Type Value indicating Video
* Stream Configuration packet
*/
info_packet->hb1 = 0x07;
/* 02h = VSC SDP supporting 3D stereo and PSR
* (applies to eDP v1.3 or higher).
*/
info_packet->hb2 = 0x02;
/* 08h = VSC packet supporting 3D stereo + PSR
* (HB2 = 02h).
*/
info_packet->hb3 = 0x08;
for (i = 0; i < 28; i++)
info_packet->sb[i] = 0;
info_packet->valid = true;
}
if (vsc_packet_revision == vsc_packet_rev1) {
info_packet->hb0 = 0x00; // Secondary-data Packet ID = 0
info_packet->hb1 = 0x07; // 07h = Packet Type Value indicating Video Stream Configuration packet
info_packet->hb2 = 0x01; // 01h = Revision number. VSC SDP supporting 3D stereo only
info_packet->hb3 = 0x01; // 01h = VSC SDP supporting 3D stereo only (HB2 = 01h).
info_packet->valid = true;
}
if (stereo3dSupport) {
/* ==============================================================================================================|
* A. STEREO 3D
* ==============================================================================================================|
* VSC Payload (1 byte) From DP1.2 spec
*
* Bits 3:0 (Stereo Interface Method Code) | Bits 7:4 (Stereo Interface Method Specific Parameter)
* -----------------------------------------------------------------------------------------------------
* 0 = Non Stereo Video | Must be set to 0x0
* -----------------------------------------------------------------------------------------------------
* 1 = Frame/Field Sequential | 0x0: L + R view indication based on MISC1 bit 2:1
* | 0x1: Right when Stereo Signal = 1
* | 0x2: Left when Stereo Signal = 1
* | (others reserved)
* -----------------------------------------------------------------------------------------------------
* 2 = Stacked Frame | 0x0: Left view is on top and right view on bottom
* | (others reserved)
* -----------------------------------------------------------------------------------------------------
* 3 = Pixel Interleaved | 0x0: horiz interleaved, right view pixels on even lines
* | 0x1: horiz interleaved, right view pixels on odd lines
* | 0x2: checker board, start with left view pixel
* | 0x3: vertical interleaved, start with left view pixels
* | 0x4: vertical interleaved, start with right view pixels
* | (others reserved)
* -----------------------------------------------------------------------------------------------------
* 4 = Side-by-side | 0x0: left half represents left eye view
* | 0x1: left half represents right eye view
*/
switch (stream->timing.timing_3d_format) {
case TIMING_3D_FORMAT_HW_FRAME_PACKING:
case TIMING_3D_FORMAT_SW_FRAME_PACKING:
case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
case TIMING_3D_FORMAT_TB_SW_PACKED:
info_packet->sb[0] = 0x02; // Stacked Frame, Left view is on top and right view on bottom.
break;
case TIMING_3D_FORMAT_DP_HDMI_INBAND_FA:
case TIMING_3D_FORMAT_INBAND_FA:
info_packet->sb[0] = 0x01; // Frame/Field Sequential, L + R view indication based on MISC1 bit 2:1
break;
case TIMING_3D_FORMAT_SIDE_BY_SIDE:
case TIMING_3D_FORMAT_SBS_SW_PACKED:
info_packet->sb[0] = 0x04; // Side-by-side
break;
default:
info_packet->sb[0] = 0x00; // No Stereo Video, Shall be cleared to 0x0.
break;
}
}
/* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication.
* Added in DP1.3, a DP Source device is allowed to indicate the pixel encoding/colorimetry
* format to the DP Sink device with VSC SDP only when the DP Sink device supports it
* (i.e., VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED bit in the DPRX_FEATURE_ENUMERATION_LIST
* register (DPCD Address 02210h, bit 3) is set to 1).
* (Requires VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED bit set to 1 in DPCD 02210h. This
* DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
 * (and higher). When MISC1, bit 6, is set to 1, a Source device uses a VSC SDP to indicate
 * the Pixel Encoding/Colorimetry Format, and a Sink device must ignore MISC1, bit 7, and
 * MISC0, bits 7:1 (MISC1, bit 7 and MISC0, bits 7:1 become "don't care").)
*/
if (vsc_packet_revision == vsc_packet_rev5) {
/* Secondary-data Packet ID = 0 */
info_packet->hb0 = 0x00;
/* 07h - Packet Type Value indicating Video Stream Configuration packet */
info_packet->hb1 = 0x07;
/* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication. */
info_packet->hb2 = 0x05;
/* 13h = VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/Colorimetry Format indication (HB2 = 05h). */
info_packet->hb3 = 0x13;
info_packet->valid = true;
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
* Data Bytes DB 18~16
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
* ----------------------------------------------------------------------------------------------------
* 0x0 = sRGB | 0 = RGB
* 0x1 = RGB Wide Gamut Fixed Point
* 0x2 = RGB Wide Gamut Floating Point
* 0x3 = AdobeRGB
* 0x4 = DCI-P3
* 0x5 = CustomColorProfile
* (others reserved)
* ----------------------------------------------------------------------------------------------------
* 0x0 = ITU-R BT.601 | 1 = YCbCr444
* 0x1 = ITU-R BT.709
* 0x2 = xvYCC601
* 0x3 = xvYCC709
* 0x4 = sYCC601
* 0x5 = AdobeYCC601
* 0x6 = ITU-R BT.2020 Y'cC'bcC'rc
* 0x7 = ITU-R BT.2020 Y'C'bC'r
* (others reserved)
* ----------------------------------------------------------------------------------------------------
* 0x0 = ITU-R BT.601 | 2 = YCbCr422
* 0x1 = ITU-R BT.709
* 0x2 = xvYCC601
* 0x3 = xvYCC709
* 0x4 = sYCC601
* 0x5 = AdobeYCC601
* 0x6 = ITU-R BT.2020 Y'cC'bcC'rc
* 0x7 = ITU-R BT.2020 Y'C'bC'r
* (others reserved)
* ----------------------------------------------------------------------------------------------------
* 0x0 = ITU-R BT.601 | 3 = YCbCr420
* 0x1 = ITU-R BT.709
* 0x2 = xvYCC601
* 0x3 = xvYCC709
* 0x4 = sYCC601
* 0x5 = AdobeYCC601
* 0x6 = ITU-R BT.2020 Y'cC'bcC'rc
* 0x7 = ITU-R BT.2020 Y'C'bC'r
* (others reserved)
* ----------------------------------------------------------------------------------------------------
* 0x0 =DICOM Part14 Grayscale | 4 = Yonly
* Display Function
* (others reserved)
*/
/* Set Pixel Encoding */
switch (stream->timing.pixel_encoding) {
case PIXEL_ENCODING_RGB:
pixelEncoding = 0x0; /* RGB = 0h */
break;
case PIXEL_ENCODING_YCBCR444:
pixelEncoding = 0x1; /* YCbCr444 = 1h */
break;
case PIXEL_ENCODING_YCBCR422:
pixelEncoding = 0x2; /* YCbCr422 = 2h */
break;
case PIXEL_ENCODING_YCBCR420:
pixelEncoding = 0x3; /* YCbCr420 = 3h */
break;
default:
pixelEncoding = 0x0; /* default RGB = 0h */
break;
}
/* Set Colorimetry format based on pixel encoding */
switch (stream->timing.pixel_encoding) {
case PIXEL_ENCODING_RGB:
if ((cs == COLOR_SPACE_SRGB) ||
(cs == COLOR_SPACE_SRGB_LIMITED))
colorimetryFormat = ColorimetryRGB_DP_sRGB;
else if (cs == COLOR_SPACE_ADOBERGB)
colorimetryFormat = ColorimetryRGB_DP_AdobeRGB;
else if ((cs == COLOR_SPACE_2020_RGB_FULLRANGE) ||
(cs == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
colorimetryFormat = ColorimetryRGB_DP_ITU_R_BT2020RGB;
break;
case PIXEL_ENCODING_YCBCR444:
case PIXEL_ENCODING_YCBCR422:
case PIXEL_ENCODING_YCBCR420:
/* Note: xvYCC probably not supported correctly here on DP since colorspace translation
* loses distinction between BT601 vs xvYCC601 in translation
*/
if (cs == COLOR_SPACE_YCBCR601)
colorimetryFormat = ColorimetryYCC_DP_ITU601;
else if (cs == COLOR_SPACE_YCBCR709)
colorimetryFormat = ColorimetryYCC_DP_ITU709;
else if (cs == COLOR_SPACE_ADOBERGB)
colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
else if (cs == COLOR_SPACE_2020_YCBCR)
colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22)
colorimetryFormat = ColorimetryYCC_DP_ITU709;
break;
default:
colorimetryFormat = ColorimetryRGB_DP_sRGB;
break;
}
info_packet->sb[16] = (pixelEncoding << 4) | colorimetryFormat;
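/* Illustrative example: YCbCr444 (pixelEncoding = 0x1) with BT.709
* (colorimetryFormat = 0x1) gives sb[16] = 0x11, while RGB with sRGB gives 0x00.
*/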
/* Set color depth */
switch (stream->timing.display_color_depth) {
case COLOR_DEPTH_666:
/* NOTE: 6 bpc is not a valid value for YCbCr pixel encoding as of the DP 1.4
* spec, but the value 0 is probably reserved here for potential future use.
*/
info_packet->sb[17] = 0;
break;
case COLOR_DEPTH_888:
info_packet->sb[17] = 1;
break;
case COLOR_DEPTH_101010:
info_packet->sb[17] = 2;
break;
case COLOR_DEPTH_121212:
info_packet->sb[17] = 3;
break;
/*case COLOR_DEPTH_141414: -- NO SUCH FORMAT IN DP SPEC */
case COLOR_DEPTH_161616:
info_packet->sb[17] = 4;
break;
default:
info_packet->sb[17] = 0;
break;
}
/* Limited (CEA) range applies to sRGB limited, BT.2020 RGB limited range, and all YCbCr formats (YCbCr is always limited range) */
if ((cs == COLOR_SPACE_SRGB_LIMITED) ||
(cs == COLOR_SPACE_2020_RGB_LIMITEDRANGE) ||
(pixelEncoding != 0x0)) {
info_packet->sb[17] |= 0x80; /* DB17 bit 7 set to 1 for CEA timing. */
}
/* Content Type (Bits 2:0)
* 0 = Not defined.
* 1 = Graphics.
* 2 = Photo.
* 3 = Video.
* 4 = Game.
*/
info_packet->sb[18] = 0;
}
}
/**
 * mod_build_hf_vsif_infopacket - Prepare an HDMI Vendor Specific InfoFrame.
 * Follows the HDMI spec to build up the Vendor Specific InfoFrame.
*
* @stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
* @info_packet: output structure where to store VSIF
*/
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
{
unsigned int length = 5;
bool hdmi_vic_mode = false;
uint8_t checksum = 0;
uint32_t i = 0;
enum dc_timing_3d_format format;
info_packet->valid = false;
format = stream->timing.timing_3d_format;
if (stream->view_format == VIEW_3D_FORMAT_NONE)
format = TIMING_3D_FORMAT_NONE;
if (stream->timing.hdmi_vic != 0
&& stream->timing.h_total >= 3840
&& stream->timing.v_total >= 2160
&& format == TIMING_3D_FORMAT_NONE)
hdmi_vic_mode = true;
if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode)
return;
info_packet->sb[1] = 0x03;
info_packet->sb[2] = 0x0C;
info_packet->sb[3] = 0x00;
if (format != TIMING_3D_FORMAT_NONE)
info_packet->sb[4] = (2 << 5);
else if (hdmi_vic_mode)
info_packet->sb[4] = (1 << 5);
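/* sb[4] bits [7:5] carry HDMI_Video_Format per the HDMI 1.4 Vendor Specific
* InfoFrame layout: 1 = extended resolution (an HDMI_VIC byte follows in sb[5]),
* 2 = 3D format (a 3D_Structure field follows in sb[5]).
*/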
switch (format) {
case TIMING_3D_FORMAT_HW_FRAME_PACKING:
case TIMING_3D_FORMAT_SW_FRAME_PACKING:
info_packet->sb[5] = (0x0 << 4);
break;
case TIMING_3D_FORMAT_SIDE_BY_SIDE:
case TIMING_3D_FORMAT_SBS_SW_PACKED:
info_packet->sb[5] = (0x8 << 4);
length = 6;
break;
case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
case TIMING_3D_FORMAT_TB_SW_PACKED:
info_packet->sb[5] = (0x6 << 4);
break;
default:
break;
}
if (hdmi_vic_mode)
info_packet->sb[5] = stream->timing.hdmi_vic;
info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR;
info_packet->hb1 = 0x01;
info_packet->hb2 = (uint8_t) (length);
checksum += info_packet->hb0;
checksum += info_packet->hb1;
checksum += info_packet->hb2;
for (i = 1; i <= length; i++)
checksum += info_packet->sb[i];
info_packet->sb[0] = (uint8_t) (0x100 - checksum);
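/* Standard HDMI InfoFrame checksum: sb[0] is chosen so that the byte sum of
* hb0..hb2 and sb[0]..sb[length] is 0 modulo 256. Illustrative example: if the
* bytes summed above total 0x37, then sb[0] = 0x100 - 0x37 = 0xC9.
*/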
info_packet->valid = true;
}
void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
enum adaptive_sync_type asType,
const struct AS_Df_params *param,
struct dc_info_packet *info_packet)
{
info_packet->valid = false;
memset(info_packet, 0, sizeof(struct dc_info_packet));
switch (asType) {
case ADAPTIVE_SYNC_TYPE_DP:
if (stream != NULL)
mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet);
break;
case FREESYNC_TYPE_PCON_IN_WHITELIST:
mod_build_adaptive_sync_infopacket_v1(info_packet);
break;
case ADAPTIVE_SYNC_TYPE_EDP:
mod_build_adaptive_sync_infopacket_v1(info_packet);
break;
case ADAPTIVE_SYNC_TYPE_NONE:
case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST:
default:
break;
}
}
void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet)
{
info_packet->valid = true;
// HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length}
info_packet->hb0 = 0x00;
info_packet->hb1 = 0x22;
info_packet->hb2 = AS_SDP_VER_1;
info_packet->hb3 = 0x00;
}
void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream,
const struct AS_Df_params *param,
struct dc_info_packet *info_packet)
{
info_packet->valid = true;
// HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length}
info_packet->hb0 = 0x00;
info_packet->hb1 = 0x22;
info_packet->hb2 = AS_SDP_VER_2;
info_packet->hb3 = AS_DP_SDP_LENGTH;
//Payload
info_packet->sb[0] = param->supportMode; //1: AVT; 0: FAVT
info_packet->sb[1] = (stream->timing.v_total & 0x00FF);
info_packet->sb[2] = (stream->timing.v_total & 0xFF00) >> 8;
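/* v_total is split little-endian across sb[1]/sb[2]: e.g. v_total = 2250 (0x8CA)
* gives sb[1] = 0xCA and sb[2] = 0x08.
*/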
//info_packet->sb[3] = 0x00; Target RR, not used for AVT
info_packet->sb[4] = (param->increase.support << 6 | param->decrease.support << 7);
info_packet->sb[5] = param->increase.frame_duration_hex;
info_packet->sb[6] = param->decrease.frame_duration_hex;
}
| linux-master | drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include "acp_gfx_if.h"
#define ACP_MODE_I2S 0
#define ACP_MODE_AZ 1
#define mmACP_AZALIA_I2S_SELECT 0x51d4
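/* On ACP 2.2 this register selects the audio routing: a value of ACP_MODE_I2S (0)
* means the ACP I2S path is in use; any other value indicates the Azalia (HDA)
* path, in which case the ACP block is not needed and hw_init returns -ENODEV.
*/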
int amd_acp_hw_init(struct cgs_device *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor)
{
unsigned int acp_mode = ACP_MODE_I2S;
if ((acp_version_major == 2) && (acp_version_minor == 2))
acp_mode = cgs_read_register(cgs_device,
mmACP_AZALIA_I2S_SELECT);
if (acp_mode != ACP_MODE_I2S)
return -ENODEV;
return 0;
}
| linux-master | drivers/gpu/drm/amd/acp/acp_hw.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfd_priv.h"
#include "kfd_events.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "kfd_smi_events.h"
#include "kfd_debug.h"
/*
* GFX11 SQ Interrupts
*
* There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
* packet to the Interrupt Handler:
* Auto - Generated by the SQG (various cmd overflows, timestamps etc)
* Wave - Generated by S_SENDMSG through a shader program
* Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
*
* The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
* 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
*
* - context_id1[7:6]
* Encoding type (0 = Auto, 1 = Wave, 2 = Error)
*
* - context_id0[26]
* PRIV bit indicates that Wave S_SEND or error occurred within trap
*
* - context_id0[24:0]
* 25-bit data with the following layout per encoding type:
* Auto - only context_id0[8:0] is used, which reports various interrupts
* generated by SQG. The rest is 0.
* Wave - user data sent from m0 via S_SENDMSG (context_id0[23:0])
* Error - Error Type (context_id0[24:21]), Error Details (context_id0[20:0])
*
* The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave
* S_SENDMSG and Errors. These are 0 for Auto.
*/
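/*
* Illustrative decode (not an exhaustive reference): for a Wave S_SENDMSG
* interrupt with context_id1 = 0x47 and context_id0 = 0x04123456:
*   encoding  = (context_id1 & 0xc0) >> 6  = 1 (Wave)
*   PRIV      = (context_id0 >> 26) & 0x1  = 1 (sent from within the trap handler)
*   user data = context_id0 & 0xffffff     = 0x123456
* The REG_GET_FIELD() calls in the print helpers below perform exactly these
* mask-and-shift operations using the defines that follow.
*/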
enum SQ_INTERRUPT_WORD_ENCODING {
SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
SQ_INTERRUPT_WORD_ENCODING_INST,
SQ_INTERRUPT_WORD_ENCODING_ERROR,
};
enum SQ_INTERRUPT_ERROR_TYPE {
SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
};
/* SQ_INTERRUPT_WORD_AUTO_CTXID */
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT 0
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT 1
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF_FULL__SHIFT 2
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__REG_TIMESTAMP__SHIFT 3
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__CMD_TIMESTAMP__SHIFT 4
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_CMD_OVERFLOW__SHIFT 5
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_REG_OVERFLOW__SHIFT 6
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__IMMED_OVERFLOW__SHIFT 7
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT 8
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT 6
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK 0x00000001
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK 0x00000002
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF_FULL_MASK 0x00000004
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__REG_TIMESTAMP_MASK 0x00000008
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__CMD_TIMESTAMP_MASK 0x00000010
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_CMD_OVERFLOW_MASK 0x00000020
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_REG_OVERFLOW_MASK 0x00000040
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__IMMED_OVERFLOW_MASK 0x00000080
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK 0x00000100
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK 0x000000c0
/* SQ_INTERRUPT_WORD_WAVE_CTXID */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT 0
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SH_ID__SHIFT 25
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT 26
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT 27
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SIMD_ID__SHIFT 0
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT 2
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT 6
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK 0x00ffffff /* [23:0] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SH_ID_MASK 0x02000000 /* [25] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK 0x04000000 /* [26] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK 0xf8000000 /* [31:27] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SIMD_ID_MASK 0x00000003 /* [33:32] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK 0x0000003c /* [37:34] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK 0x000000c0 /* [39:38] */
/* SQ_INTERRUPT_WORD_ERROR_CTXID */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__DETAIL__SHIFT 0
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__TYPE__SHIFT 21
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__SH_ID__SHIFT 25
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__PRIV__SHIFT 26
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__WAVE_ID__SHIFT 27
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__SIMD_ID__SHIFT 0
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__WGP_ID__SHIFT 2
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__ENCODING__SHIFT 6
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__DETAIL_MASK 0x001fffff /* [20:0] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__TYPE_MASK 0x01e00000 /* [24:21] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__SH_ID_MASK 0x02000000 /* [25] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__PRIV_MASK 0x04000000 /* [26] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__WAVE_ID_MASK 0xf8000000 /* [31:27] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__SIMD_ID_MASK 0x00000003 /* [33:32] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__WGP_ID_MASK 0x0000003c /* [37:34] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__ENCODING_MASK 0x000000c0 /* [39:38] */
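/* The CTXID1 masks/shifts above are applied to context_id1 (the upper 8 bits of
* the 44-bit payload); the bit ranges in brackets refer to positions within the
* combined {context_id1[7:0], context_id0[31:0]} word.
*/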
/*
 * The debugger will send user data (m0) with PRIV=1 to indicate that it requires
 * notification from the KFD, carrying the queue id (DOORBELL_ID) and trap code
 * (TRAP_CODE) in the layout defined below.
*/
#define KFD_CTXID0_TRAP_CODE_SHIFT 10
#define KFD_CTXID0_TRAP_CODE_MASK 0xfffc00
#define KFD_CTXID0_CP_BAD_OP_ECODE_MASK 0x3ffffff
#define KFD_CTXID0_DOORBELL_ID_MASK 0x0003ff
#define KFD_CTXID0_TRAP_CODE(ctxid0) (((ctxid0) & \
KFD_CTXID0_TRAP_CODE_MASK) >> \
KFD_CTXID0_TRAP_CODE_SHIFT)
#define KFD_CTXID0_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
KFD_CTXID0_CP_BAD_OP_ECODE_MASK) >> \
KFD_CTXID0_TRAP_CODE_SHIFT)
#define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CTXID0_DOORBELL_ID_MASK)
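/* Illustrative split of a debugger context_id0 value: ctxid0 = 0x0C05 gives
* KFD_CTXID0_DOORBELL_ID(ctxid0) = 0x005 and
* KFD_CTXID0_TRAP_CODE(ctxid0) = (0x0C05 & 0xfffc00) >> 10 = 0x3.
*/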
static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
{
pr_debug(
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_BUF_FULL),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, REG_TIMESTAMP),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, CMD_TIMESTAMP),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_CMD_OVERFLOW),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_REG_OVERFLOW),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, IMMED_OVERFLOW),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR));
}
static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
{
pr_debug(
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, WAVE_ID),
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, SIMD_ID),
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID));
}
static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
{
pr_warn(
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, PRIV),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, WAVE_ID),
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_ERROR_CTXID1, SIMD_ID),
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
}
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
uint16_t pasid, uint16_t source_id)
{
int ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return;
/* all queues of a process will be unmapped in one time */
if (atomic_read(&p->poison)) {
kfd_unref_process(p);
return;
}
atomic_set(&p->poison, 1);
kfd_unref_process(p);
switch (source_id) {
case SOC15_INTSRC_SQ_INTERRUPT_MSG:
if (dev->dqm->ops.reset_queues)
ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
break;
case SOC21_INTSRC_SDMA_ECC:
default:
break;
}
kfd_signal_poison_consumed_event(dev, pasid);
/* If resetting the queues succeeded, do page retirement without a GPU reset;
* if it failed, fall back to a full GPU reset.
*/
if (!ret)
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
else
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
}
static bool event_interrupt_isr_v11(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
{
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
uint32_t context_id0;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle interrupts from KFD VMIDs */
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
(vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd))
return false;
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
if ((source_id == SOC15_INTSRC_CP_END_OF_PIPE) &&
(context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
return false;
pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
client_id, source_id, vmid, pasid);
pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
return false;
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
source_id == SOC21_INTSRC_SDMA_TRAP ||
KFD_IRQ_IS_FENCE(client_id, source_id) ||
(((client_id == SOC21_IH_CLIENTID_VMC) ||
((client_id == SOC21_IH_CLIENTID_GFX) &&
(source_id == UTCL2_1_0__SRCID__FAULT))) &&
!amdgpu_no_queue_eviction_on_vm_fault);
}
static void event_interrupt_wq_v11(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, ring_id, pasid, vmid;
uint32_t context_id0, context_id1;
uint8_t sq_int_enc, sq_int_priv, sq_int_errtype;
struct kfd_vm_fault_info info = {0};
struct kfd_hsa_memory_exception_data exception_data;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
/* VMC, UTCL2 */
if (client_id == SOC21_IH_CLIENTID_VMC ||
((client_id == SOC21_IH_CLIENTID_GFX) &&
(source_id == UTCL2_1_0__SRCID__FAULT))) {
info.vmid = vmid;
info.mc_id = client_id;
info.page_addr = ih_ring_entry[4] |
(uint64_t)(ih_ring_entry[5] & 0xf) << 32;
info.prot_valid = ring_id & 0x08;
info.prot_read = ring_id & 0x10;
info.prot_write = ring_id & 0x20;
memset(&exception_data, 0, sizeof(exception_data));
exception_data.gpu_id = dev->id;
exception_data.va = (info.page_addr) << PAGE_SHIFT;
exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
exception_data.failure.imprecise = 0;
kfd_set_dbg_ev_from_interrupt(dev, pasid, -1,
KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
&exception_data, sizeof(exception_data));
kfd_smi_event_update_vmfault(dev, pasid);
/* GRBM, SDMA, SE, PMM */
} else if (client_id == SOC21_IH_CLIENTID_GRBM_CP ||
client_id == SOC21_IH_CLIENTID_GFX) {
/* CP */
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
NULL, 0);
/* SDMA */
else if (source_id == SOC21_INTSRC_SDMA_TRAP)
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
else if (source_id == SOC21_INTSRC_SDMA_ECC) {
event_interrupt_poison_consumption_v11(dev, pasid, source_id);
return;
}
/* SQ */
else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
sq_int_enc = REG_GET_FIELD(context_id1,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (sq_int_enc) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
print_sq_intr_info_auto(context_id0, context_id1);
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
NULL, 0)))
return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
sq_int_errtype = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
event_interrupt_poison_consumption_v11(
dev, pasid, source_id);
return;
}
break;
default:
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
}
} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
kfd_process_close_interrupt_drain(pasid);
}
}
const struct kfd_event_interrupt_class event_interrupt_class_v11 = {
.interrupt_isr = event_interrupt_isr_v11,
.interrupt_wq = event_interrupt_wq_v11,
};
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "kfd_pm4_opcodes.h"
#include "gc/gc_10_1_0_sh_mask.h"
static int pm_map_process_v9(struct packet_manager *pm,
uint32_t *buffer, struct qcm_process_device *qpd)
{
struct pm4_mes_map_process *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
struct kfd_node *kfd = pm->dqm->dev;
struct kfd_process_device *pdd =
container_of(qpd, struct kfd_process_device, qpd);
packet = (struct pm4_mes_map_process *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
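/* gds_size is split across two bitfields (low 6 bits, then the next 4 bits),
* e.g. qpd->gds_size = 0x1C0 gives gds_size = 0 and gds_size_hi = 0x7.
*/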
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&
pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;
packet->bitfields2.new_debug = 1;
}
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
if (qpd->tba_addr) {
packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
/* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
* not defined, so setting it won't do any harm.
*/
packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
| 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;
packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
}
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
packet->vm_context_page_table_base_addr_lo32 =
lower_32_bits(vm_page_table_base_addr);
packet->vm_context_page_table_base_addr_hi32 =
upper_32_bits(vm_page_table_base_addr);
return 0;
}
static int pm_map_process_aldebaran(struct packet_manager *pm,
uint32_t *buffer, struct qcm_process_device *qpd)
{
struct pm4_mes_map_process_aldebaran *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
struct kfd_dev *kfd = pm->dqm->dev->kfd;
struct kfd_process_device *pdd =
container_of(qpd, struct kfd_process_device, qpd);
int i;
packet = (struct pm4_mes_map_process_aldebaran *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |
pdd->spi_dbg_launch_mode;
if (pdd->process->debug_trap_enabled) {
for (i = 0; i < kfd->device_info.num_of_watch_points; i++)
packet->tcp_watch_cntl[i] = pdd->watch_points[i];
packet->bitfields2.single_memops =
!!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);
}
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
if (qpd->tba_addr) {
packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
}
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
packet->vm_context_page_table_base_addr_lo32 =
lower_32_bits(vm_page_table_base_addr);
packet->vm_context_page_table_base_addr_hi32 =
upper_32_bits(vm_page_table_base_addr);
return 0;
}
static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
struct pm4_mes_runlist *packet;
int concurrent_proc_cnt = 0;
struct kfd_node *kfd = pm->dqm->dev;
/* Determine the number of processes to map together to HW:
* it can not exceed the number of VMIDs available to the
* scheduler, and it is determined by the smaller of the number
* of processes in the runlist and kfd module parameter
* hws_max_conc_proc.
* Note: the arbitration between the number of VMIDs and
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
concurrent_proc_cnt = min(pm->dqm->processes_count,
kfd->max_proc_per_quantum);
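/* e.g. 6 processes in the runlist with max_proc_per_quantum = 4 gives concurrent_proc_cnt = 4 */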
packet = (struct pm4_mes_runlist *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_runlist));
packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
sizeof(struct pm4_mes_runlist));
packet->bitfields4.ib_size = ib_size_in_dwords;
packet->bitfields4.chain = chain ? 1 : 0;
packet->bitfields4.offload_polling = 0;
packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
packet->bitfields4.valid = 1;
packet->bitfields4.process_cnt = concurrent_proc_cnt;
packet->ordinal2 = lower_32_bits(ib);
packet->ib_base_hi = upper_32_bits(ib);
return 0;
}
static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
struct scheduling_resources *res)
{
struct pm4_mes_set_resources *packet;
packet = (struct pm4_mes_set_resources *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
sizeof(struct pm4_mes_set_resources));
packet->bitfields2.queue_type =
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
packet->gws_mask_lo = lower_32_bits(res->gws_mask);
packet->gws_mask_hi = upper_32_bits(res->gws_mask);
packet->queue_mask_lo = lower_32_bits(res->queue_mask);
packet->queue_mask_hi = upper_32_bits(res->queue_mask);
return 0;
}
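/*
 * SDMA 5.2.0 and newer select SDMA engines through the extended_engine_sel
 * field of MAP/UNMAP_QUEUES instead of the legacy engine_sel encodings;
 * see pm_map_queues_v9() and pm_unmap_queues_v9() below.
 */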
static inline bool pm_use_ext_eng(struct kfd_dev *dev)
{
return dev->adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 2, 0);
}
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
struct pm4_mes_map_queues *packet;
bool use_static = is_static;
packet = (struct pm4_mes_map_queues *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
sizeof(struct pm4_mes_map_queues));
packet->bitfields2.num_queues = 1;
packet->bitfields2.queue_sel =
queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
queue_type__mes_map_queues__normal_compute_vi;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_COMPUTE:
if (use_static)
packet->bitfields2.queue_type =
queue_type__mes_map_queues__normal_latency_static_queue_vi;
break;
case KFD_QUEUE_TYPE_DIQ:
packet->bitfields2.queue_type =
queue_type__mes_map_queues__debug_interface_queue_vi;
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
use_static = false; /* no static queues under SDMA */
if (q->properties.sdma_engine_id < 2 &&
!pm_use_ext_eng(q->device->kfd))
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
else {
/*
* For GFX9.4.3, SDMA engine id can be greater than 8.
* For such cases, set extended_engine_sel to 2 and
* ensure engine_sel lies between 0-7.
*/
if (q->properties.sdma_engine_id >= 8)
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__sdma8_to_15_sel;
else
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
packet->bitfields2.engine_sel = q->properties.sdma_engine_id % 8;
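/* e.g. sdma_engine_id = 10 selects sdma8_to_15_sel with engine_sel = 2 */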
}
break;
default:
WARN(1, "queue type %d", q->properties.type);
return -EINVAL;
}
packet->bitfields3.doorbell_offset =
q->properties.doorbell_off;
packet->mqd_addr_lo =
lower_32_bits(q->gart_mqd_addr);
packet->mqd_addr_hi =
upper_32_bits(q->gart_mqd_addr);
packet->wptr_addr_lo =
lower_32_bits((uint64_t)q->properties.write_ptr);
packet->wptr_addr_hi =
upper_32_bits((uint64_t)q->properties.write_ptr);
return 0;
}
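/*
 * Program the scheduler grace period by writing the wait-times register
 * reported by build_grace_period_packet_info() through a WRITE_DATA packet;
 * USE_DEFAULT_GRACE_PERIOD restores the value cached in dqm->wait_times.
 */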
static int pm_set_grace_period_v9(struct packet_manager *pm,
uint32_t *buffer,
uint32_t grace_period)
{
struct pm4_mec_write_data_mmio *packet;
uint32_t reg_offset = 0;
uint32_t reg_data = 0;
pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
pm->dqm->dev->adev,
pm->dqm->wait_times,
grace_period,
&reg_offset,
&reg_data);
if (grace_period == USE_DEFAULT_GRACE_PERIOD)
reg_data = pm->dqm->wait_times;
packet = (struct pm4_mec_write_data_mmio *)buffer;
memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio));
packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA,
sizeof(struct pm4_mec_write_data_mmio));
packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register;
packet->bitfields2.addr_incr =
addr_incr___write_data__do_not_increment_address;
packet->bitfields3.dst_mmreg_addr = reg_offset;
packet->data = reg_data;
return 0;
}
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset)
{
struct pm4_mes_unmap_queues *packet;
packet = (struct pm4_mes_unmap_queues *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues));
packet->bitfields2.extended_engine_sel =
pm_use_ext_eng(pm->dqm->dev->kfd) ?
extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel :
extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__compute;
if (reset)
packet->bitfields2.action =
action__mes_unmap_queues__reset_queues;
else
packet->bitfields2.action =
action__mes_unmap_queues__preempt_queues;
switch (filter) {
case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
packet->bitfields3a.pasid = filter_param;
break;
case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_queues;
break;
case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
/* in this case, we do not preempt static queues */
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
break;
default:
WARN(1, "filter %d", filter);
return -EINVAL;
}
return 0;
}
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value)
{
struct pm4_mes_query_status *packet;
packet = (struct pm4_mes_query_status *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_query_status));
packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
sizeof(struct pm4_mes_query_status));
packet->bitfields2.context_id = 0;
packet->bitfields2.interrupt_sel =
interrupt_sel__mes_query_status__completion_status;
packet->bitfields2.command =
command__mes_query_status__fence_only_after_write_ack;
packet->addr_hi = upper_32_bits((uint64_t)fence_address);
packet->addr_lo = lower_32_bits((uint64_t)fence_address);
packet->data_hi = upper_32_bits((uint64_t)fence_value);
packet->data_lo = lower_32_bits((uint64_t)fence_value);
return 0;
}
const struct packet_manager_funcs kfd_v9_pm_funcs = {
.map_process = pm_map_process_v9,
.runlist = pm_runlist_v9,
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
.set_grace_period = pm_set_grace_period_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
.runlist_size = sizeof(struct pm4_mes_runlist),
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
.set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.map_process = pm_map_process_aldebaran,
.runlist = pm_runlist_v9,
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
.set_grace_period = pm_set_grace_period_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process_aldebaran),
.runlist_size = sizeof(struct pm4_mes_runlist),
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
.set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c |
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfd_debug.h"
#include "kfd_device_queue_manager.h"
#include "kfd_topology.h"
#include <linux/file.h>
#include <uapi/linux/kfd_ioctl.h>
#define MAX_WATCH_ADDRESSES 4
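/*
 * Report the first pending exception the debugger has subscribed to,
 * checking queues first, then devices, then the process itself; returns
 * -EAGAIN when nothing is pending.
 */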
int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
unsigned int *queue_id,
unsigned int *gpu_id,
uint64_t exception_clear_mask,
uint64_t *event_status)
{
struct process_queue_manager *pqm;
struct process_queue_node *pqn;
int i;
if (!(process && process->debug_trap_enabled))
return -ENODATA;
mutex_lock(&process->event_mutex);
*event_status = 0;
*queue_id = 0;
*gpu_id = 0;
/* find and report queue events */
pqm = &process->pqm;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
uint64_t tmp = process->exception_enable_mask;
if (!pqn->q)
continue;
tmp &= pqn->q->properties.exception_status;
if (!tmp)
continue;
*event_status = pqn->q->properties.exception_status;
*queue_id = pqn->q->properties.queue_id;
*gpu_id = pqn->q->device->id;
pqn->q->properties.exception_status &= ~exception_clear_mask;
goto out;
}
/* find and report device events */
for (i = 0; i < process->n_pdds; i++) {
struct kfd_process_device *pdd = process->pdds[i];
uint64_t tmp = process->exception_enable_mask
& pdd->exception_status;
if (!tmp)
continue;
*event_status = pdd->exception_status;
*gpu_id = pdd->dev->id;
pdd->exception_status &= ~exception_clear_mask;
goto out;
}
/* report process events */
if (process->exception_enable_mask & process->exception_status) {
*event_status = process->exception_status;
process->exception_status &= ~exception_clear_mask;
}
out:
mutex_unlock(&process->event_mutex);
return *event_status ? 0 : -EAGAIN;
}
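/*
 * Deferred counterpart of the inline kernel_write() in kfd_dbg_ev_raise():
 * pokes the debugger's event file descriptor from workqueue context.
 */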
void debug_event_write_work_handler(struct work_struct *work)
{
struct kfd_process *process;
static const char write_data = '.';
loff_t pos = 0;
process = container_of(work,
struct kfd_process,
debug_event_workarea);
kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
}
/* update process/device/queue exception status, write to descriptor
* only if exception_status is enabled.
*/
bool kfd_dbg_ev_raise(uint64_t event_mask,
struct kfd_process *process, struct kfd_node *dev,
unsigned int source_id, bool use_worker,
void *exception_data, size_t exception_data_size)
{
struct process_queue_manager *pqm;
struct process_queue_node *pqn;
int i;
static const char write_data = '.';
loff_t pos = 0;
bool is_subscribed = true;
if (!(process && process->debug_trap_enabled))
return false;
mutex_lock(&process->event_mutex);
if (event_mask & KFD_EC_MASK_DEVICE) {
for (i = 0; i < process->n_pdds; i++) {
struct kfd_process_device *pdd = process->pdds[i];
if (pdd->dev != dev)
continue;
pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE;
if (event_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
if (!pdd->vm_fault_exc_data) {
pdd->vm_fault_exc_data = kmemdup(
exception_data,
exception_data_size,
GFP_KERNEL);
if (!pdd->vm_fault_exc_data)
pr_debug("Failed to allocate exception data memory");
} else {
pr_debug("Debugger exception data not saved\n");
print_hex_dump_bytes("exception data: ",
DUMP_PREFIX_OFFSET,
exception_data,
exception_data_size);
}
}
break;
}
} else if (event_mask & KFD_EC_MASK_PROCESS) {
process->exception_status |= event_mask & KFD_EC_MASK_PROCESS;
} else {
pqm = &process->pqm;
list_for_each_entry(pqn, &pqm->queues,
process_queue_list) {
int target_id;
if (!pqn->q)
continue;
target_id = event_mask & KFD_EC_MASK(EC_QUEUE_NEW) ?
pqn->q->properties.queue_id :
pqn->q->doorbell_id;
if (pqn->q->device != dev || target_id != source_id)
continue;
pqn->q->properties.exception_status |= event_mask;
break;
}
}
if (process->exception_enable_mask & event_mask) {
if (use_worker)
schedule_work(&process->debug_event_workarea);
else
kernel_write(process->dbg_ev_file,
&write_data,
1,
&pos);
} else {
is_subscribed = false;
}
mutex_unlock(&process->event_mutex);
return is_subscribed;
}
/* set pending event queue entry from ring entry */
bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
unsigned int pasid,
uint32_t doorbell_id,
uint64_t trap_mask,
void *exception_data,
size_t exception_data_size)
{
struct kfd_process *p;
bool signaled_to_debugger_or_runtime = false;
p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return false;
if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
exception_data, exception_data_size)) {
struct process_queue_manager *pqm;
struct process_queue_node *pqn;
if (!!(trap_mask & KFD_EC_MASK_QUEUE) &&
p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
mutex_lock(&p->mutex);
pqm = &p->pqm;
list_for_each_entry(pqn, &pqm->queues,
process_queue_list) {
if (!(pqn->q && pqn->q->device == dev &&
pqn->q->doorbell_id == doorbell_id))
continue;
kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id,
trap_mask);
signaled_to_debugger_or_runtime = true;
break;
}
mutex_unlock(&p->mutex);
} else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
kfd_dqm_evict_pasid(dev->dqm, p->pasid);
kfd_signal_vm_fault_event(dev, p->pasid, NULL,
exception_data);
signaled_to_debugger_or_runtime = true;
}
} else {
signaled_to_debugger_or_runtime = true;
}
kfd_unref_process(p);
return signaled_to_debugger_or_runtime;
}
int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
unsigned int dev_id,
unsigned int queue_id,
uint64_t error_reason)
{
if (error_reason & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
struct kfd_process_device *pdd = NULL;
struct kfd_hsa_memory_exception_data *data;
int i;
for (i = 0; i < p->n_pdds; i++) {
if (p->pdds[i]->dev->id == dev_id) {
pdd = p->pdds[i];
break;
}
}
if (!pdd)
return -ENODEV;
data = (struct kfd_hsa_memory_exception_data *)
pdd->vm_fault_exc_data;
kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
}
if (error_reason & (KFD_EC_MASK(EC_PROCESS_RUNTIME))) {
/*
* block should only happen after the debugger receives runtime
* enable notice.
*/
up(&p->runtime_enable_sema);
error_reason &= ~KFD_EC_MASK(EC_PROCESS_RUNTIME);
}
if (error_reason)
return kfd_send_exception_to_runtime(p, queue_id, error_reason);
return 0;
}
static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
{
struct mqd_update_info minfo = {0};
int err;
if (!q)
return 0;
if (!kfd_dbg_has_cwsr_workaround(q->device))
return 0;
if (enable && q->properties.is_user_cu_masked)
return -EBUSY;
minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE;
q->properties.is_dbg_wa = enable;
err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);
if (err)
q->properties.is_dbg_wa = false;
return err;
}
static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable)
{
struct process_queue_manager *pqm = &target->pqm;
struct process_queue_node *pqn;
int r = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
r = kfd_dbg_set_queue_workaround(pqn->q, enable);
if (enable && r)
goto unwind;
}
return 0;
unwind:
list_for_each_entry(pqn, &pqm->queues, process_queue_list)
kfd_dbg_set_queue_workaround(pqn->q, false);
if (enable)
target->runtime_info.runtime_state = r == -EBUSY ?
DEBUG_RUNTIME_STATE_ENABLED_BUSY :
DEBUG_RUNTIME_STATE_ENABLED_ERROR;
return r;
}
int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
uint32_t flags = pdd->process->dbg_flags;
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
return 0;
return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
pdd->watch_points, flags, sq_trap_en);
}
#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1
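/*
 * Device watch points are tracked as bitmasks: bit i set in
 * kfd->alloc_watch_ids means slot i of the MAX_WATCH_ADDRESSES watch points
 * is taken, and the same bit in pdd->alloc_watch_ids records which process
 * owns it.
 */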
static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id)
{
int i;
*watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;
spin_lock(&pdd->dev->kfd->watch_points_lock);
for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
/* device watchpoint in use so skip */
if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
continue;
pdd->alloc_watch_ids |= 0x1 << i;
pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
*watch_id = i;
spin_unlock(&pdd->dev->kfd->watch_points_lock);
return 0;
}
spin_unlock(&pdd->dev->kfd->watch_points_lock);
return -ENOMEM;
}
static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
spin_lock(&pdd->dev->kfd->watch_points_lock);
/* process owns device watch point so safe to clear */
if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
pdd->alloc_watch_ids &= ~(0x1 << watch_id);
pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
}
spin_unlock(&pdd->dev->kfd->watch_points_lock);
}
static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
bool owns_watch_id = false;
spin_lock(&pdd->dev->kfd->watch_points_lock);
owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
((pdd->alloc_watch_ids >> watch_id) & 0x1);
spin_unlock(&pdd->dev->kfd->watch_points_lock);
return owns_watch_id;
}
int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
uint32_t watch_id)
{
int r;
if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id))
return -EINVAL;
if (!pdd->dev->kfd->shared_resources.enable_mes) {
r = debug_lock_and_unmap(pdd->dev->dqm);
if (r)
return r;
}
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch(
pdd->dev->adev,
watch_id);
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
if (!pdd->dev->kfd->shared_resources.enable_mes)
r = debug_map_and_unlock(pdd->dev->dqm);
else
r = kfd_dbg_set_mes_debug_mode(pdd, true);
kfd_dbg_clear_dev_watch_id(pdd, watch_id);
return r;
}
int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
uint64_t watch_address,
uint32_t watch_address_mask,
uint32_t *watch_id,
uint32_t watch_mode)
{
int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
uint32_t xcc_mask = pdd->dev->xcc_mask;
if (r)
return r;
if (!pdd->dev->kfd->shared_resources.enable_mes) {
r = debug_lock_and_unmap(pdd->dev->dqm);
if (r) {
kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
return r;
}
}
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
for_each_inst(xcc_id, xcc_mask)
pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
pdd->dev->adev,
watch_address,
watch_address_mask,
*watch_id,
watch_mode,
pdd->dev->vm_info.last_vmid_kfd,
xcc_id);
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
if (!pdd->dev->kfd->shared_resources.enable_mes)
r = debug_map_and_unlock(pdd->dev->dqm);
else
r = kfd_dbg_set_mes_debug_mode(pdd, true);
/* HWS is broken so there is no point in a HW rollback, but release the watchpoint anyway */
if (r)
kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
return 0;
}
static void kfd_dbg_clear_process_address_watch(struct kfd_process *target)
{
int i, j;
for (i = 0; i < target->n_pdds; i++)
for (j = 0; j < MAX_WATCH_ADDRESSES; j++)
kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j);
}
int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
{
uint32_t prev_flags = target->dbg_flags;
int i, r = 0, rewind_count = 0;
for (i = 0; i < target->n_pdds; i++) {
if (!kfd_dbg_is_per_vmid_supported(target->pdds[i]->dev) &&
(*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) {
*flags = prev_flags;
return -EACCES;
}
}
target->dbg_flags = *flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP;
*flags = prev_flags;
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
continue;
if (!pdd->dev->kfd->shared_resources.enable_mes)
r = debug_refresh_runlist(pdd->dev->dqm);
else
r = kfd_dbg_set_mes_debug_mode(pdd, true);
if (r) {
target->dbg_flags = prev_flags;
break;
}
rewind_count++;
}
/* Rewind flags */
if (r) {
target->dbg_flags = prev_flags;
for (i = 0; i < rewind_count; i++) {
struct kfd_process_device *pdd = target->pdds[i];
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
continue;
if (!pdd->dev->kfd->shared_resources.enable_mes)
debug_refresh_runlist(pdd->dev->dqm);
else
kfd_dbg_set_mes_debug_mode(pdd, true);
}
}
return r;
}
/* kfd_dbg_trap_deactivate:
* target: target process
* unwind: If this is unwinding a failed kfd_dbg_trap_enable()
* unwind_count:
* If unwind == true, how far down the pdd list we need
* to unwind
* else: ignored
*/
void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count)
{
int i;
if (!unwind) {
uint32_t flags = 0;
int resume_count = resume_queues(target, 0, NULL);
if (resume_count)
pr_debug("Resumed %d queues\n", resume_count);
cancel_work_sync(&target->debug_event_workarea);
kfd_dbg_clear_process_address_watch(target);
kfd_dbg_trap_set_wave_launch_mode(target, 0);
kfd_dbg_trap_set_flags(target, &flags);
}
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
/* If this is an unwind, and we have unwound the required
 * enable calls on the pdd list, we need to stop now;
 * otherwise we may mess up another debugger session.
 */
if (unwind && i == unwind_count)
break;
kfd_process_set_trap_debug_flag(&pdd->qpd, false);
/* GFX off is already disabled by debug activate if not RLC restore supported. */
if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
pdd->spi_dbg_override =
pdd->dev->kfd2kgd->disable_debug_trap(
pdd->dev->adev,
target->runtime_info.ttmp_setup,
pdd->dev->vm_info.last_vmid_kfd);
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
if (!kfd_dbg_is_per_vmid_supported(pdd->dev) &&
release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd))
pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id);
if (!pdd->dev->kfd->shared_resources.enable_mes)
debug_refresh_runlist(pdd->dev->dqm);
else
kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
}
kfd_dbg_set_workaround(target, false);
}
static void kfd_dbg_clean_exception_status(struct kfd_process *target)
{
struct process_queue_manager *pqm;
struct process_queue_node *pqn;
int i;
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
kfd_process_drain_interrupts(pdd);
pdd->exception_status = 0;
}
pqm = &target->pqm;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if (!pqn->q)
continue;
pqn->q->properties.exception_status = 0;
}
target->exception_status = 0;
}
int kfd_dbg_trap_disable(struct kfd_process *target)
{
if (!target->debug_trap_enabled)
return 0;
/*
 * If the runtime is enabled, deactivate the debug trap now. Otherwise defer
 * deactivation to runtime enable and reset a busy/errored runtime state back
 * to enabled so the target can be re-attached.
 */
if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
kfd_dbg_trap_deactivate(target, false, 0);
else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
fput(target->dbg_ev_file);
target->dbg_ev_file = NULL;
if (target->debugger_process) {
atomic_dec(&target->debugger_process->debugged_process_count);
target->debugger_process = NULL;
}
target->debug_trap_enabled = false;
kfd_dbg_clean_exception_status(target);
kfd_unref_process(target);
return 0;
}
int kfd_dbg_trap_activate(struct kfd_process *target)
{
int i, r = 0;
r = kfd_dbg_set_workaround(target, true);
if (r)
return r;
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) {
r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd);
if (r) {
target->runtime_info.runtime_state = (r == -EBUSY) ?
DEBUG_RUNTIME_STATE_ENABLED_BUSY :
DEBUG_RUNTIME_STATE_ENABLED_ERROR;
goto unwind_err;
}
}
/* Disable GFX OFF to prevent garbage read/writes to debug registers.
* If RLC restore of debug registers is not supported and runtime enable
* hasn't done so already on ttmp setup request, restore the trap config registers.
*
* If RLC restore of debug registers is not supported, keep gfx off disabled for
* the debug session.
*/
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
if (!(kfd_dbg_is_rlc_restore_supported(pdd->dev) ||
target->runtime_info.ttmp_setup))
pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true,
pdd->dev->vm_info.last_vmid_kfd);
pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
pdd->dev->adev,
false,
pdd->dev->vm_info.last_vmid_kfd);
if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
/*
* Setting the debug flag in the trap handler requires that the TMA has been
* allocated, which occurs during CWSR initialization.
* In the event that CWSR has not been initialized at this point, setting the
* flag will be called again during CWSR initialization if the target process
* is still debug enabled.
*/
kfd_process_set_trap_debug_flag(&pdd->qpd, true);
if (!pdd->dev->kfd->shared_resources.enable_mes)
r = debug_refresh_runlist(pdd->dev->dqm);
else
r = kfd_dbg_set_mes_debug_mode(pdd, true);
if (r) {
target->runtime_info.runtime_state =
DEBUG_RUNTIME_STATE_ENABLED_ERROR;
goto unwind_err;
}
}
return 0;
unwind_err:
/* Enabling debug failed, we need to disable on
* all GPUs so the enable is all or nothing.
*/
kfd_dbg_trap_deactivate(target, true, i);
return r;
}
int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
void __user *runtime_info, uint32_t *runtime_size)
{
struct file *f;
uint32_t copy_size;
int i, r = 0;
if (target->debug_trap_enabled)
return -EALREADY;
/* Enable pre-checks */
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
if (!KFD_IS_SOC15(pdd->dev))
return -ENODEV;
if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) ||
kfd_dbg_has_cwsr_workaround(pdd->dev)))
return -EBUSY;
}
copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info));
f = fget(fd);
if (!f) {
pr_err("Failed to get file for (%i)\n", fd);
return -EBADF;
}
target->dbg_ev_file = f;
/* defer activation to runtime if not runtime enabled */
if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
kfd_dbg_trap_activate(target);
/* We already hold the process reference but hold another one for the
* debug session.
*/
kref_get(&target->ref);
target->debug_trap_enabled = true;
if (target->debugger_process)
atomic_inc(&target->debugger_process->debugged_process_count);
if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) {
kfd_dbg_trap_deactivate(target, false, 0);
r = -EFAULT;
}
*runtime_size = sizeof(target->runtime_info);
return r;
}
static int kfd_dbg_validate_trap_override_request(struct kfd_process *p,
uint32_t trap_override,
uint32_t trap_mask_request,
uint32_t *trap_mask_supported)
{
int i = 0;
*trap_mask_supported = 0xffffffff;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
int err = pdd->dev->kfd2kgd->validate_trap_override_request(
pdd->dev->adev,
trap_override,
trap_mask_supported);
if (err)
return err;
}
if (trap_mask_request & ~*trap_mask_supported)
return -EACCES;
return 0;
}
int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
uint32_t trap_override,
uint32_t trap_mask_bits,
uint32_t trap_mask_request,
uint32_t *trap_mask_prev,
uint32_t *trap_mask_supported)
{
int r = 0, i;
r = kfd_dbg_validate_trap_override_request(target,
trap_override,
trap_mask_request,
trap_mask_supported);
if (r)
return r;
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
pdd->spi_dbg_override = pdd->dev->kfd2kgd->set_wave_launch_trap_override(
pdd->dev->adev,
pdd->dev->vm_info.last_vmid_kfd,
trap_override,
trap_mask_bits,
trap_mask_request,
trap_mask_prev,
pdd->spi_dbg_override);
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
if (!pdd->dev->kfd->shared_resources.enable_mes)
r = debug_refresh_runlist(pdd->dev->dqm);
else
r = kfd_dbg_set_mes_debug_mode(pdd, true);
if (r)
break;
}
return r;
}
int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
uint8_t wave_launch_mode)
{
int r = 0, i;
if (wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL &&
wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT &&
wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG)
return -EINVAL;
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
pdd->spi_dbg_launch_mode = pdd->dev->kfd2kgd->set_wave_launch_mode(
pdd->dev->adev,
wave_launch_mode,
pdd->dev->vm_info.last_vmid_kfd);
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
if (!pdd->dev->kfd->shared_resources.enable_mes)
r = debug_refresh_runlist(pdd->dev->dqm);
else
r = kfd_dbg_set_mes_debug_mode(pdd, true);
if (r)
break;
}
return r;
}
int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
uint32_t source_id,
uint32_t exception_code,
bool clear_exception,
void __user *info,
uint32_t *info_size)
{
bool found = false;
int r = 0;
uint32_t copy_size, actual_info_size = 0;
uint64_t *exception_status_ptr = NULL;
if (!target)
return -EINVAL;
if (!info || !info_size)
return -EINVAL;
mutex_lock(&target->event_mutex);
if (KFD_DBG_EC_TYPE_IS_QUEUE(exception_code)) {
/* Per queue exceptions */
struct queue *queue = NULL;
int i;
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
struct qcm_process_device *qpd = &pdd->qpd;
list_for_each_entry(queue, &qpd->queues_list, list) {
if (!found && queue->properties.queue_id == source_id) {
found = true;
break;
}
}
if (found)
break;
}
if (!found) {
r = -EINVAL;
goto out;
}
if (!(queue->properties.exception_status & KFD_EC_MASK(exception_code))) {
r = -ENODATA;
goto out;
}
exception_status_ptr = &queue->properties.exception_status;
} else if (KFD_DBG_EC_TYPE_IS_DEVICE(exception_code)) {
/* Per device exceptions */
struct kfd_process_device *pdd = NULL;
int i;
for (i = 0; i < target->n_pdds; i++) {
pdd = target->pdds[i];
if (pdd->dev->id == source_id) {
found = true;
break;
}
}
if (!found) {
r = -EINVAL;
goto out;
}
if (!(pdd->exception_status & KFD_EC_MASK(exception_code))) {
r = -ENODATA;
goto out;
}
if (exception_code == EC_DEVICE_MEMORY_VIOLATION) {
copy_size = min((size_t)(*info_size), pdd->vm_fault_exc_data_size);
if (copy_to_user(info, pdd->vm_fault_exc_data, copy_size)) {
r = -EFAULT;
goto out;
}
actual_info_size = pdd->vm_fault_exc_data_size;
if (clear_exception) {
kfree(pdd->vm_fault_exc_data);
pdd->vm_fault_exc_data = NULL;
pdd->vm_fault_exc_data_size = 0;
}
}
exception_status_ptr = &pdd->exception_status;
} else if (KFD_DBG_EC_TYPE_IS_PROCESS(exception_code)) {
/* Per process exceptions */
if (!(target->exception_status & KFD_EC_MASK(exception_code))) {
r = -ENODATA;
goto out;
}
if (exception_code == EC_PROCESS_RUNTIME) {
copy_size = min((size_t)(*info_size), sizeof(target->runtime_info));
if (copy_to_user(info, (void *)&target->runtime_info, copy_size)) {
r = -EFAULT;
goto out;
}
actual_info_size = sizeof(target->runtime_info);
}
exception_status_ptr = &target->exception_status;
} else {
pr_debug("Bad exception type [%i]\n", exception_code);
r = -EINVAL;
goto out;
}
*info_size = actual_info_size;
if (clear_exception)
*exception_status_ptr &= ~KFD_EC_MASK(exception_code);
out:
mutex_unlock(&target->event_mutex);
return r;
}
int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
uint64_t exception_clear_mask,
void __user *user_info,
uint32_t *number_of_device_infos,
uint32_t *entry_size)
{
struct kfd_dbg_device_info_entry device_info;
uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
int i, r = 0;
if (!(target && user_info && number_of_device_infos && entry_size))
return -EINVAL;
tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
*number_of_device_infos = target->n_pdds;
*entry_size = min_t(size_t, *entry_size, sizeof(device_info));
if (!tmp_num_devices)
return 0;
memset(&device_info, 0, sizeof(device_info));
mutex_lock(&target->event_mutex);
/* Run over all pdd of the process */
for (i = 0; i < tmp_num_devices; i++) {
struct kfd_process_device *pdd = target->pdds[i];
struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id);
device_info.gpu_id = pdd->dev->id;
device_info.exception_status = pdd->exception_status;
device_info.lds_base = pdd->lds_base;
device_info.lds_limit = pdd->lds_limit;
device_info.scratch_base = pdd->scratch_base;
device_info.scratch_limit = pdd->scratch_limit;
device_info.gpuvm_base = pdd->gpuvm_base;
device_info.gpuvm_limit = pdd->gpuvm_limit;
device_info.location_id = topo_dev->node_props.location_id;
device_info.vendor_id = topo_dev->node_props.vendor_id;
device_info.device_id = topo_dev->node_props.device_id;
device_info.revision_id = pdd->dev->adev->pdev->revision;
device_info.subsystem_vendor_id = pdd->dev->adev->pdev->subsystem_vendor;
device_info.subsystem_device_id = pdd->dev->adev->pdev->subsystem_device;
device_info.fw_version = pdd->dev->kfd->mec_fw_version;
device_info.gfx_target_version =
topo_dev->node_props.gfx_target_version;
device_info.simd_count = topo_dev->node_props.simd_count;
device_info.max_waves_per_simd =
topo_dev->node_props.max_waves_per_simd;
device_info.array_count = topo_dev->node_props.array_count;
device_info.simd_arrays_per_engine =
topo_dev->node_props.simd_arrays_per_engine;
device_info.num_xcc = NUM_XCC(pdd->dev->xcc_mask);
device_info.capability = topo_dev->node_props.capability;
device_info.debug_prop = topo_dev->node_props.debug_prop;
if (exception_clear_mask)
pdd->exception_status &= ~exception_clear_mask;
if (copy_to_user(user_info, &device_info, *entry_size)) {
r = -EFAULT;
break;
}
user_info += tmp_entry_size;
}
mutex_unlock(&target->event_mutex);
return r;
}
void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
uint64_t exception_set_mask)
{
uint64_t found_mask = 0;
struct process_queue_manager *pqm;
struct process_queue_node *pqn;
static const char write_data = '.';
loff_t pos = 0;
int i;
mutex_lock(&target->event_mutex);
found_mask |= target->exception_status;
pqm = &target->pqm;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if (!pqn->q)
continue;
found_mask |= pqn->q->properties.exception_status;
}
for (i = 0; i < target->n_pdds; i++) {
struct kfd_process_device *pdd = target->pdds[i];
found_mask |= pdd->exception_status;
}
if (exception_set_mask & found_mask)
kernel_write(target->dbg_ev_file, &write_data, 1, &pos);
target->exception_enable_mask = exception_set_mask;
mutex_unlock(&target->event_mutex);
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_debug.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#define MQD_SIZE_ALIGNED 768
/*
* kfd_locked is used to lock the kfd driver during suspend or reset
* once locked, kfd driver will stop any further GPU execution.
* create process (open) will return -EAGAIN.
*/
static int kfd_locked;
#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_node *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
switch (sdma_version) {
case IP_VERSION(4, 0, 0):/* VEGA10 */
case IP_VERSION(4, 0, 1):/* VEGA12 */
case IP_VERSION(4, 1, 0):/* RAVEN */
case IP_VERSION(4, 1, 1):/* RAVEN */
case IP_VERSION(4, 1, 2):/* RENOIR */
case IP_VERSION(5, 2, 1):/* VANGOGH */
case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
kfd->device_info.num_sdma_queues_per_engine = 2;
break;
case IP_VERSION(4, 2, 0):/* VEGA20 */
case IP_VERSION(4, 2, 2):/* ARCTURUS */
case IP_VERSION(4, 4, 0):/* ALDEBARAN */
case IP_VERSION(4, 4, 2):
case IP_VERSION(5, 0, 0):/* NAVI10 */
case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
case IP_VERSION(5, 0, 2):/* NAVI14 */
case IP_VERSION(5, 0, 5):/* NAVI12 */
case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
kfd->device_info.num_sdma_queues_per_engine = 8;
break;
default:
dev_warn(kfd_device,
"Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
sdma_version);
kfd->device_info.num_sdma_queues_per_engine = 8;
}
bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
kfd->adev->sdma.num_instances *
kfd->device_info.num_reserved_sdma_queues_per_engine);
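/* e.g. 2 SDMA instances with 2 reserved queues each set bits 0-3: engine-0 q0, engine-1 q0, engine-0 q1, engine-1 q1 */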
break;
default:
break;
}
}
static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
uint32_t gc_version = KFD_GC_VERSION(kfd);
switch (gc_version) {
case IP_VERSION(9, 0, 1): /* VEGA10 */
case IP_VERSION(9, 1, 0): /* RAVEN */
case IP_VERSION(9, 2, 1): /* VEGA12 */
case IP_VERSION(9, 2, 2): /* RAVEN */
case IP_VERSION(9, 3, 0): /* RENOIR */
case IP_VERSION(9, 4, 0): /* VEGA20 */
case IP_VERSION(9, 4, 1): /* ARCTURUS */
case IP_VERSION(9, 4, 2): /* ALDEBARAN */
kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
break;
case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
kfd->device_info.event_interrupt_class =
&event_interrupt_class_v9_4_3;
break;
case IP_VERSION(10, 3, 1): /* VANGOGH */
case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 1, 10): /* NAVI10 */
case IP_VERSION(10, 1, 2): /* NAVI12 */
case IP_VERSION(10, 1, 1): /* NAVI14 */
case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
default:
dev_warn(kfd_device, "v9 event interrupt handler is set due to "
"mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
}
}
static void kfd_device_info_init(struct kfd_dev *kfd,
bool vf, uint32_t gfx_target_version)
{
uint32_t gc_version = KFD_GC_VERSION(kfd);
uint32_t asic_type = kfd->adev->asic_type;
kfd->device_info.max_pasid_bits = 16;
kfd->device_info.max_no_of_hqd = 24;
kfd->device_info.num_of_watch_points = 4;
kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
kfd->device_info.gfx_target_version = gfx_target_version;
if (KFD_IS_SOC15(kfd)) {
kfd->device_info.doorbell_size = 8;
kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
kfd->device_info.supports_cwsr = true;
kfd_device_info_set_sdma_info(kfd);
kfd_device_info_set_event_interrupt_class(kfd);
if (gc_version < IP_VERSION(11, 0, 0)) {
/* Navi2x+, Navi1x+ */
if (gc_version == IP_VERSION(10, 3, 6))
kfd->device_info.no_atomic_fw_version = 14;
else if (gc_version == IP_VERSION(10, 3, 7))
kfd->device_info.no_atomic_fw_version = 3;
else if (gc_version >= IP_VERSION(10, 3, 0))
kfd->device_info.no_atomic_fw_version = 92;
else if (gc_version >= IP_VERSION(10, 1, 1))
kfd->device_info.no_atomic_fw_version = 145;
/* Navi1x+ */
if (gc_version >= IP_VERSION(10, 1, 1))
kfd->device_info.needs_pci_atomics = true;
} else if (gc_version < IP_VERSION(12, 0, 0)) {
/*
* PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
* MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
* PCIe atomics support.
*/
kfd->device_info.needs_pci_atomics = true;
kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
}
} else {
kfd->device_info.doorbell_size = 4;
kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
kfd->device_info.num_sdma_queues_per_engine = 2;
if (asic_type != CHIP_KAVERI &&
asic_type != CHIP_HAWAII &&
asic_type != CHIP_TONGA)
kfd->device_info.supports_cwsr = true;
if (asic_type != CHIP_HAWAII && !vf)
kfd->device_info.needs_pci_atomics = true;
}
}
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
struct kfd_dev *kfd = NULL;
const struct kfd2kgd_calls *f2g = NULL;
uint32_t gfx_target_version = 0;
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
gfx_target_version = 70000;
if (!vf)
f2g = &gfx_v7_kfd2kgd;
break;
#endif
case CHIP_CARRIZO:
gfx_target_version = 80001;
if (!vf)
f2g = &gfx_v8_kfd2kgd;
break;
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_HAWAII:
gfx_target_version = 70001;
if (!amdgpu_exp_hw_support)
pr_info(
"KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
);
else if (!vf)
f2g = &gfx_v7_kfd2kgd;
break;
#endif
case CHIP_TONGA:
gfx_target_version = 80002;
if (!vf)
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_FIJI:
case CHIP_POLARIS10:
gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
gfx_target_version = 80003;
if (!vf)
f2g = &gfx_v8_kfd2kgd;
break;
default:
switch (adev->ip_versions[GC_HWIP][0]) {
/* Vega 10 */
case IP_VERSION(9, 0, 1):
gfx_target_version = 90000;
f2g = &gfx_v9_kfd2kgd;
break;
/* Raven */
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
gfx_target_version = 90002;
if (!vf)
f2g = &gfx_v9_kfd2kgd;
break;
/* Vega12 */
case IP_VERSION(9, 2, 1):
gfx_target_version = 90004;
if (!vf)
f2g = &gfx_v9_kfd2kgd;
break;
/* Renoir */
case IP_VERSION(9, 3, 0):
gfx_target_version = 90012;
if (!vf)
f2g = &gfx_v9_kfd2kgd;
break;
/* Vega20 */
case IP_VERSION(9, 4, 0):
gfx_target_version = 90006;
if (!vf)
f2g = &gfx_v9_kfd2kgd;
break;
/* Arcturus */
case IP_VERSION(9, 4, 1):
gfx_target_version = 90008;
f2g = &arcturus_kfd2kgd;
break;
/* Aldebaran */
case IP_VERSION(9, 4, 2):
gfx_target_version = 90010;
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
gfx_target_version = adev->rev_id >= 1 ? 90402
: adev->flags & AMD_IS_APU ? 90400
: 90401;
f2g = &gc_9_4_3_kfd2kgd;
break;
/* Navi10 */
case IP_VERSION(10, 1, 10):
gfx_target_version = 100100;
if (!vf)
f2g = &gfx_v10_kfd2kgd;
break;
/* Navi12 */
case IP_VERSION(10, 1, 2):
gfx_target_version = 100101;
f2g = &gfx_v10_kfd2kgd;
break;
/* Navi14 */
case IP_VERSION(10, 1, 1):
gfx_target_version = 100102;
if (!vf)
f2g = &gfx_v10_kfd2kgd;
break;
/* Cyan Skillfish */
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
gfx_target_version = 100103;
if (!vf)
f2g = &gfx_v10_kfd2kgd;
break;
/* Sienna Cichlid */
case IP_VERSION(10, 3, 0):
gfx_target_version = 100300;
f2g = &gfx_v10_3_kfd2kgd;
break;
/* Navy Flounder */
case IP_VERSION(10, 3, 2):
gfx_target_version = 100301;
f2g = &gfx_v10_3_kfd2kgd;
break;
/* Van Gogh */
case IP_VERSION(10, 3, 1):
gfx_target_version = 100303;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
/* Dimgrey Cavefish */
case IP_VERSION(10, 3, 4):
gfx_target_version = 100302;
f2g = &gfx_v10_3_kfd2kgd;
break;
/* Beige Goby */
case IP_VERSION(10, 3, 5):
gfx_target_version = 100304;
f2g = &gfx_v10_3_kfd2kgd;
break;
/* Yellow Carp */
case IP_VERSION(10, 3, 3):
gfx_target_version = 100305;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 7):
gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(11, 0, 0):
gfx_target_version = 110000;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
gfx_target_version = 110003;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 2):
gfx_target_version = 110002;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
if ((adev->pdev->device == 0x7460 &&
adev->pdev->revision == 0x00) ||
(adev->pdev->device == 0x7461 &&
adev->pdev->revision == 0x00))
/* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
gfx_target_version = 110005;
else
/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
default:
break;
}
break;
}
if (!f2g) {
if (adev->ip_versions[GC_HWIP][0])
dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
else
dev_err(kfd_device, "%s %s not supported in kfd\n",
amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
return NULL;
}
kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
if (!kfd)
return NULL;
kfd->adev = adev;
kfd_device_info_init(kfd, vf, gfx_target_version);
kfd->init_complete = false;
kfd->kfd2kgd = f2g;
atomic_set(&kfd->compute_profile, 0);
mutex_init(&kfd->doorbell_mutex);
ida_init(&kfd->doorbell_ida);
return kfd;
}
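/*
 * Select the CWSR trap handler blob matching this GC IP version; each blob
 * must fit in one page, which the BUILD_BUG_ON checks below enforce.
 */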
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
if (cwsr_enable && kfd->device_info.supports_cwsr) {
if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx8_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_arcturus_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx9_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_nv1x_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx10_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
} else {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx11_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
}
kfd->cwsr_enabled = true;
}
}
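/*
 * GWS is only exposed when the scheduler firmware is new enough; the checks
 * below encode the minimum MEC2 (or MES, for GFX11) firmware version per GC
 * IP version, with hws_gws_support forcing it on.
 */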
static int kfd_gws_init(struct kfd_node *node)
{
int ret = 0;
struct kfd_dev *kfd = node->kfd;
uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
return 0;
if (hws_gws_support || (KFD_IS_SOC15(node) &&
((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
&& kfd->mec2_fw_version >= 0x81b3) ||
(KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
&& kfd->mec2_fw_version >= 0x1b3) ||
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
&& kfd->mec2_fw_version >= 0x30) ||
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
&& kfd->mec2_fw_version >= 0x28) ||
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) ||
(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
&& kfd->mec2_fw_version >= 0x6b) ||
(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
&& mes_rev >= 68))))
ret = amdgpu_amdkfd_alloc_gws(node->adev,
node->adev->gds.gws_size, &node->gws);
return ret;
}
static void kfd_smi_init(struct kfd_node *dev)
{
INIT_LIST_HEAD(&dev->smi_clients);
spin_lock_init(&dev->smi_lock);
}
static int kfd_init_node(struct kfd_node *node)
{
int err = -1;
if (kfd_interrupt_init(node)) {
dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
}
node->dqm = device_queue_manager_init(node);
if (!node->dqm) {
dev_err(kfd_device, "Error initializing queue manager\n");
goto device_queue_manager_error;
}
if (kfd_gws_init(node)) {
dev_err(kfd_device, "Could not allocate %d gws\n",
node->adev->gds.gws_size);
goto gws_error;
}
if (kfd_resume(node))
goto kfd_resume_error;
if (kfd_topology_add_device(node)) {
dev_err(kfd_device, "Error adding device to topology\n");
goto kfd_topology_add_device_error;
}
kfd_smi_init(node);
return 0;
kfd_topology_add_device_error:
kfd_resume_error:
gws_error:
device_queue_manager_uninit(node->dqm);
device_queue_manager_error:
kfd_interrupt_exit(node);
kfd_interrupt_error:
if (node->gws)
amdgpu_amdkfd_free_gws(node->adev, node->gws);
/* Cleanup the node memory here */
kfree(node);
return err;
}
static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
{
struct kfd_node *knode;
unsigned int i;
for (i = 0; i < num_nodes; i++) {
knode = kfd->nodes[i];
device_queue_manager_uninit(knode->dqm);
kfd_interrupt_exit(knode);
kfd_topology_remove_device(knode);
if (knode->gws)
amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
kfree(knode);
kfd->nodes[i] = NULL;
}
}
static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
unsigned int kfd_node_idx)
{
struct amdgpu_device *adev = node->adev;
uint32_t xcc_mask = node->xcc_mask;
uint32_t xcc, mapped_xcc;
/*
* Interrupt bitmap is setup for processing interrupts from
* different XCDs and AIDs.
* Interrupt bitmap is defined as follows:
* 1. Bits 0-15 - correspond to the NodeId field.
* Each bit corresponds to NodeId number. For example, if
* a KFD node has interrupt bitmap set to 0x7, then this
* KFD node will process interrupts with NodeId = 0, 1 and 2
* in the IH cookie.
* 2. Bits 16-31 - unused.
*
* Please note that the kfd_node_idx argument passed to this
* function is not related to NodeId field received in the
* IH cookie.
*
* In CPX mode, a KFD node will process an interrupt if:
* - the Node Id matches the corresponding bit set in
* Bits 0-15.
* - AND VMID reported in the interrupt lies within the
* VMID range of the node.
*/
for_each_inst(xcc, xcc_mask) {
mapped_xcc = GET_INST(GC, xcc);
node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
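/* For illustration: GC instances 0 and 1 contribute 0x3 and 0x5, so a node owning both ends up with interrupt_bitmap = 0x7 */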
}
dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
node->interrupt_bitmap);
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size, map_process_packet_size, i;
struct kfd_node *node;
uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
unsigned int max_proc_per_quantum;
int partition_mode;
int xcp_idx;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC1);
kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC2);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
if (kfd->num_nodes == 0) {
dev_err(kfd_device,
"KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
kfd->adev->gfx.num_xcc_per_xcp);
goto out;
}
/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
* 32 and 64-bit requests are possible and must be
* supported.
*/
kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
if (!kfd->pci_atomic_requested &&
kfd->device_info.needs_pci_atomics &&
(!kfd->device_info.no_atomic_fw_version ||
kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics %d<%d\n",
kfd->adev->pdev->vendor, kfd->adev->pdev->device,
kfd->mec_fw_version,
kfd->device_info.no_atomic_fw_version);
return false;
}
first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
/* For GFX9.4.3, we need special handling for VMIDs depending on
* partition mode.
* In CPX mode, the VMID range needs to be shared between XCDs.
* Additionally, there are 13 VMIDs (3-15) available for KFD. To
* divide them equally, we change starting VMID to 4 and not use
* VMID 3.
* If the VMID range changes for GFX9.4.3, then this code MUST be
* revisited.
*/
if (kfd->adev->xcp_mgr) {
partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
AMDGPU_XCP_FL_LOCKED);
if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
vmid_num_kfd /= 2;
first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
}
}
/* Verify module parameters regarding mapped process number */
if (hws_max_conc_proc >= 0)
max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
else
max_proc_per_quantum = vmid_num_kfd;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info.mqd_size_aligned;
/*
* calculate max size of runlist packet.
* There can be only 2 packets at once
*/
map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
sizeof(struct pm4_mes_map_process_aldebaran) :
sizeof(struct pm4_mes_map_process);
size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+ sizeof(struct pm4_mes_runlist)) * 2;
/* Add size of HIQ & DIQ */
size += KFD_KERNEL_QUEUE_SIZE * 2;
/* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
if (amdgpu_amdkfd_alloc_gtt_mem(
kfd->adev, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto alloc_gtt_mem_failure;
}
dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
/* Initialize GTT sa with 512 byte chunk size */
if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
goto kfd_gtt_sa_init_error;
}
if (kfd_doorbell_init(kfd)) {
dev_err(kfd_device,
"Error initializing doorbell aperture\n");
goto kfd_doorbell_error;
}
if (amdgpu_use_xgmi_p2p)
kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
/*
* For GFX9.4.3, the KFD abstracts all partitions within a socket as
* xGMI connected in the topology so assign a unique hive id per
* device based on the pci device location if device is in PCIe mode.
*/
if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1)
kfd->hive_id = pci_dev_id(kfd->adev->pdev);
kfd->noretry = kfd->adev->gmc.noretry;
kfd_cwsr_init(kfd);
dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
kfd->num_nodes);
/* Allocate the KFD nodes */
for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
if (!node)
goto node_alloc_error;
node->node_id = i;
node->adev = kfd->adev;
node->kfd = kfd;
node->kfd2kgd = kfd->kfd2kgd;
node->vm_info.vmid_num_kfd = vmid_num_kfd;
node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
/* TODO: Check if error handling is needed */
if (node->xcp) {
amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
&node->xcc_mask);
++xcp_idx;
} else {
node->xcc_mask =
(1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
}
if (node->xcp) {
dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
node->node_id, node->xcp->mem_id,
KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
}
if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
/* For GFX9.4.3 and CPX mode, first XCD gets VMID range
* 4-9 and second XCD gets VMID range 10-15.
*/
node->vm_info.first_vmid_kfd = (i%2 == 0) ?
first_vmid_kfd :
first_vmid_kfd+vmid_num_kfd;
node->vm_info.last_vmid_kfd = (i%2 == 0) ?
last_vmid_kfd-vmid_num_kfd :
last_vmid_kfd;
node->compute_vmid_bitmap =
((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
} else {
node->vm_info.first_vmid_kfd = first_vmid_kfd;
node->vm_info.last_vmid_kfd = last_vmid_kfd;
node->compute_vmid_bitmap =
gpu_resources->compute_vmid_bitmap;
}
node->max_proc_per_quantum = max_proc_per_quantum;
atomic_set(&node->sram_ecc_flag, 0);
amdgpu_amdkfd_get_local_mem_info(kfd->adev,
&node->local_mem_info, node->xcp);
if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3))
kfd_setup_interrupt_bitmap(node, i);
/* Initialize the KFD node */
if (kfd_init_node(node)) {
dev_err(kfd_device, "Error initializing KFD node\n");
goto node_init_error;
}
kfd->nodes[i] = node;
}
svm_range_set_max_pages(kfd->adev);
spin_lock_init(&kfd->watch_points_lock);
kfd->init_complete = true;
dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
kfd->adev->pdev->device);
pr_debug("Starting kfd with the following scheduling policy %d\n",
node->dqm->sched_policy);
goto out;
node_init_error:
node_alloc_error:
kfd_cleanup_nodes(kfd, i);
kfd_doorbell_fini(kfd);
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->adev->pdev->vendor, kfd->adev->pdev->device);
out:
return kfd->init_complete;
}
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
/* Cleanup KFD nodes */
kfd_cleanup_nodes(kfd, kfd->num_nodes);
/* Cleanup common/shared resources */
kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
}
kfree(kfd);
}
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
struct kfd_node *node;
int i;
if (!kfd->init_complete)
return 0;
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
kfd_smi_event_update_gpu_reset(node, false);
node->dqm->ops.pre_reset(node->dqm);
}
kgd2kfd_suspend(kfd, false);
for (i = 0; i < kfd->num_nodes; i++)
kfd_signal_reset_event(kfd->nodes[i]);
return 0;
}
/*
 * Fix me. KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for the processes to be terminated.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
int ret;
struct kfd_node *node;
int i;
if (!kfd->init_complete)
return 0;
for (i = 0; i < kfd->num_nodes; i++) {
ret = kfd_resume(kfd->nodes[i]);
if (ret)
return ret;
}
mutex_lock(&kfd_processes_mutex);
--kfd_locked;
mutex_unlock(&kfd_processes_mutex);
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
atomic_set(&node->sram_ecc_flag, 0);
kfd_smi_event_update_gpu_reset(node, true);
}
return 0;
}
bool kfd_is_locked(void)
{
lockdep_assert_held(&kfd_processes_mutex);
return (kfd_locked > 0);
}
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
int count;
if (!kfd->init_complete)
return;
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = ++kfd_locked;
mutex_unlock(&kfd_processes_mutex);
/* For first KFD device suspend all the KFD processes */
if (count == 1)
kfd_suspend_all_processes();
}
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
node->dqm->ops.stop(node->dqm);
}
}
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count, i;
if (!kfd->init_complete)
return 0;
for (i = 0; i < kfd->num_nodes; i++) {
ret = kfd_resume(kfd->nodes[i]);
if (ret)
return ret;
}
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = --kfd_locked;
mutex_unlock(&kfd_processes_mutex);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
}
return ret;
}
static int kfd_resume(struct kfd_node *node)
{
int err = 0;
err = node->dqm->ops.start(node->dqm);
if (err)
dev_err(kfd_device,
"Error starting queue manager for device %x:%x\n",
node->adev->pdev->vendor, node->adev->pdev->device);
return err;
}
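/* Queue interrupt handling work on a CPU in the local NUMA node: scan the
 * online CPUs starting after the current one and pick the first CPU whose
 * node matches numa_node_id(); fall back to the current CPU if the scan
 * wraps around without finding one.
 */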
static inline void kfd_queue_work(struct workqueue_struct *wq,
struct work_struct *work)
{
int cpu, new_cpu;
cpu = new_cpu = smp_processor_id();
do {
new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
if (cpu_to_node(new_cpu) == numa_node_id())
break;
} while (cpu != new_cpu);
queue_work_on(new_cpu, wq, work);
}
/* This is called directly from KGD at ISR. */
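/* The IH ring entry is offered to each KFD node in turn; the first node whose
 * interrupt filter accepts it gets the (possibly patched) entry enqueued on
 * its software ring and its interrupt worker scheduled.
 */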
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
bool is_patched = false;
unsigned long flags;
struct kfd_node *node;
if (!kfd->init_complete)
return;
if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
dev_err_once(kfd_device, "Ring entry too small\n");
return;
}
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
spin_lock_irqsave(&node->interrupt_lock, flags);
if (node->interrupts_active
&& interrupt_is_wanted(node, ih_ring_entry,
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(node,
is_patched ? patched_ihre : ih_ring_entry)) {
kfd_queue_work(node->ih_wq, &node->interrupt_work);
spin_unlock_irqrestore(&node->interrupt_lock, flags);
return;
}
spin_unlock_irqrestore(&node->interrupt_lock, flags);
}
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
{
struct kfd_process *p;
int r;
/* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
p = kfd_lookup_process_by_mm(mm);
if (!p)
return -ESRCH;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
r = kfd_process_evict_queues(p, trigger);
kfd_unref_process(p);
return r;
}
int kgd2kfd_resume_mm(struct mm_struct *mm)
{
struct kfd_process *p;
int r;
/* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
p = kfd_lookup_process_by_mm(mm);
if (!p)
return -ESRCH;
r = kfd_process_restore_queues(p);
kfd_unref_process(p);
return r;
}
/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
* prepare for safe eviction of KFD BOs that belong to the specified
* process.
*
* @mm: mm_struct that identifies the specified KFD process
* @fence: eviction fence attached to KFD process BOs
*
*/
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence)
{
struct kfd_process *p;
unsigned long active_time;
unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
if (!fence)
return -EINVAL;
if (dma_fence_is_signaled(fence))
return 0;
p = kfd_lookup_process_by_mm(mm);
if (!p)
return -ENODEV;
if (fence->seqno == p->last_eviction_seqno)
goto out;
p->last_eviction_seqno = fence->seqno;
/* Avoid KFD process starvation. Wait for at least
* PROCESS_ACTIVE_TIME_MS before evicting the process again
*/
active_time = get_jiffies_64() - p->last_restore_timestamp;
if (delay_jiffies > active_time)
delay_jiffies -= active_time;
else
delay_jiffies = 0;
/* During process initialization eviction_work.dwork is initialized
* to kfd_evict_bo_worker
*/
WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
p->lead_thread->pid, delay_jiffies);
schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
kfd_unref_process(p);
return 0;
}
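/* GTT sub-allocator: carves the GTT buffer allocated at device init into
 * fixed-size chunks (512 bytes, see kgd2kfd_device_init) tracked by a bitmap.
 * Allocations are served as one or more contiguous chunks.
 */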
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
if (WARN_ON(buf_size < chunk_size))
return -EINVAL;
if (WARN_ON(buf_size == 0))
return -EINVAL;
if (WARN_ON(chunk_size == 0))
return -EINVAL;
kfd->gtt_sa_chunk_size = chunk_size;
kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
GFP_KERNEL);
if (!kfd->gtt_sa_bitmap)
return -ENOMEM;
pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
mutex_init(&kfd->gtt_sa_lock);
return 0;
}
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
mutex_destroy(&kfd->gtt_sa_lock);
bitmap_free(kfd->gtt_sa_bitmap);
}
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
unsigned int bit_num,
unsigned int chunk_size)
{
return start_addr + bit_num * chunk_size;
}
static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
unsigned int bit_num,
unsigned int chunk_size)
{
return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
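/* kfd_gtt_sa_allocate - first-fit search of the chunk bitmap for a contiguous
 * run of free chunks large enough to hold @size bytes; if the run is broken
 * by an allocated chunk, the search restarts from the chunk that broke it.
 */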
int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
struct kfd_mem_obj **mem_obj)
{
unsigned int found, start_search, cur_size;
struct kfd_dev *kfd = node->kfd;
if (size == 0)
return -EINVAL;
if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
return -ENOMEM;
*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!(*mem_obj))
return -ENOMEM;
pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
start_search = 0;
mutex_lock(&kfd->gtt_sa_lock);
kfd_gtt_restart_search:
/* Find the first chunk that is free */
found = find_next_zero_bit(kfd->gtt_sa_bitmap,
kfd->gtt_sa_num_of_chunks,
start_search);
pr_debug("Found = %d\n", found);
/* If there wasn't any free chunk, bail out */
if (found == kfd->gtt_sa_num_of_chunks)
goto kfd_gtt_no_free_chunk;
/* Update fields of mem_obj */
(*mem_obj)->range_start = found;
(*mem_obj)->range_end = found;
(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
kfd->gtt_start_gpu_addr,
found,
kfd->gtt_sa_chunk_size);
(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
kfd->gtt_start_cpu_ptr,
found,
kfd->gtt_sa_chunk_size);
pr_debug("gpu_addr = %p, cpu_addr = %p\n",
(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
/* If we need only one chunk, mark it as allocated and get out */
if (size <= kfd->gtt_sa_chunk_size) {
pr_debug("Single bit\n");
__set_bit(found, kfd->gtt_sa_bitmap);
goto kfd_gtt_out;
}
/* Otherwise, try to see if we have enough contiguous chunks */
cur_size = size - kfd->gtt_sa_chunk_size;
do {
(*mem_obj)->range_end =
find_next_zero_bit(kfd->gtt_sa_bitmap,
kfd->gtt_sa_num_of_chunks, ++found);
/*
 * If the next free chunk is not contiguous then we need to
 * restart our search from the last free chunk we found (which
 * wasn't contiguous to the previous ones)
 */
if ((*mem_obj)->range_end != found) {
start_search = found;
goto kfd_gtt_restart_search;
}
/*
* If we reached end of buffer, bail out with error
*/
if (found == kfd->gtt_sa_num_of_chunks)
goto kfd_gtt_no_free_chunk;
/* Check if we don't need another chunk */
if (cur_size <= kfd->gtt_sa_chunk_size)
cur_size = 0;
else
cur_size -= kfd->gtt_sa_chunk_size;
} while (cur_size > 0);
pr_debug("range_start = %d, range_end = %d\n",
(*mem_obj)->range_start, (*mem_obj)->range_end);
/* Mark the chunks as allocated */
bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
(*mem_obj)->range_end - (*mem_obj)->range_start + 1);
kfd_gtt_out:
mutex_unlock(&kfd->gtt_sa_lock);
return 0;
kfd_gtt_no_free_chunk:
pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(*mem_obj);
return -ENOMEM;
}
int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
{
struct kfd_dev *kfd = node->kfd;
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
mem_obj, mem_obj->range_start, mem_obj->range_end);
mutex_lock(&kfd->gtt_sa_lock);
/* Mark the chunks as free */
bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
mem_obj->range_end - mem_obj->range_start + 1);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(mem_obj);
return 0;
}
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
/*
* TODO: Currently update SRAM ECC flag for first node.
* This needs to be updated later when we can
* identify SRAM ECC error on other nodes also.
*/
if (kfd)
atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
}
void kfd_inc_compute_active(struct kfd_node *node)
{
if (atomic_inc_return(&node->kfd->compute_profile) == 1)
amdgpu_amdkfd_set_compute_idle(node->adev, false);
}
void kfd_dec_compute_active(struct kfd_node *node)
{
int count = atomic_dec_return(&node->kfd->compute_profile);
if (count == 0)
amdgpu_amdkfd_set_compute_idle(node->adev, true);
WARN_ONCE(count < 0, "Compute profile ref. count error");
}
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
/*
* TODO: For now, raise the throttling event only on first node.
* This will need to change after we are able to determine
* which node raised the throttling event.
*/
if (kfd && kfd->init_complete)
kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
throttle_bitmask);
}
/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
 * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
 * When the device has more than two engines, we reserve two for PCIe to enable
 * full-duplex and the rest are used as XGMI.
 */
unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
{
/* If XGMI is not supported, all SDMA engines are PCIe */
if (!node->adev->gmc.xgmi.supported)
return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;
return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
}
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
{
/* After reserved for PCIe, the rest of engines are XGMI */
return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
kfd_get_num_sdma_engines(node);
}
int kgd2kfd_check_and_lock_kfd(void)
{
mutex_lock(&kfd_processes_mutex);
if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
mutex_unlock(&kfd_processes_mutex);
return -EBUSY;
}
++kfd_locked;
mutex_unlock(&kfd_processes_mutex);
return 0;
}
void kgd2kfd_unlock_kfd(void)
{
mutex_lock(&kfd_processes_mutex);
--kfd_locked;
mutex_unlock(&kfd_processes_mutex);
}
#if defined(CONFIG_DEBUG_FS)
/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_node *dev)
{
if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
pr_err("HWS is not enabled");
return -EINVAL;
}
return dqm_debugfs_hang_hws(dev->dqm);
}
#endif
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_device.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfd_priv.h"
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/idr.h>
/*
 * This extension supports kernel-level doorbell management for the
 * kernel queues, using the first doorbell page reserved for the kernel.
 */
/*
* Each device exposes a doorbell aperture, a PCI MMIO aperture that
* receives 32-bit writes that are passed to queues as wptr values.
* The doorbells are intended to be written by applications as part
* of queueing work on user-mode queues.
* We assign doorbells to applications in PAGE_SIZE-sized and aligned chunks.
* We map the doorbell address space into user-mode when a process creates
* its first queue on each device.
 * Although the mapping is done by KFD, it is equivalent to an mmap of
 * /dev/kfd with the particular device encoded in the mmap offset.
* There will be other uses for mmap of /dev/kfd, so only a range of
* offsets (KFD_MMAP_DOORBELL_START-END) is used for doorbells.
*/
/* # of doorbell bytes allocated for each process. */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
{
if (!kfd->shared_resources.enable_mes)
return roundup(kfd->device_info.doorbell_size *
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
PAGE_SIZE);
else
return amdgpu_mes_doorbell_process_slice(
(struct amdgpu_device *)kfd->adev);
}
/* Doorbell calculations for device init. */
int kfd_doorbell_init(struct kfd_dev *kfd)
{
int size = PAGE_SIZE;
int r;
/*
* Todo: KFD kernel level operations need only one doorbell for
* ring test/HWS. So instead of reserving a whole page here for
* kernel, reserve and consume a doorbell from existing KGD kernel
* doorbell page.
*/
/* Bitmap to dynamically allocate doorbells from kernel page */
kfd->doorbell_bitmap = bitmap_zalloc(size / sizeof(u32), GFP_KERNEL);
if (!kfd->doorbell_bitmap) {
DRM_ERROR("Failed to allocate kernel doorbell bitmap\n");
return -ENOMEM;
}
/* Alloc a doorbell page for KFD kernel usages */
r = amdgpu_bo_create_kernel(kfd->adev,
size,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_DOORBELL,
&kfd->doorbells,
NULL,
(void **)&kfd->doorbell_kernel_ptr);
if (r) {
pr_err("failed to allocate kernel doorbells\n");
bitmap_free(kfd->doorbell_bitmap);
return r;
}
pr_debug("Doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
return 0;
}
void kfd_doorbell_fini(struct kfd_dev *kfd)
{
bitmap_free(kfd->doorbell_bitmap);
amdgpu_bo_free_kernel(&kfd->doorbells, NULL,
(void **)&kfd->doorbell_kernel_ptr);
}
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
struct kfd_process_device *pdd;
/*
 * For simplicity we only allow mapping of the entire doorbell
* allocation of a single device & process.
*/
if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd))
return -EINVAL;
pdd = kfd_get_process_device_data(dev, process);
if (!pdd)
return -EINVAL;
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pr_debug("Mapping doorbell page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
(unsigned long long) vma->vm_start, address, vma->vm_flags,
kfd_doorbell_process_slice(dev->kfd));
return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
kfd_doorbell_process_slice(dev->kfd),
vma->vm_page_prot);
}
/* get kernel iomem pointer for a doorbell */
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off)
{
u32 inx;
mutex_lock(&kfd->doorbell_mutex);
inx = find_first_zero_bit(kfd->doorbell_bitmap, PAGE_SIZE / sizeof(u32));
__set_bit(inx, kfd->doorbell_bitmap);
mutex_unlock(&kfd->doorbell_mutex);
if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return NULL;
*doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
inx *= 2;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
" doorbell index == 0x%x\n",
*doorbell_off, inx);
return kfd->doorbell_kernel_ptr + inx;
}
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
inx /= 2;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_bitmap);
mutex_unlock(&kfd->doorbell_mutex);
}
void write_kernel_doorbell(void __iomem *db, u32 value)
{
if (db) {
writel(value, db);
pr_debug("Writing %d to doorbell address %p\n", value, db);
}
}
void write_kernel_doorbell64(void __iomem *db, u64 value)
{
if (db) {
WARN(((unsigned long)db & 7) != 0,
"Unaligned 64-bit doorbell");
writeq(value, (u64 __iomem *)db);
pr_debug("writing %llu to doorbell address %p\n", value, db);
}
}
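/* Reserve the non-CP doorbell ranges (SDMA, IH, VCN) in the per-process
 * doorbell bitmap so they are never handed out to user-mode queues.
 */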
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
int range_start = dev->shared_resources.non_cp_doorbells_start;
int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev))
return 0;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
if (i >= range_start && i <= range_end) {
__set_bit(i, qpd->doorbell_bitmap);
__set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
}
}
return 0;
}
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
struct amdgpu_device *adev = pdd->dev->adev;
uint32_t first_db_index;
if (!pdd->qpd.proc_doorbells) {
if (kfd_alloc_process_doorbells(pdd->dev->kfd, pdd))
/* phys_addr_t 0 indicates an error */
return 0;
}
first_db_index = amdgpu_doorbell_index_on_bar(adev, pdd->qpd.proc_doorbells, 0);
return adev->doorbell.base + first_db_index * sizeof(uint32_t);
}
int kfd_alloc_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
{
int r;
struct qcm_process_device *qpd = &pdd->qpd;
/* Allocate bitmap for dynamic doorbell allocation */
qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
GFP_KERNEL);
if (!qpd->doorbell_bitmap) {
DRM_ERROR("Failed to allocate process doorbell bitmap\n");
return -ENOMEM;
}
r = init_doorbell_bitmap(&pdd->qpd, kfd);
if (r) {
DRM_ERROR("Failed to initialize process doorbells\n");
r = -ENOMEM;
goto err;
}
/* Allocate doorbells for this process */
r = amdgpu_bo_create_kernel(kfd->adev,
kfd_doorbell_process_slice(kfd),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_DOORBELL,
&qpd->proc_doorbells,
NULL,
NULL);
if (r) {
DRM_ERROR("Failed to allocate process doorbells\n");
goto err;
}
return 0;
err:
bitmap_free(qpd->doorbell_bitmap);
qpd->doorbell_bitmap = NULL;
return r;
}
void kfd_free_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
if (qpd->doorbell_bitmap) {
bitmap_free(qpd->doorbell_bitmap);
qpd->doorbell_bitmap = NULL;
}
amdgpu_bo_free_kernel(&qpd->proc_doorbells, NULL, NULL);
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2018-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v10_structs.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
return (struct v10_compute_mqd *)mqd;
}
static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct v10_sdma_mqd *)mqd;
}
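/* Spread the user-supplied CU mask symmetrically across the shader engines
 * (up to 4 SEs) and program the per-SE static thread management fields of
 * the compute MQD.
 */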
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct mqd_update_info *minfo)
{
struct v10_compute_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
pr_debug("update cu mask to %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3);
}
static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
{
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
m->cp_hqd_queue_priority = q->priority;
}
static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
&mqd_mem_obj))
return NULL;
return mqd_mem_obj;
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct v10_compute_mqd *m;
m = (struct v10_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memset(m, 0, sizeof(struct v10_compute_mqd));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
/* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
* DISPATCH_PTR. This is required for the kfd debugger
*/
m->cp_hqd_hq_scheduler0 = 1 << 14;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
}
if (mm->dev->kfd->cwsr_enabled) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
lower_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_base_addr_hi =
upper_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
}
*mqd = m;
if (gart_addr)
*gart_addr = addr;
mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
int r = 0;
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, 0, mms, 0);
return r;
}
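/* update_mqd - program the ring buffer size and base, read/write pointer
 * report addresses, doorbell offset, EOP buffer, VMID and AQL-specific
 * control bits from the queue properties, then apply the CU mask and
 * priority and recompute the queue's is_active state.
 */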
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct v10_compute_mqd *m;
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_doorbell_control =
q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;
/*
* HW does not clamp this field correctly. Maximum EOP queue size
* is constrained by per-SE EOP done signal count, which is 8-bit.
* Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
* more than (EOP entry count - 1) so a queue size of 0x800 dwords
* is safe, giving a maximum field value of 0xA.
*/
m->cp_hqd_eop_control = min(0xA,
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
upper_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_iq_timer = 0;
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
/* GC 10 removed WPP_CLAMP from PQ Control */
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
m->cp_hqd_pq_doorbell_control |=
1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
if (mm->dev->kfd->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static uint32_t read_doorbell_id(void *mqd)
{
struct v10_compute_mqd *m = (struct v10_compute_mqd *)mqd;
return m->queue_doorbell_id0;
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v10_compute_mqd *m;
struct kfd_context_save_area_header header;
m = get_mqd(mqd);
/* Control stack is written backwards, while workgroup context data
 * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
* Current position is at m->cp_hqd_cntl_stack_offset and
* m->cp_hqd_wg_state_offset, respectively.
*/
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
/* Control stack is not copied to user mode for GFXv10 because
* it's part of the context save area that is already
* accessible to user mode
*/
header.wave_state.control_stack_size = *ctl_stack_used_size;
header.wave_state.wave_state_size = *save_area_used_size;
header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
return -EFAULT;
return 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
struct v10_compute_mqd *m;
m = get_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct v10_compute_mqd));
}
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct v10_compute_mqd *m;
m = (struct v10_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
*mqd = m;
if (gart_addr)
*gart_addr = addr;
m->cp_hqd_pq_doorbell_control =
qp->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
qp->is_active = 0;
}
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v10_compute_mqd *m;
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
m = get_mqd(*mqd);
m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
{
int err;
struct v10_compute_mqd *m;
u32 doorbell_off;
m = get_mqd(mqd);
doorbell_off = m->cp_hqd_pq_doorbell_control >>
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
if (err)
pr_debug("Destroy HIQ MQD failed: %d\n", err);
return err;
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v10_sdma_mqd *m;
m = (struct v10_sdma_mqd *) mqd_mem_obj->cpu_ptr;
memset(m, 0, sizeof(struct v10_sdma_mqd));
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
mm->update_mqd(mm, m, q, NULL);
}
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct v10_sdma_mqd *m;
m = get_sdma_mqd(mqd);
m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
void *mqd,
void *mqd_dst,
void *ctl_stack_dst)
{
struct v10_sdma_mqd *m;
m = get_sdma_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct v10_sdma_mqd));
}
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src,
const u32 ctl_stack_size)
{
uint64_t addr;
struct v10_sdma_mqd *m;
m = (struct v10_sdma_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
m->sdmax_rlcx_doorbell_offset =
qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
*mqd = m;
if (gart_addr)
*gart_addr = addr;
qp->is_active = 0;
}
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct v10_compute_mqd), false);
return 0;
}
static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct v10_sdma_mqd), false);
return 0;
}
#endif
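/* mqd_manager_init_v10 - allocate a mqd_manager and populate its function
 * pointers according to the queue type (CP, HIQ, DIQ or SDMA) for GFX10;
 * returns NULL for unknown types.
 */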
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
struct kfd_node *dev)
{
struct mqd_manager *mqd;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
mqd->dev = dev;
switch (type) {
case KFD_MQD_TYPE_CP:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
mqd->get_wave_state = get_wave_state;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_HIQ:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->read_doorbell_id = read_doorbell_id;
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct v10_sdma_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
pr_debug("%s@%i\n", __func__, __LINE__);
break;
default:
kfree(mqd);
return NULL;
}
return mqd;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/hash.h>
#include <linux/cpufreq.h>
#include <linux/log2.h>
#include <linux/dmi.h>
#include <linux/atomic.h>
#include "kfd_priv.h"
#include "kfd_crat.h"
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "kfd_debug.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
#include "amdgpu.h"
/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
static struct kfd_system_properties sys_props;
static DECLARE_RWSEM(topology_lock);
static uint32_t topology_crat_proximity_domain;
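/* topology_device_list and sys_props are protected by topology_lock. The
 * _no_lock lookup below must be called with topology_lock already held;
 * kfd_topology_device_by_proximity_domain takes the read lock itself.
 */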
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
uint32_t proximity_domain)
{
struct kfd_topology_device *top_dev;
struct kfd_topology_device *device = NULL;
list_for_each_entry(top_dev, &topology_device_list, list)
if (top_dev->proximity_domain == proximity_domain) {
device = top_dev;
break;
}
return device;
}
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
uint32_t proximity_domain)
{
struct kfd_topology_device *device = NULL;
down_read(&topology_lock);
device = kfd_topology_device_by_proximity_domain_no_lock(
proximity_domain);
up_read(&topology_lock);
return device;
}
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
{
struct kfd_topology_device *top_dev = NULL;
struct kfd_topology_device *ret = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
if (top_dev->gpu_id == gpu_id) {
ret = top_dev;
break;
}
up_read(&topology_lock);
return ret;
}
struct kfd_node *kfd_device_by_id(uint32_t gpu_id)
{
struct kfd_topology_device *top_dev;
top_dev = kfd_topology_device_by_id(gpu_id);
if (!top_dev)
return NULL;
return top_dev->gpu;
}
struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
struct kfd_topology_device *top_dev;
struct kfd_node *device = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
if (top_dev->gpu && top_dev->gpu->adev->pdev == pdev) {
device = top_dev->gpu;
break;
}
up_read(&topology_lock);
return device;
}
/* Called with write topology_lock acquired */
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
struct kfd_mem_properties *mem;
struct kfd_cache_properties *cache;
struct kfd_iolink_properties *iolink;
struct kfd_iolink_properties *p2plink;
struct kfd_perf_properties *perf;
list_del(&dev->list);
while (dev->mem_props.next != &dev->mem_props) {
mem = container_of(dev->mem_props.next,
struct kfd_mem_properties, list);
list_del(&mem->list);
kfree(mem);
}
while (dev->cache_props.next != &dev->cache_props) {
cache = container_of(dev->cache_props.next,
struct kfd_cache_properties, list);
list_del(&cache->list);
kfree(cache);
}
while (dev->io_link_props.next != &dev->io_link_props) {
iolink = container_of(dev->io_link_props.next,
struct kfd_iolink_properties, list);
list_del(&iolink->list);
kfree(iolink);
}
while (dev->p2p_link_props.next != &dev->p2p_link_props) {
p2plink = container_of(dev->p2p_link_props.next,
struct kfd_iolink_properties, list);
list_del(&p2plink->list);
kfree(p2plink);
}
while (dev->perf_props.next != &dev->perf_props) {
perf = container_of(dev->perf_props.next,
struct kfd_perf_properties, list);
list_del(&perf->list);
kfree(perf);
}
kfree(dev);
}
void kfd_release_topology_device_list(struct list_head *device_list)
{
struct kfd_topology_device *dev;
while (!list_empty(device_list)) {
dev = list_first_entry(device_list,
struct kfd_topology_device, list);
kfd_release_topology_device(dev);
}
}
static void kfd_release_live_view(void)
{
kfd_release_topology_device_list(&topology_device_list);
memset(&sys_props, 0, sizeof(sys_props));
}
struct kfd_topology_device *kfd_create_topology_device(
struct list_head *device_list)
{
struct kfd_topology_device *dev;
dev = kfd_alloc_struct(dev);
if (!dev) {
pr_err("No memory to allocate a topology device");
return NULL;
}
INIT_LIST_HEAD(&dev->mem_props);
INIT_LIST_HEAD(&dev->cache_props);
INIT_LIST_HEAD(&dev->io_link_props);
INIT_LIST_HEAD(&dev->p2p_link_props);
INIT_LIST_HEAD(&dev->perf_props);
list_add_tail(&dev->list, device_list);
return dev;
}
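/* Helpers for the sysfs show callbacks below: each macro snprintf()s one
 * formatted property into the output buffer at offs and advances offs,
 * bounded by PAGE_SIZE.
 */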
#define sysfs_show_gen_prop(buffer, offs, fmt, ...) \
(offs += snprintf(buffer+offs, PAGE_SIZE-offs, \
fmt, __VA_ARGS__))
#define sysfs_show_32bit_prop(buffer, offs, name, value) \
sysfs_show_gen_prop(buffer, offs, "%s %u\n", name, value)
#define sysfs_show_64bit_prop(buffer, offs, name, value) \
sysfs_show_gen_prop(buffer, offs, "%s %llu\n", name, value)
#define sysfs_show_32bit_val(buffer, offs, value) \
sysfs_show_gen_prop(buffer, offs, "%u\n", value)
#define sysfs_show_str_val(buffer, offs, value) \
sysfs_show_gen_prop(buffer, offs, "%s\n", value)
static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
int offs = 0;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
if (attr == &sys_props.attr_genid) {
sysfs_show_32bit_val(buffer, offs,
sys_props.generation_count);
} else if (attr == &sys_props.attr_props) {
sysfs_show_64bit_prop(buffer, offs, "platform_oem",
sys_props.platform_oem);
sysfs_show_64bit_prop(buffer, offs, "platform_id",
sys_props.platform_id);
sysfs_show_64bit_prop(buffer, offs, "platform_rev",
sys_props.platform_rev);
} else {
offs = -EINVAL;
}
return offs;
}
static void kfd_topology_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
static const struct kobj_type sysprops_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
int offs = 0;
struct kfd_iolink_properties *iolink;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
iolink = container_of(attr, struct kfd_iolink_properties, attr);
if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
return -EPERM;
sysfs_show_32bit_prop(buffer, offs, "type", iolink->iolink_type);
sysfs_show_32bit_prop(buffer, offs, "version_major", iolink->ver_maj);
sysfs_show_32bit_prop(buffer, offs, "version_minor", iolink->ver_min);
sysfs_show_32bit_prop(buffer, offs, "node_from", iolink->node_from);
sysfs_show_32bit_prop(buffer, offs, "node_to", iolink->node_to);
sysfs_show_32bit_prop(buffer, offs, "weight", iolink->weight);
sysfs_show_32bit_prop(buffer, offs, "min_latency", iolink->min_latency);
sysfs_show_32bit_prop(buffer, offs, "max_latency", iolink->max_latency);
sysfs_show_32bit_prop(buffer, offs, "min_bandwidth",
iolink->min_bandwidth);
sysfs_show_32bit_prop(buffer, offs, "max_bandwidth",
iolink->max_bandwidth);
sysfs_show_32bit_prop(buffer, offs, "recommended_transfer_size",
iolink->rec_transfer_size);
sysfs_show_32bit_prop(buffer, offs, "flags", iolink->flags);
return offs;
}
static const struct sysfs_ops iolink_ops = {
.show = iolink_show,
};
static const struct kobj_type iolink_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
int offs = 0;
struct kfd_mem_properties *mem;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
mem = container_of(attr, struct kfd_mem_properties, attr);
if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
return -EPERM;
sysfs_show_32bit_prop(buffer, offs, "heap_type", mem->heap_type);
sysfs_show_64bit_prop(buffer, offs, "size_in_bytes",
mem->size_in_bytes);
sysfs_show_32bit_prop(buffer, offs, "flags", mem->flags);
sysfs_show_32bit_prop(buffer, offs, "width", mem->width);
sysfs_show_32bit_prop(buffer, offs, "mem_clk_max",
mem->mem_clk_max);
return offs;
}
static const struct sysfs_ops mem_ops = {
.show = mem_show,
};
static const struct kobj_type mem_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
int offs = 0;
uint32_t i, j;
struct kfd_cache_properties *cache;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
cache = container_of(attr, struct kfd_cache_properties, attr);
if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
return -EPERM;
sysfs_show_32bit_prop(buffer, offs, "processor_id_low",
cache->processor_id_low);
sysfs_show_32bit_prop(buffer, offs, "level", cache->cache_level);
sysfs_show_32bit_prop(buffer, offs, "size", cache->cache_size);
sysfs_show_32bit_prop(buffer, offs, "cache_line_size",
cache->cacheline_size);
sysfs_show_32bit_prop(buffer, offs, "cache_lines_per_tag",
cache->cachelines_per_tag);
sysfs_show_32bit_prop(buffer, offs, "association", cache->cache_assoc);
sysfs_show_32bit_prop(buffer, offs, "latency", cache->cache_latency);
sysfs_show_32bit_prop(buffer, offs, "type", cache->cache_type);
offs += snprintf(buffer+offs, PAGE_SIZE-offs, "sibling_map ");
for (i = 0; i < cache->sibling_map_size; i++)
for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++)
/* Check each bit */
offs += snprintf(buffer+offs, PAGE_SIZE-offs, "%d,",
(cache->sibling_map[i] >> j) & 1);
/* Replace the last "," with end of line */
buffer[offs-1] = '\n';
return offs;
}
static const struct sysfs_ops cache_ops = {
.show = kfd_cache_show,
};
static const struct kobj_type cache_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
/****** Sysfs of Performance Counters ******/
struct kfd_perf_attr {
struct kobj_attribute attr;
uint32_t data;
};
static ssize_t perf_show(struct kobject *kobj, struct kobj_attribute *attrs,
char *buf)
{
int offs = 0;
struct kfd_perf_attr *attr;
buf[0] = 0;
attr = container_of(attrs, struct kfd_perf_attr, attr);
if (!attr->data) /* invalid data for PMC */
return 0;
else
return sysfs_show_32bit_val(buf, offs, attr->data);
}
#define KFD_PERF_DESC(_name, _data) \
{ \
.attr = __ATTR(_name, 0444, perf_show, NULL), \
.data = _data, \
}
static struct kfd_perf_attr perf_attr_iommu[] = {
KFD_PERF_DESC(max_concurrent, 0),
KFD_PERF_DESC(num_counters, 0),
KFD_PERF_DESC(counter_ids, 0),
};
/****************************************/
static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
int offs = 0;
struct kfd_topology_device *dev;
uint32_t log_max_watch_addr;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
return -EPERM;
return sysfs_show_32bit_val(buffer, offs, dev->gpu_id);
}
if (strcmp(attr->name, "name") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_name);
if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
return -EPERM;
return sysfs_show_str_val(buffer, offs, dev->node_props.name);
}
dev = container_of(attr, struct kfd_topology_device,
attr_props);
if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
return -EPERM;
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count",
dev->gpu ? dev->node_props.simd_count : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count",
dev->node_props.caches_count);
sysfs_show_32bit_prop(buffer, offs, "io_links_count",
dev->node_props.io_links_count);
sysfs_show_32bit_prop(buffer, offs, "p2p_links_count",
dev->node_props.p2p_links_count);
sysfs_show_32bit_prop(buffer, offs, "cpu_core_id_base",
dev->node_props.cpu_core_id_base);
sysfs_show_32bit_prop(buffer, offs, "simd_id_base",
dev->node_props.simd_id_base);
sysfs_show_32bit_prop(buffer, offs, "max_waves_per_simd",
dev->node_props.max_waves_per_simd);
sysfs_show_32bit_prop(buffer, offs, "lds_size_in_kb",
dev->node_props.lds_size_in_kb);
sysfs_show_32bit_prop(buffer, offs, "gds_size_in_kb",
dev->node_props.gds_size_in_kb);
sysfs_show_32bit_prop(buffer, offs, "num_gws",
dev->node_props.num_gws);
sysfs_show_32bit_prop(buffer, offs, "wave_front_size",
dev->node_props.wave_front_size);
sysfs_show_32bit_prop(buffer, offs, "array_count",
dev->gpu ? (dev->node_props.array_count *
NUM_XCC(dev->gpu->xcc_mask)) : 0);
sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine",
dev->node_props.simd_arrays_per_engine);
sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array",
dev->node_props.cu_per_simd_array);
sysfs_show_32bit_prop(buffer, offs, "simd_per_cu",
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, offs, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
sysfs_show_32bit_prop(buffer, offs, "gfx_target_version",
dev->node_props.gfx_target_version);
sysfs_show_32bit_prop(buffer, offs, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, offs, "device_id",
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, offs, "location_id",
dev->node_props.location_id);
sysfs_show_32bit_prop(buffer, offs, "domain",
dev->node_props.domain);
sysfs_show_32bit_prop(buffer, offs, "drm_render_minor",
dev->node_props.drm_render_minor);
sysfs_show_64bit_prop(buffer, offs, "hive_id",
dev->node_props.hive_id);
sysfs_show_32bit_prop(buffer, offs, "num_sdma_engines",
dev->node_props.num_sdma_engines);
sysfs_show_32bit_prop(buffer, offs, "num_sdma_xgmi_engines",
dev->node_props.num_sdma_xgmi_engines);
sysfs_show_32bit_prop(buffer, offs, "num_sdma_queues_per_engine",
dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
dev->node_props.num_cp_queues);
if (dev->gpu) {
log_max_watch_addr =
__ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points);
if (log_max_watch_addr) {
dev->node_props.capability |=
HSA_CAP_WATCH_POINTS_SUPPORTED;
dev->node_props.capability |=
((log_max_watch_addr <<
HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
}
if (dev->gpu->adev->asic_type == CHIP_TONGA)
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
dev->node_props.max_engine_clk_fcompute);
sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL);
sysfs_show_32bit_prop(buffer, offs, "fw_version",
dev->gpu->kfd->mec_fw_version);
sysfs_show_32bit_prop(buffer, offs, "capability",
dev->node_props.capability);
sysfs_show_64bit_prop(buffer, offs, "debug_prop",
dev->node_props.debug_prop);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
NUM_XCC(dev->gpu->xcc_mask));
}
return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
cpufreq_quick_get_max(0)/1000);
}
static const struct sysfs_ops node_ops = {
.show = node_show,
};
static const struct kobj_type node_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
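/* Remove a property attribute file and release the kobject that backs it */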
static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
{
sysfs_remove_file(kobj, attr);
kobject_del(kobj);
kobject_put(kobj);
}
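/* Tear down the sysfs files and kobjects (io_links, p2p_links, caches,
 * mem_banks, perf and the node itself) created for a topology device
 */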
static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
{
struct kfd_iolink_properties *p2plink;
struct kfd_iolink_properties *iolink;
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
struct kfd_perf_properties *perf;
if (dev->kobj_iolink) {
list_for_each_entry(iolink, &dev->io_link_props, list)
if (iolink->kobj) {
kfd_remove_sysfs_file(iolink->kobj,
&iolink->attr);
iolink->kobj = NULL;
}
kobject_del(dev->kobj_iolink);
kobject_put(dev->kobj_iolink);
dev->kobj_iolink = NULL;
}
if (dev->kobj_p2plink) {
list_for_each_entry(p2plink, &dev->p2p_link_props, list)
if (p2plink->kobj) {
kfd_remove_sysfs_file(p2plink->kobj,
&p2plink->attr);
p2plink->kobj = NULL;
}
kobject_del(dev->kobj_p2plink);
kobject_put(dev->kobj_p2plink);
dev->kobj_p2plink = NULL;
}
if (dev->kobj_cache) {
list_for_each_entry(cache, &dev->cache_props, list)
if (cache->kobj) {
kfd_remove_sysfs_file(cache->kobj,
&cache->attr);
cache->kobj = NULL;
}
kobject_del(dev->kobj_cache);
kobject_put(dev->kobj_cache);
dev->kobj_cache = NULL;
}
if (dev->kobj_mem) {
list_for_each_entry(mem, &dev->mem_props, list)
if (mem->kobj) {
kfd_remove_sysfs_file(mem->kobj, &mem->attr);
mem->kobj = NULL;
}
kobject_del(dev->kobj_mem);
kobject_put(dev->kobj_mem);
dev->kobj_mem = NULL;
}
if (dev->kobj_perf) {
list_for_each_entry(perf, &dev->perf_props, list) {
kfree(perf->attr_group);
perf->attr_group = NULL;
}
kobject_del(dev->kobj_perf);
kobject_put(dev->kobj_perf);
dev->kobj_perf = NULL;
}
if (dev->kobj_node) {
sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
sysfs_remove_file(dev->kobj_node, &dev->attr_name);
sysfs_remove_file(dev->kobj_node, &dev->attr_props);
kobject_del(dev->kobj_node);
kobject_put(dev->kobj_node);
dev->kobj_node = NULL;
}
}
static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
uint32_t id)
{
struct kfd_iolink_properties *p2plink;
struct kfd_iolink_properties *iolink;
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
struct kfd_perf_properties *perf;
int ret;
uint32_t i, num_attrs;
struct attribute **attrs;
if (WARN_ON(dev->kobj_node))
return -EEXIST;
/*
* Creating the sysfs folders
*/
dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
if (!dev->kobj_node)
return -ENOMEM;
ret = kobject_init_and_add(dev->kobj_node, &node_type,
sys_props.kobj_nodes, "%d", id);
if (ret < 0) {
kobject_put(dev->kobj_node);
return ret;
}
dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
if (!dev->kobj_mem)
return -ENOMEM;
dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
if (!dev->kobj_cache)
return -ENOMEM;
dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
if (!dev->kobj_iolink)
return -ENOMEM;
dev->kobj_p2plink = kobject_create_and_add("p2p_links", dev->kobj_node);
if (!dev->kobj_p2plink)
return -ENOMEM;
dev->kobj_perf = kobject_create_and_add("perf", dev->kobj_node);
if (!dev->kobj_perf)
return -ENOMEM;
/*
* Creating sysfs files for node properties
*/
dev->attr_gpuid.name = "gpu_id";
dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&dev->attr_gpuid);
dev->attr_name.name = "name";
dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&dev->attr_name);
dev->attr_props.name = "properties";
dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&dev->attr_props);
ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
if (ret < 0)
return ret;
ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
if (ret < 0)
return ret;
ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
if (ret < 0)
return ret;
i = 0;
list_for_each_entry(mem, &dev->mem_props, list) {
mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!mem->kobj)
return -ENOMEM;
ret = kobject_init_and_add(mem->kobj, &mem_type,
dev->kobj_mem, "%d", i);
if (ret < 0) {
kobject_put(mem->kobj);
return ret;
}
mem->attr.name = "properties";
mem->attr.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&mem->attr);
ret = sysfs_create_file(mem->kobj, &mem->attr);
if (ret < 0)
return ret;
i++;
}
i = 0;
list_for_each_entry(cache, &dev->cache_props, list) {
cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!cache->kobj)
return -ENOMEM;
ret = kobject_init_and_add(cache->kobj, &cache_type,
dev->kobj_cache, "%d", i);
if (ret < 0) {
kobject_put(cache->kobj);
return ret;
}
cache->attr.name = "properties";
cache->attr.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&cache->attr);
ret = sysfs_create_file(cache->kobj, &cache->attr);
if (ret < 0)
return ret;
i++;
}
i = 0;
list_for_each_entry(iolink, &dev->io_link_props, list) {
iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!iolink->kobj)
return -ENOMEM;
ret = kobject_init_and_add(iolink->kobj, &iolink_type,
dev->kobj_iolink, "%d", i);
if (ret < 0) {
kobject_put(iolink->kobj);
return ret;
}
iolink->attr.name = "properties";
iolink->attr.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&iolink->attr);
ret = sysfs_create_file(iolink->kobj, &iolink->attr);
if (ret < 0)
return ret;
i++;
}
i = 0;
list_for_each_entry(p2plink, &dev->p2p_link_props, list) {
p2plink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!p2plink->kobj)
return -ENOMEM;
ret = kobject_init_and_add(p2plink->kobj, &iolink_type,
dev->kobj_p2plink, "%d", i);
if (ret < 0) {
kobject_put(p2plink->kobj);
return ret;
}
p2plink->attr.name = "properties";
p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&p2plink->attr);
ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
if (ret < 0)
return ret;
i++;
}
/* All hardware blocks have the same number of attributes. */
num_attrs = ARRAY_SIZE(perf_attr_iommu);
list_for_each_entry(perf, &dev->perf_props, list) {
perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
* num_attrs + sizeof(struct attribute_group),
GFP_KERNEL);
if (!perf->attr_group)
return -ENOMEM;
attrs = (struct attribute **)(perf->attr_group + 1);
if (!strcmp(perf->block_name, "iommu")) {
/* The IOMMU's num_counters and counter_ids are exposed under
 * /sys/bus/event_source/devices/amd_iommu, so we don't
 * duplicate them here.
 */
perf_attr_iommu[0].data = perf->max_concurrent;
for (i = 0; i < num_attrs; i++)
attrs[i] = &perf_attr_iommu[i].attr.attr;
}
perf->attr_group->name = perf->block_name;
perf->attr_group->attrs = attrs;
ret = sysfs_create_group(dev->kobj_perf, perf->attr_group);
if (ret < 0)
return ret;
}
return 0;
}
/* Called with write topology lock acquired */
static int kfd_build_sysfs_node_tree(void)
{
struct kfd_topology_device *dev;
int ret;
uint32_t i = 0;
list_for_each_entry(dev, &topology_device_list, list) {
ret = kfd_build_sysfs_node_entry(dev, i);
if (ret < 0)
return ret;
i++;
}
return 0;
}
/* Called with write topology lock acquired */
static void kfd_remove_sysfs_node_tree(void)
{
struct kfd_topology_device *dev;
list_for_each_entry(dev, &topology_device_list, list)
kfd_remove_sysfs_node_entry(dev);
}
static int kfd_topology_update_sysfs(void)
{
int ret;
if (!sys_props.kobj_topology) {
sys_props.kobj_topology =
kfd_alloc_struct(sys_props.kobj_topology);
if (!sys_props.kobj_topology)
return -ENOMEM;
ret = kobject_init_and_add(sys_props.kobj_topology,
&sysprops_type, &kfd_device->kobj,
"topology");
if (ret < 0) {
kobject_put(sys_props.kobj_topology);
return ret;
}
sys_props.kobj_nodes = kobject_create_and_add("nodes",
sys_props.kobj_topology);
if (!sys_props.kobj_nodes)
return -ENOMEM;
sys_props.attr_genid.name = "generation_id";
sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&sys_props.attr_genid);
ret = sysfs_create_file(sys_props.kobj_topology,
&sys_props.attr_genid);
if (ret < 0)
return ret;
sys_props.attr_props.name = "system_properties";
sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&sys_props.attr_props);
ret = sysfs_create_file(sys_props.kobj_topology,
&sys_props.attr_props);
if (ret < 0)
return ret;
}
kfd_remove_sysfs_node_tree();
return kfd_build_sysfs_node_tree();
}
static void kfd_topology_release_sysfs(void)
{
kfd_remove_sysfs_node_tree();
if (sys_props.kobj_topology) {
sysfs_remove_file(sys_props.kobj_topology,
&sys_props.attr_genid);
sysfs_remove_file(sys_props.kobj_topology,
&sys_props.attr_props);
if (sys_props.kobj_nodes) {
kobject_del(sys_props.kobj_nodes);
kobject_put(sys_props.kobj_nodes);
sys_props.kobj_nodes = NULL;
}
kobject_del(sys_props.kobj_topology);
kobject_put(sys_props.kobj_topology);
sys_props.kobj_topology = NULL;
}
}
/* Called with write topology_lock acquired */
static void kfd_topology_update_device_list(struct list_head *temp_list,
struct list_head *master_list)
{
while (!list_empty(temp_list)) {
list_move_tail(temp_list->next, master_list);
sys_props.num_devices++;
}
}
static void kfd_debug_print_topology(void)
{
struct kfd_topology_device *dev;
down_read(&topology_lock);
dev = list_last_entry(&topology_device_list,
struct kfd_topology_device, list);
if (dev) {
if (dev->node_props.cpu_cores_count &&
dev->node_props.simd_count) {
pr_info("Topology: Add APU node [0x%0x:0x%0x]\n",
dev->node_props.device_id,
dev->node_props.vendor_id);
} else if (dev->node_props.cpu_cores_count)
pr_info("Topology: Add CPU node\n");
else if (dev->node_props.simd_count)
pr_info("Topology: Add dGPU node [0x%0x:0x%0x]\n",
dev->node_props.device_id,
dev->node_props.vendor_id);
}
up_read(&topology_lock);
}
/* Helper function for initializing platform_xx members of
 * kfd_system_properties. Uses OEM info from the last CPU/APU node.
 */
static void kfd_update_system_properties(void)
{
struct kfd_topology_device *dev;
down_read(&topology_lock);
dev = list_last_entry(&topology_device_list,
struct kfd_topology_device, list);
if (dev) {
sys_props.platform_id =
(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
sys_props.platform_rev = dev->oem_revision;
}
up_read(&topology_lock);
}
static void find_system_memory(const struct dmi_header *dm,
void *private)
{
struct kfd_mem_properties *mem;
u16 mem_width, mem_clock;
struct kfd_topology_device *kdev =
(struct kfd_topology_device *)private;
const u8 *dmi_data = (const u8 *)(dm + 1);
if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
list_for_each_entry(mem, &kdev->mem_props, list) {
if (mem_width != 0xFFFF && mem_width != 0)
mem->width = mem_width;
if (mem_clock != 0)
mem->mem_clk_max = mem_clock;
}
}
}
/* kfd_add_non_crat_information - Add information that is not currently
 * defined in CRAT but is necessary for KFD topology
 * @kdev - topology device to which additional info is added
 */
static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
{
/* Check if CPU only node. */
if (!kdev->gpu) {
/* Add system memory information */
dmi_walk(find_system_memory, kdev);
}
/* TODO: For GPU node, rearrange code from kfd_topology_add_device */
}
int kfd_topology_init(void)
{
void *crat_image = NULL;
size_t image_size = 0;
int ret;
struct list_head temp_topology_device_list;
int cpu_only_node = 0;
struct kfd_topology_device *kdev;
int proximity_domain;
/* topology_device_list - Master list of all topology devices
 * temp_topology_device_list - temporary list created while parsing CRAT
 * or VCRAT. Once parsing is complete, the contents of the list are moved
 * to topology_device_list
 */
/* Initialize the heads of both lists */
INIT_LIST_HEAD(&topology_device_list);
INIT_LIST_HEAD(&temp_topology_device_list);
init_rwsem(&topology_lock);
memset(&sys_props, 0, sizeof(sys_props));
/* Proximity domains in ACPI CRAT tables start counting at
* 0. The same should be true for virtual CRAT tables created
* at this stage. GPUs added later in kfd_topology_add_device
* use a counter.
*/
proximity_domain = 0;
ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
COMPUTE_UNIT_CPU, NULL,
proximity_domain);
cpu_only_node = 1;
if (ret) {
pr_err("Error creating VCRAT table for CPU\n");
return ret;
}
ret = kfd_parse_crat_table(crat_image,
&temp_topology_device_list,
proximity_domain);
if (ret) {
pr_err("Error parsing VCRAT table for CPU\n");
goto err;
}
kdev = list_first_entry(&temp_topology_device_list,
struct kfd_topology_device, list);
down_write(&topology_lock);
kfd_topology_update_device_list(&temp_topology_device_list,
&topology_device_list);
topology_crat_proximity_domain = sys_props.num_devices-1;
ret = kfd_topology_update_sysfs();
up_write(&topology_lock);
if (!ret) {
sys_props.generation_count++;
kfd_update_system_properties();
kfd_debug_print_topology();
} else
pr_err("Failed to update topology in sysfs ret=%d\n", ret);
/* For nodes with GPU, this information gets added
* when GPU is detected (kfd_topology_add_device).
*/
if (cpu_only_node) {
/* Add additional information to CPU only node created above */
down_write(&topology_lock);
kdev = list_first_entry(&topology_device_list,
struct kfd_topology_device, list);
up_write(&topology_lock);
kfd_add_non_crat_information(kdev);
}
err:
kfd_destroy_crat_image(crat_image);
return ret;
}
void kfd_topology_shutdown(void)
{
down_write(&topology_lock);
kfd_topology_release_sysfs();
kfd_release_live_view();
up_write(&topology_lock);
}
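/* Derive a gpu_id by hashing the node's PCI location, subsystem IDs,
 * local memory size and XCC configuration
 */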
static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu)
{
uint32_t hashout;
uint32_t buf[8];
uint64_t local_mem_size;
int i;
if (!gpu)
return 0;
local_mem_size = gpu->local_mem_info.local_mem_size_private +
gpu->local_mem_info.local_mem_size_public;
buf[0] = gpu->adev->pdev->devfn;
buf[1] = gpu->adev->pdev->subsystem_vendor |
(gpu->adev->pdev->subsystem_device << 16);
buf[2] = pci_domain_nr(gpu->adev->pdev->bus);
buf[3] = gpu->adev->pdev->device;
buf[4] = gpu->adev->pdev->bus->number;
buf[5] = lower_32_bits(local_mem_size);
buf[6] = upper_32_bits(local_mem_size);
buf[7] = (ffs(gpu->xcc_mask) - 1) | (NUM_XCC(gpu->xcc_mask) << 16);
for (i = 0, hashout = 0; i < 8; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
return hashout;
}
/* kfd_assign_gpu - Attach @gpu to the correct kfd topology device. If
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
struct kfd_mem_properties *mem;
struct kfd_cache_properties *cache;
struct kfd_iolink_properties *iolink;
struct kfd_iolink_properties *p2plink;
list_for_each_entry(dev, &topology_device_list, list) {
/* Discrete GPUs need their own topology device list
* entries. Don't assign them to CPU/APU nodes.
*/
if (dev->node_props.cpu_cores_count)
continue;
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
list_for_each_entry(mem, &dev->mem_props, list)
mem->gpu = dev->gpu;
list_for_each_entry(cache, &dev->cache_props, list)
cache->gpu = dev->gpu;
list_for_each_entry(iolink, &dev->io_link_props, list)
iolink->gpu = dev->gpu;
list_for_each_entry(p2plink, &dev->p2p_link_props, list)
p2plink->gpu = dev->gpu;
break;
}
}
return out_dev;
}
static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
{
/*
* TODO: Generate an event for thunk about the arrival/removal
* of the GPU
*/
}
/* kfd_fill_mem_clk_max_info - Since CRAT doesn't have memory clock info,
* patch this after CRAT parsing.
*/
static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
{
struct kfd_mem_properties *mem;
struct kfd_local_mem_info local_mem_info;
if (!dev)
return;
/* Currently, the amdgpu driver (amdgpu_mc) deals only with GPUs that have
 * a single bank of VRAM local memory.
 * For dGPUs - VCRAT reports only one bank of Local Memory
 * For APUs - If CRAT from ACPI reports more than one bank, then
 * all the banks will report the same mem_clk_max information
 */
amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info,
dev->gpu->xcp);
list_for_each_entry(mem, &dev->mem_props, list)
mem->mem_clk_max = local_mem_info.mem_clk_max;
}
static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
struct kfd_topology_device *target_gpu_dev,
struct kfd_iolink_properties *link)
{
/* xgmi always supports atomics between links. */
if (link->iolink_type == CRAT_IOLINK_TYPE_XGMI)
return;
/* check pcie support to set cpu(dev) flags for target_gpu_dev link. */
if (target_gpu_dev) {
uint32_t cap;
pcie_capability_read_dword(target_gpu_dev->gpu->adev->pdev,
PCI_EXP_DEVCAP2, &cap);
if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
/* set gpu (dev) flags. */
} else {
if (!dev->gpu->kfd->pci_atomic_requested ||
dev->gpu->adev->asic_type == CHIP_HAWAII)
link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
}
}
static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
struct kfd_iolink_properties *outbound_link,
struct kfd_iolink_properties *inbound_link)
{
/* CPU -> GPU with PCIe */
if (!to_dev->gpu &&
inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
if (to_dev->gpu) {
/* GPU <-> GPU with PCIe and
* Vega20 with XGMI
*/
if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
(inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) {
outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
}
}
}
static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
{
struct kfd_iolink_properties *link, *inbound_link;
struct kfd_topology_device *peer_dev;
if (!dev || !dev->gpu)
return;
/* The GPU only creates direct links, so apply the flag settings to all of them */
list_for_each_entry(link, &dev->io_link_props, list) {
link->flags = CRAT_IOLINK_FLAGS_ENABLED;
kfd_set_iolink_no_atomics(dev, NULL, link);
peer_dev = kfd_topology_device_by_proximity_domain(
link->node_to);
if (!peer_dev)
continue;
/* Include the CPU peer in GPU hive if connected over xGMI. */
if (!peer_dev->gpu &&
link->iolink_type == CRAT_IOLINK_TYPE_XGMI) {
/*
* If the GPU is not part of a GPU hive, use its pci
* device location as the hive ID to bind with the CPU.
*/
if (!dev->node_props.hive_id)
dev->node_props.hive_id = pci_dev_id(dev->gpu->adev->pdev);
peer_dev->node_props.hive_id = dev->node_props.hive_id;
}
list_for_each_entry(inbound_link, &peer_dev->io_link_props,
list) {
if (inbound_link->node_to != link->node_from)
continue;
inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
}
}
/* Apply the same flag settings to all indirect (p2p) links */
list_for_each_entry(link, &dev->p2p_link_props, list) {
link->flags = CRAT_IOLINK_FLAGS_ENABLED;
kfd_set_iolink_no_atomics(dev, NULL, link);
peer_dev = kfd_topology_device_by_proximity_domain(
link->node_to);
if (!peer_dev)
continue;
list_for_each_entry(inbound_link, &peer_dev->p2p_link_props,
list) {
if (inbound_link->node_to != link->node_from)
continue;
inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
}
}
}
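/* Create the sysfs kobject and "properties" file for a newly added p2p link */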
static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
struct kfd_iolink_properties *p2plink)
{
int ret;
p2plink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!p2plink->kobj)
return -ENOMEM;
ret = kobject_init_and_add(p2plink->kobj, &iolink_type,
dev->kobj_p2plink, "%d", dev->node_props.p2p_links_count - 1);
if (ret < 0) {
kobject_put(p2plink->kobj);
return ret;
}
p2plink->attr.name = "properties";
p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&p2plink->attr);
ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
if (ret < 0)
return ret;
return 0;
}
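/* For every CPU node other than the one the GPU is directly attached to,
 * combine the GPU's direct IO link with the CPU<->CPU link into an
 * indirect GPU<->CPU p2p link (and the reverse CPU<->GPU link for
 * large-BAR GPUs)
 */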
static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
struct kfd_iolink_properties *props = NULL, *props2 = NULL;
struct kfd_topology_device *cpu_dev;
int ret = 0;
int i, num_cpu;
num_cpu = 0;
list_for_each_entry(cpu_dev, &topology_device_list, list) {
if (cpu_dev->gpu)
break;
num_cpu++;
}
gpu_link = list_first_entry(&kdev->io_link_props,
struct kfd_iolink_properties, list);
if (!gpu_link)
return -ENOMEM;
for (i = 0; i < num_cpu; i++) {
/* CPU <--> GPU */
if (gpu_link->node_to == i)
continue;
/* find CPU <--> CPU links */
cpu_link = NULL;
cpu_dev = kfd_topology_device_by_proximity_domain(i);
if (cpu_dev) {
list_for_each_entry(tmp_link,
&cpu_dev->io_link_props, list) {
if (tmp_link->node_to == gpu_link->node_to) {
cpu_link = tmp_link;
break;
}
}
}
if (!cpu_link)
return -ENOMEM;
/* CPU <--> CPU <--> GPU, GPU node*/
props = kfd_alloc_struct(props);
if (!props)
return -ENOMEM;
memcpy(props, gpu_link, sizeof(struct kfd_iolink_properties));
props->weight = gpu_link->weight + cpu_link->weight;
props->min_latency = gpu_link->min_latency + cpu_link->min_latency;
props->max_latency = gpu_link->max_latency + cpu_link->max_latency;
props->min_bandwidth = min(gpu_link->min_bandwidth, cpu_link->min_bandwidth);
props->max_bandwidth = min(gpu_link->max_bandwidth, cpu_link->max_bandwidth);
props->node_from = gpu_node;
props->node_to = i;
kdev->node_props.p2p_links_count++;
list_add_tail(&props->list, &kdev->p2p_link_props);
ret = kfd_build_p2p_node_entry(kdev, props);
if (ret < 0)
return ret;
/* For small BAR, do not create CPU --> GPU indirect links */
if (kfd_dev_is_large_bar(kdev->gpu)) {
/* CPU <--> CPU <--> GPU, CPU node*/
props2 = kfd_alloc_struct(props2);
if (!props2)
return -ENOMEM;
memcpy(props2, props, sizeof(struct kfd_iolink_properties));
props2->node_from = i;
props2->node_to = gpu_node;
props2->kobj = NULL;
cpu_dev->node_props.p2p_links_count++;
list_add_tail(&props2->list, &cpu_dev->p2p_link_props);
ret = kfd_build_p2p_node_entry(cpu_dev, props2);
if (ret < 0)
return ret;
}
}
return ret;
}
#if defined(CONFIG_HSA_AMD_P2P)
static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
struct kfd_topology_device *peer, int from, int to)
{
struct kfd_iolink_properties *props = NULL;
struct kfd_iolink_properties *iolink1, *iolink2, *iolink3;
struct kfd_topology_device *cpu_dev;
int ret = 0;
if (!amdgpu_device_is_peer_accessible(
kdev->gpu->adev,
peer->gpu->adev))
return ret;
iolink1 = list_first_entry(&kdev->io_link_props,
struct kfd_iolink_properties, list);
if (!iolink1)
return -ENOMEM;
iolink2 = list_first_entry(&peer->io_link_props,
struct kfd_iolink_properties, list);
if (!iolink2)
return -ENOMEM;
props = kfd_alloc_struct(props);
if (!props)
return -ENOMEM;
memcpy(props, iolink1, sizeof(struct kfd_iolink_properties));
props->weight = iolink1->weight + iolink2->weight;
props->min_latency = iolink1->min_latency + iolink2->min_latency;
props->max_latency = iolink1->max_latency + iolink2->max_latency;
props->min_bandwidth = min(iolink1->min_bandwidth, iolink2->min_bandwidth);
props->max_bandwidth = min(iolink1->max_bandwidth, iolink2->max_bandwidth);
if (iolink1->node_to != iolink2->node_to) {
/* CPU->CPU link*/
cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
if (cpu_dev) {
list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
if (iolink3->node_to == iolink2->node_to)
break;
props->weight += iolink3->weight;
props->min_latency += iolink3->min_latency;
props->max_latency += iolink3->max_latency;
props->min_bandwidth = min(props->min_bandwidth,
iolink3->min_bandwidth);
props->max_bandwidth = min(props->max_bandwidth,
iolink3->max_bandwidth);
} else {
WARN(1, "CPU node not found");
}
}
props->node_from = from;
props->node_to = to;
peer->node_props.p2p_links_count++;
list_add_tail(&props->list, &peer->p2p_link_props);
ret = kfd_build_p2p_node_entry(peer, props);
return ret;
}
#endif
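/* Create indirect (CPU-routed) links for the most recently added GPU node
 * and, when CONFIG_HSA_AMD_P2P is enabled, direct peer-to-peer links to
 * other GPU nodes outside its XGMI hive
 */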
static int kfd_dev_create_p2p_links(void)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *new_dev;
#if defined(CONFIG_HSA_AMD_P2P)
uint32_t i;
#endif
uint32_t k;
int ret = 0;
k = 0;
list_for_each_entry(dev, &topology_device_list, list)
k++;
if (k < 2)
return 0;
new_dev = list_last_entry(&topology_device_list, struct kfd_topology_device, list);
if (WARN_ON(!new_dev->gpu))
return 0;
k--;
/* create in-direct links */
ret = kfd_create_indirect_link_prop(new_dev, k);
if (ret < 0)
goto out;
/* create p2p links */
#if defined(CONFIG_HSA_AMD_P2P)
i = 0;
list_for_each_entry(dev, &topology_device_list, list) {
if (dev == new_dev)
break;
if (!dev->gpu || !dev->gpu->adev ||
(dev->gpu->kfd->hive_id &&
dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))
goto next;
/* check whether the nodes are peer accessible, in one or both directions */
ret = kfd_add_peer_prop(new_dev, dev, i, k);
if (ret < 0)
goto out;
ret = kfd_add_peer_prop(dev, new_dev, k, i);
if (ret < 0)
goto out;
next:
i++;
}
#endif
out:
return ret;
}
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
struct kfd_cu_info *cu_info,
int cu_bitmask,
int cache_type, unsigned int cu_processor_id,
int cu_block)
{
unsigned int cu_sibling_map_mask;
int first_active_cu;
struct kfd_cache_properties *pcache = NULL;
cu_sibling_map_mask = cu_bitmask;
cu_sibling_map_mask >>= cu_block;
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
/* The CU could be inactive. In case of a shared cache, find the first
 * active CU; in case of a non-shared cache, check if the CU is
 * inactive and, if so, skip it.
 */
if (first_active_cu) {
pcache = kfd_alloc_struct(pcache);
if (!pcache)
return -ENOMEM;
memset(pcache, 0, sizeof(struct kfd_cache_properties));
pcache->processor_id_low = cu_processor_id + (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
pcache->cache_size = pcache_info[cache_type].cache_size;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_INST_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_CPU_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_CPU;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_HSACU;
/* Sibling map is w.r.t processor_id_low, so shift out
* inactive CU
*/
cu_sibling_map_mask =
cu_sibling_map_mask >> (first_active_cu - 1);
pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
pcache->sibling_map[1] =
(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
pcache->sibling_map[2] =
(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
pcache->sibling_map[3] =
(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
pcache->sibling_map_size = 4;
*props_ext = pcache;
return 0;
}
return 1;
}
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
struct kfd_cu_info *cu_info,
int cache_type, unsigned int cu_processor_id,
struct kfd_node *knode)
{
unsigned int cu_sibling_map_mask;
int first_active_cu;
int i, j, k, xcc, start, end;
struct kfd_cache_properties *pcache = NULL;
start = ffs(knode->xcc_mask) - 1;
end = start + NUM_XCC(knode->xcc_mask);
cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
/* The CU could be inactive. In case of a shared cache, find the first
 * active CU; in case of a non-shared cache, check if the CU is
 * inactive and, if so, skip it.
 */
if (first_active_cu) {
pcache = kfd_alloc_struct(pcache);
if (!pcache)
return -ENOMEM;
memset(pcache, 0, sizeof(struct kfd_cache_properties));
pcache->processor_id_low = cu_processor_id
+ (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
pcache->cache_size = pcache_info[cache_type].cache_size;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_INST_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_CPU_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_CPU;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_HSACU;
/* Sibling map is w.r.t processor_id_low, so shift out
* inactive CU
*/
cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
k = 0;
for (xcc = start; xcc < end; xcc++) {
for (i = 0; i < cu_info->num_shader_engines; i++) {
for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
k += 4;
cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
}
}
}
pcache->sibling_map_size = k;
*props_ext = pcache;
return 0;
}
return 1;
}
#define KFD_MAX_CACHE_TYPES 6
/* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info
* tables
*/
static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
int i, j, k, xcc, start, end;
int ct = 0;
unsigned int cu_processor_id;
int ret;
unsigned int num_cu_shared;
struct kfd_cu_info cu_info;
struct kfd_cu_info *pcu_info;
int gpu_processor_id;
struct kfd_cache_properties *props_ext;
int num_of_entries = 0;
int num_of_cache_types = 0;
struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
pcu_info = &cu_info;
gpu_processor_id = dev->node_props.simd_id_base;
pcache_info = cache_info;
num_of_cache_types = kfd_get_gpu_cache_info(kdev, &pcache_info);
if (!num_of_cache_types) {
pr_warn("no cache info found\n");
return;
}
/* For each cache type listed in the kfd_gpu_cache_info table,
 * go through all available Compute Units.
 * If kfd_gpu_cache_info.num_cu_shared == 1, the [i,j,k] loop
 * visits every available CU; otherwise it considers only one
 * CU from each group that shares the cache.
 */
start = ffs(kdev->xcc_mask) - 1;
end = start + NUM_XCC(kdev->xcc_mask);
for (ct = 0; ct < num_of_cache_types; ct++) {
cu_processor_id = gpu_processor_id;
if (pcache_info[ct].cache_level == 1) {
for (xcc = start; xcc < end; xcc++) {
for (i = 0; i < pcu_info->num_shader_engines; i++) {
for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
cu_processor_id, k);
if (ret < 0)
break;
if (!ret) {
num_of_entries++;
list_add_tail(&props_ext->list, &dev->cache_props);
}
/* Move to next CU block */
num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
pcu_info->num_cu_per_sh) ?
pcache_info[ct].num_cu_shared :
(pcu_info->num_cu_per_sh - k);
cu_processor_id += num_cu_shared;
}
}
}
}
} else {
ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
pcu_info, ct, cu_processor_id, kdev);
if (ret < 0)
break;
if (!ret) {
num_of_entries++;
list_add_tail(&props_ext->list, &dev->cache_props);
}
}
}
dev->node_props.caches_count += num_of_entries;
pr_debug("Added [%d] GPU cache entries\n", num_of_entries);
}
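/* Create a virtual CRAT for @gpu, parse it into a new topology device,
 * fill in the GPU cache info and update the sysfs tree.
 * Called with write topology_lock held.
 */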
static int kfd_topology_add_device_locked(struct kfd_node *gpu, uint32_t gpu_id,
struct kfd_topology_device **dev)
{
int proximity_domain = ++topology_crat_proximity_domain;
struct list_head temp_topology_device_list;
void *crat_image = NULL;
size_t image_size = 0;
int res;
res = kfd_create_crat_image_virtual(&crat_image, &image_size,
COMPUTE_UNIT_GPU, gpu,
proximity_domain);
if (res) {
pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
gpu_id);
topology_crat_proximity_domain--;
goto err;
}
INIT_LIST_HEAD(&temp_topology_device_list);
res = kfd_parse_crat_table(crat_image,
&temp_topology_device_list,
proximity_domain);
if (res) {
pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
gpu_id);
topology_crat_proximity_domain--;
goto err;
}
kfd_topology_update_device_list(&temp_topology_device_list,
&topology_device_list);
*dev = kfd_assign_gpu(gpu);
if (WARN_ON(!*dev)) {
res = -ENODEV;
goto err;
}
/* Fill the cache affinity information here for the GPUs
* using VCRAT
*/
kfd_fill_cache_non_crat_info(*dev, gpu);
/* Update the SYSFS tree, since we added another topology
* device
*/
res = kfd_topology_update_sysfs();
if (!res)
sys_props.generation_count++;
else
pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
gpu_id, res);
err:
kfd_destroy_crat_image(crat_image);
return res;
}
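/* Mark HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED when the MEC/MES firmware
 * on this ASIC is recent enough to support debug trap handling
 */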
static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *dev)
{
bool firmware_supported = true;
if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) &&
KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) {
uint32_t mes_api_rev = (dev->gpu->adev->mes.sched_version &
AMDGPU_MES_API_VERSION_MASK) >>
AMDGPU_MES_API_VERSION_SHIFT;
uint32_t mes_rev = dev->gpu->adev->mes.sched_version &
AMDGPU_MES_VERSION_MASK;
firmware_supported = (mes_api_rev >= 14) && (mes_rev >= 64);
goto out;
}
/*
* Note: Any unlisted devices here are assumed to support exception handling.
* Add additional checks here as needed.
*/
switch (KFD_GC_VERSION(dev->gpu)) {
case IP_VERSION(9, 0, 1):
firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768;
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
firmware_supported = dev->gpu->kfd->mec_fw_version >= 459;
break;
case IP_VERSION(9, 4, 1):
firmware_supported = dev->gpu->kfd->mec_fw_version >= 60;
break;
case IP_VERSION(9, 4, 2):
firmware_supported = dev->gpu->kfd->mec_fw_version >= 51;
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 1):
firmware_supported = dev->gpu->kfd->mec_fw_version >= 144;
break;
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
firmware_supported = dev->gpu->kfd->mec_fw_version >= 89;
break;
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 3, 3):
firmware_supported = false;
break;
default:
break;
}
out:
if (firmware_supported)
dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED;
}
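/* Set doorbell-type and debug/trap related capability and debug_prop
 * bits according to the GFX IP version of the node
 */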
static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
{
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT |
HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED |
HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED;
if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3))
dev->node_props.debug_prop |=
HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3;
else
dev->node_props.debug_prop |=
HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2))
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
}
kfd_topology_set_dbg_firmware_support(dev);
}
int kfd_topology_add_device(struct kfd_node *gpu)
{
uint32_t gpu_id;
struct kfd_topology_device *dev;
struct kfd_cu_info cu_info;
int res = 0;
int i;
const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
gpu_id = kfd_generate_gpu_id(gpu);
if (gpu->xcp && !gpu->xcp->ddev) {
dev_warn(gpu->adev->dev,
"Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
gpu_id);
return 0;
} else {
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
}
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,
* else create a Virtual CRAT for this gpu device and then parse that
* CRAT to create a new topology device. Once created assign the gpu to
* that topology device
*/
down_write(&topology_lock);
dev = kfd_assign_gpu(gpu);
if (!dev)
res = kfd_topology_add_device_locked(gpu, gpu_id, &dev);
up_write(&topology_lock);
if (res)
return res;
dev->gpu_id = gpu_id;
gpu->id = gpu_id;
kfd_dev_create_p2p_links();
/* TODO: Move the following lines to function
* kfd_add_non_crat_information
*/
/* Fill-in additional information that is not available in CRAT but
* needed for the topology
*/
amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info);
for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) {
dev->node_props.name[i] = __tolower(asic_name[i]);
if (asic_name[i] == '\0')
break;
}
dev->node_props.name[i] = '\0';
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
dev->node_props.gfx_target_version =
gpu->kfd->device_info.gfx_target_version;
dev->node_props.vendor_id = gpu->adev->pdev->vendor;
dev->node_props.device_id = gpu->adev->pdev->device;
dev->node_props.capability |=
((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
dev->node_props.location_id |= dev->gpu->node_id;
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
dev->node_props.max_engine_clk_ccompute =
cpufreq_quick_get_max(0) / 1000;
if (gpu->xcp)
dev->node_props.drm_render_minor = gpu->xcp->ddev->render->index;
else
dev->node_props.drm_render_minor =
gpu->kfd->shared_resources.drm_render_minor;
dev->node_props.hive_id = gpu->kfd->hive_id;
dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu);
dev->node_props.num_sdma_xgmi_engines =
kfd_get_num_xgmi_sdma_engines(gpu);
dev->node_props.num_sdma_queues_per_engine =
gpu->kfd->device_info.num_sdma_queues_per_engine -
gpu->kfd->device_info.num_reserved_sdma_queues_per_engine;
dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
dev->gpu->adev->gds.gws_size : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
switch (dev->gpu->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_TONGA:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_PRE_1_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
case CHIP_CARRIZO:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
pr_debug("Adding doorbell packet type capability\n");
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
default:
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1))
WARN(1, "Unexpected ASIC family %u",
dev->gpu->adev->asic_type);
else
kfd_topology_set_capabilities(dev);
}
/*
 * Clear the ATS capability to fix a potentially missing or incorrect
 * corresponding bit in the CRAT provided by the BIOS.
 */
dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
/* Fix errors in CZ CRAT.
* simd_count: Carrizo CRAT reports wrong simd_count, probably
* because it doesn't consider masked out CUs
* max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
*/
if (dev->gpu->adev->asic_type == CHIP_CARRIZO) {
dev->node_props.simd_count =
cu_info.simd_per_cu * cu_info.cu_active_number;
dev->node_props.max_waves_per_simd = 10;
}
/* KFD only cares about SRAM ECC on the GFX block and HBM ECC on the UMC block */
dev->node_props.capability |=
((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
HSA_CAP_SRAM_EDCSUPPORTED : 0;
dev->node_props.capability |=
((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
HSA_CAP_MEM_EDCSUPPORTED : 0;
if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1))
dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
HSA_CAP_RASEVENTNOTIFY : 0;
if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
if (dev->gpu->adev->gmc.is_app_apu ||
dev->gpu->adev->gmc.xgmi.connected_to_cpu)
dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS;
kfd_debug_print_topology();
kfd_notify_gpu_change(gpu_id, 1);
return 0;
}
/**
* kfd_topology_update_io_links() - Update IO links after device removal.
* @proximity_domain: Proximity domain value of the dev being removed.
*
* The topology list currently is arranged in increasing order of
* proximity domain.
*
* Two things need to be done when a device is removed:
* 1. All the IO links to this device need to be removed.
* 2. All nodes after the current device node need to move
* up once this device node is removed from the topology
* list. As a result, the proximity domain values for
* all nodes after the node being deleted reduce by 1.
* This would also cause the proximity domain values for
* io links to be updated based on new proximity domain
* values.
*
* Context: The caller must hold write topology_lock.
*/
static void kfd_topology_update_io_links(int proximity_domain)
{
struct kfd_topology_device *dev;
struct kfd_iolink_properties *iolink, *p2plink, *tmp;
list_for_each_entry(dev, &topology_device_list, list) {
if (dev->proximity_domain > proximity_domain)
dev->proximity_domain--;
list_for_each_entry_safe(iolink, tmp, &dev->io_link_props, list) {
/*
* If there is an io link to the dev being deleted
* then remove that IO link also.
*/
if (iolink->node_to == proximity_domain) {
list_del(&iolink->list);
dev->node_props.io_links_count--;
} else {
if (iolink->node_from > proximity_domain)
iolink->node_from--;
if (iolink->node_to > proximity_domain)
iolink->node_to--;
}
}
list_for_each_entry_safe(p2plink, tmp, &dev->p2p_link_props, list) {
/*
* If there is a p2p link to the dev being deleted
* then remove that p2p link also.
*/
if (p2plink->node_to == proximity_domain) {
list_del(&p2plink->list);
dev->node_props.p2p_links_count--;
} else {
if (p2plink->node_from > proximity_domain)
p2plink->node_from--;
if (p2plink->node_to > proximity_domain)
p2plink->node_to--;
}
}
}
}
int kfd_topology_remove_device(struct kfd_node *gpu)
{
struct kfd_topology_device *dev, *tmp;
uint32_t gpu_id;
int res = -ENODEV;
int i = 0;
down_write(&topology_lock);
list_for_each_entry_safe(dev, tmp, &topology_device_list, list) {
if (dev->gpu == gpu) {
gpu_id = dev->gpu_id;
kfd_remove_sysfs_node_entry(dev);
kfd_release_topology_device(dev);
sys_props.num_devices--;
kfd_topology_update_io_links(i);
topology_crat_proximity_domain = sys_props.num_devices-1;
sys_props.generation_count++;
res = 0;
if (kfd_topology_update_sysfs() < 0)
kfd_topology_release_sysfs();
break;
}
i++;
}
up_write(&topology_lock);
if (!res)
kfd_notify_gpu_change(gpu_id, 0);
return res;
}
/* kfd_topology_enum_kfd_devices - Enumerate through all devices in KFD
 * topology. If a GPU device is found at @idx, a valid kfd_node pointer is
 * returned through @kdev
 * Return - 0: On success (@kdev will be NULL for non-GPU nodes)
 * -1: If end of list
 */
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev)
{
struct kfd_topology_device *top_dev;
uint8_t device_idx = 0;
*kdev = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list) {
if (device_idx == idx) {
*kdev = top_dev->gpu;
up_read(&topology_lock);
return 0;
}
device_idx++;
}
up_read(&topology_lock);
return -1;
}
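/* Return the APIC ID of the first CPU in @cpumask (the CPU number itself
 * on non-x86_64 builds), or -1 if the mask is empty or invalid
 */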
static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
{
int first_cpu_of_numa_node;
if (!cpumask || cpumask == cpu_none_mask)
return -1;
first_cpu_of_numa_node = cpumask_first(cpumask);
if (first_cpu_of_numa_node >= nr_cpu_ids)
return -1;
#ifdef CONFIG_X86_64
return cpu_data(first_cpu_of_numa_node).apicid;
#else
return first_cpu_of_numa_node;
#endif
}
/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
* of the given NUMA node (numa_node_id)
* Return -1 on failure
*/
int kfd_numa_node_to_apic_id(int numa_node_id)
{
if (numa_node_id == -1) {
pr_warn("Invalid NUMA Node. Use online CPU mask\n");
return kfd_cpumask_to_apic_id(cpu_online_mask);
}
return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
}
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
{
struct kfd_topology_device *dev;
unsigned int i = 0;
int r = 0;
down_read(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
if (!dev->gpu) {
i++;
continue;
}
seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
r = dqm_debugfs_hqds(m, dev->gpu->dqm);
if (r)
break;
}
up_read(&topology_lock);
return r;
}
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
{
struct kfd_topology_device *dev;
unsigned int i = 0;
int r = 0;
down_read(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
if (!dev->gpu) {
i++;
continue;
}
seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr);
if (r)
break;
}
up_read(&topology_lock);
return r;
}
#endif
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_topology.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
{
union PM4_MES_TYPE_3_HEADER header;
header.u32All = 0;
header.opcode = opcode;
header.count = packet_size / 4 - 2;
header.type = PM4_TYPE_3;
return header.u32All;
}
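/* Build a MAP_PROCESS packet describing a process' PASID, page table base,
 * SH_MEM configuration and GDS/GWS/OAC allocation to the HWS
 */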
static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
struct pm4_mes_map_process *packet;
packet = (struct pm4_mes_map_process *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
packet->bitfields3.page_table_base = qpd->page_table_base;
packet->bitfields10.gds_size = qpd->gds_size;
packet->bitfields10.num_gws = qpd->num_gws;
packet->bitfields10.num_oac = qpd->num_oac;
packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
return 0;
}
static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
struct pm4_mes_runlist *packet;
int concurrent_proc_cnt = 0;
struct kfd_node *kfd = pm->dqm->dev;
if (WARN_ON(!ib))
return -EFAULT;
/* Determine the number of processes to map together to HW:
 * it cannot exceed the number of VMIDs available to the
 * scheduler, and it is the smaller of the number of processes
 * in the runlist and the kfd module parameter
 * hws_max_conc_proc.
 * Note: the arbitration between the number of VMIDs and
 * hws_max_conc_proc has been done in
 * kgd2kfd_device_init().
 */
concurrent_proc_cnt = min(pm->dqm->processes_count,
kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_runlist));
packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
sizeof(struct pm4_mes_runlist));
packet->bitfields4.ib_size = ib_size_in_dwords;
packet->bitfields4.chain = chain ? 1 : 0;
packet->bitfields4.offload_polling = 0;
packet->bitfields4.valid = 1;
packet->bitfields4.process_cnt = concurrent_proc_cnt;
packet->ordinal2 = lower_32_bits(ib);
packet->bitfields3.ib_base_hi = upper_32_bits(ib);
return 0;
}
static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
struct scheduling_resources *res)
{
struct pm4_mes_set_resources *packet;
packet = (struct pm4_mes_set_resources *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
sizeof(struct pm4_mes_set_resources));
packet->bitfields2.queue_type =
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
packet->gws_mask_lo = lower_32_bits(res->gws_mask);
packet->gws_mask_hi = upper_32_bits(res->gws_mask);
packet->queue_mask_lo = lower_32_bits(res->queue_mask);
packet->queue_mask_hi = upper_32_bits(res->queue_mask);
return 0;
}
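/* Build a MAP_QUEUES packet for a single compute, SDMA or DIQ queue,
 * pointing the HWS at the queue's MQD and write pointer
 */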
static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
struct pm4_mes_map_queues *packet;
bool use_static = is_static;
packet = (struct pm4_mes_map_queues *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
sizeof(struct pm4_mes_map_queues));
packet->bitfields2.num_queues = 1;
packet->bitfields2.queue_sel =
queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
packet->bitfields2.queue_type =
queue_type__mes_map_queues__normal_compute_vi;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_COMPUTE:
if (use_static)
packet->bitfields2.queue_type =
queue_type__mes_map_queues__normal_latency_static_queue_vi;
break;
case KFD_QUEUE_TYPE_DIQ:
packet->bitfields2.queue_type =
queue_type__mes_map_queues__debug_interface_queue_vi;
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
use_static = false; /* no static queues under SDMA */
break;
default:
WARN(1, "queue type %d", q->properties.type);
return -EINVAL;
}
packet->bitfields3.doorbell_offset =
q->properties.doorbell_off;
packet->mqd_addr_lo =
lower_32_bits(q->gart_mqd_addr);
packet->mqd_addr_hi =
upper_32_bits(q->gart_mqd_addr);
packet->wptr_addr_lo =
lower_32_bits((uint64_t)q->properties.write_ptr);
packet->wptr_addr_hi =
upper_32_bits((uint64_t)q->properties.write_ptr);
return 0;
}
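/* Build an UNMAP_QUEUES packet that preempts or resets the compute
 * queues selected by the given filter
 */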
static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset)
{
struct pm4_mes_unmap_queues *packet;
packet = (struct pm4_mes_unmap_queues *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues));
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__compute;
if (reset)
packet->bitfields2.action =
action__mes_unmap_queues__reset_queues;
else
packet->bitfields2.action =
action__mes_unmap_queues__preempt_queues;
switch (filter) {
case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
packet->bitfields3a.pasid = filter_param;
break;
case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_queues;
break;
case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
/* in this case, we do not preempt static queues */
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
break;
default:
WARN(1, "filter %d", filter);
return -EINVAL;
}
return 0;
}
static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value)
{
struct pm4_mes_query_status *packet;
packet = (struct pm4_mes_query_status *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_query_status));
packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
sizeof(struct pm4_mes_query_status));
packet->bitfields2.context_id = 0;
packet->bitfields2.interrupt_sel =
interrupt_sel__mes_query_status__completion_status;
packet->bitfields2.command =
command__mes_query_status__fence_only_after_write_ack;
packet->addr_hi = upper_32_bits((uint64_t)fence_address);
packet->addr_lo = lower_32_bits((uint64_t)fence_address);
packet->data_hi = upper_32_bits((uint64_t)fence_value);
packet->data_lo = lower_32_bits((uint64_t)fence_value);
return 0;
}
static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
{
struct pm4_mec_release_mem *packet;
packet = (struct pm4_mec_release_mem *)buffer;
memset(buffer, 0, sizeof(*packet));
packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
sizeof(*packet));
packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
packet->bitfields2.tcl1_action_ena = 1;
packet->bitfields2.tc_action_ena = 1;
packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
packet->bitfields2.atc = 0;
packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
packet->bitfields3.int_sel =
int_sel___release_mem__send_interrupt_after_write_confirm;
packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
packet->address_hi = upper_32_bits(gpu_addr);
packet->data_lo = 0;
return 0;
}
const struct packet_manager_funcs kfd_vi_pm_funcs = {
.map_process = pm_map_process_vi,
.runlist = pm_runlist_vi,
.set_resources = pm_set_resources_vi,
.map_queues = pm_map_queues_vi,
.unmap_queues = pm_unmap_queues_vi,
.set_grace_period = NULL,
.query_status = pm_query_status_vi,
.release_mem = pm_release_mem_vi,
.map_process_size = sizeof(struct pm4_mes_map_process),
.runlist_size = sizeof(struct pm4_mes_runlist),
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
.set_grace_period_size = 0,
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_debug.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
/*
* GFX9 SQ Interrupts
*
* There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
* packet to the Interrupt Handler:
* Auto - Generated by the SQG (various cmd overflows, timestamps etc)
* Wave - Generated by S_SENDMSG through a shader program
* Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
*
* The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
* 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
*
* - context_id0[27:26]
* Encoding type (0 = Auto, 1 = Wave, 2 = Error)
*
* - context_id0[13]
* PRIV bit indicates that Wave S_SEND or error occurred within trap
*
* - {context_id1[7:0],context_id0[31:28],context_id0[11:0]}
* 24-bit data with the following layout per encoding type:
* Auto - only context_id0[8:0] is used, which reports various interrupts
* generated by SQG. The rest is 0.
* Wave - user data sent from m0 via S_SENDMSG
* Error - Error type (context_id1[7:4]), Error Details (rest of bits)
*
* The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave
* S_SENDMSG and Errors. These are 0 for Auto.
*/
enum SQ_INTERRUPT_WORD_ENCODING {
SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
SQ_INTERRUPT_WORD_ENCODING_INST,
SQ_INTERRUPT_WORD_ENCODING_ERROR,
};
enum SQ_INTERRUPT_ERROR_TYPE {
SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
};
/* SQ_INTERRUPT_WORD_AUTO_CTXID */
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0
#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 1
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 2
#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 3
#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 4
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 5
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 6
#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 7
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 8
#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 24
#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 26
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x00000001
#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x00000002
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x00000004
#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x00000008
#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x00000010
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x00000020
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x00000040
#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x00000080
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x00000100
#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0x0c000000
/* SQ_INTERRUPT_WORD_WAVE_CTXID */
#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 12
#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 13
#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 14
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 18
#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 20
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 24
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 26
#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x00000fff
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x00001000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x00002000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x0003c000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x000c0000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x00f00000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000
/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */
#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))
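/* Illustrative example: ctx0 = 0xa1230456, ctx1 = 0x7b yields
 * 0x456 | 0xa000 | 0x7b0000 = 0x7ba456.
 */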
#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
/*
 * The debugger will send user data (m0) with PRIV=1 to indicate that it
 * requires notification from the KFD with the following queue id
 * (DOORBELL_ID) and trap code (TRAP_CODE).
 */
#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff
#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10
#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00
#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) & \
KFD_INT_DATA_DEBUG_DOORBELL_MASK)
#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) & \
KFD_INT_DATA_DEBUG_TRAP_CODE_MASK) \
>> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT)
#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
>> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
int old_poison, ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return;
/* all queues of a process will be unmapped at one time */
old_poison = atomic_cmpxchg(&p->poison, 0, 1);
kfd_unref_process(p);
if (old_poison)
return;
switch (client_id) {
case SOC15_IH_CLIENTID_SE0SH:
case SOC15_IH_CLIENTID_SE1SH:
case SOC15_IH_CLIENTID_SE2SH:
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA2:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
break;
default:
break;
}
kfd_signal_poison_consumed_event(dev, pasid);
/* If resetting the queue succeeds, do page retirement without a gpu
 * reset; if resetting the queue fails, fall back to the gpu reset
 * solution.
 */
if (!ret) {
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
} else {
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
}
}
static bool context_id_expected(struct kfd_dev *dev)
{
switch (KFD_GC_VERSION(dev)) {
case IP_VERSION(9, 0, 1):
return dev->mec_fw_version >= 0x817a;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
return dev->mec_fw_version >= 0x17a;
default:
/* Other GFXv9 and later GPUs always send valid context IDs
 * on legitimate events
 */
return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 1);
}
}
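/* Top-half interrupt filter: return true if this IH ring entry is of
 * interest to KFD and should be forwarded to the interrupt work queue
 * (event_interrupt_wq_v9).
 */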
static bool event_interrupt_isr_v9(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
{
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle interrupts from KFD VMIDs */
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
(vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd))
return false;
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle clients we care about */
if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
client_id != SOC15_IH_CLIENTID_SDMA0 &&
client_id != SOC15_IH_CLIENTID_SDMA1 &&
client_id != SOC15_IH_CLIENTID_SDMA2 &&
client_id != SOC15_IH_CLIENTID_SDMA3 &&
client_id != SOC15_IH_CLIENTID_SDMA4 &&
client_id != SOC15_IH_CLIENTID_SDMA5 &&
client_id != SOC15_IH_CLIENTID_SDMA6 &&
client_id != SOC15_IH_CLIENTID_SDMA7 &&
client_id != SOC15_IH_CLIENTID_VMC &&
client_id != SOC15_IH_CLIENTID_VMC1 &&
client_id != SOC15_IH_CLIENTID_UTCL2 &&
client_id != SOC15_IH_CLIENTID_SE0SH &&
client_id != SOC15_IH_CLIENTID_SE1SH &&
client_id != SOC15_IH_CLIENTID_SE2SH &&
client_id != SOC15_IH_CLIENTID_SE3SH &&
!KFD_IRQ_IS_FENCE(client_id, source_id))
return false;
/* This is a known issue for gfx9. Under non-HWS, the pasid is not set
 * in the interrupt payload, so we need to find out the pasid on our
 * own.
 */
if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
const uint32_t pasid_mask = 0xffff;
*patched_flag = true;
memcpy(patched_ihre, ih_ring_entry,
dev->kfd->device_info.ih_ring_entry_size);
pasid = dev->dqm->vmid_pasid[vmid];
/* Patch the pasid field */
patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
& ~pasid_mask) | pasid);
}
pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
client_id, source_id, vmid, pasid);
pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
return false;
/* Workaround CP firmware sending bogus signals with 0 context_id.
* Those can be safely ignored on hardware and firmware versions that
* include a valid context_id on legitimate signals. This avoids the
* slow path in kfd_signal_event_interrupt that scans all event slots
* for signaled events.
*/
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) {
uint32_t context_id =
SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
if (context_id == 0 && context_id_expected(dev->kfd))
return false;
}
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
source_id == SOC15_INTSRC_SDMA_ECC ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
KFD_IRQ_IS_FENCE(client_id, source_id) ||
((client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) &&
!amdgpu_no_queue_eviction_on_vm_fault);
}
static void event_interrupt_wq_v9(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, pasid, vmid;
uint32_t context_id0, context_id1;
uint32_t sq_intr_err, sq_int_data, encoding;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
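/* Note: the third argument to kfd_signal_event_interrupt() below is the
 * number of valid bits in the partial event ID carried by the interrupt.
 */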
if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
client_id == SOC15_IH_CLIENTID_SE0SH ||
client_id == SOC15_IH_CLIENTID_SE1SH ||
client_id == SOC15_IH_CLIENTID_SE2SH ||
client_id == SOC15_IH_CLIENTID_SE3SH) {
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
sq_int_data = KFD_CONTEXT_ID_GET_SQ_INT_DATA(context_id0, context_id1);
encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
pr_debug(
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
sq_int_data);
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(sq_int_data),
KFD_DEBUG_TRAP_CODE(sq_int_data),
NULL, 0))
return;
}
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
break;
default:
break;
}
kfd_signal_event_interrupt(pasid, sq_int_data, 24);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
NULL, 0);
}
} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
client_id == SOC15_IH_CLIENTID_SDMA1 ||
client_id == SOC15_IH_CLIENTID_SDMA2 ||
client_id == SOC15_IH_CLIENTID_SDMA3 ||
client_id == SOC15_IH_CLIENTID_SDMA4 ||
client_id == SOC15_IH_CLIENTID_SDMA5 ||
client_id == SOC15_IH_CLIENTID_SDMA6 ||
client_id == SOC15_IH_CLIENTID_SDMA7) {
if (source_id == SOC15_INTSRC_SDMA_TRAP) {
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
struct kfd_hsa_memory_exception_data exception_data;
if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
info.vmid = vmid;
info.mc_id = client_id;
info.page_addr = ih_ring_entry[4] |
(uint64_t)(ih_ring_entry[5] & 0xf) << 32;
info.prot_valid = ring_id & 0x08;
info.prot_read = ring_id & 0x10;
info.prot_write = ring_id & 0x20;
memset(&exception_data, 0, sizeof(exception_data));
exception_data.gpu_id = dev->id;
exception_data.va = (info.page_addr) << PAGE_SHIFT;
exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
exception_data.failure.imprecise = 0;
kfd_set_dbg_ev_from_interrupt(dev,
pasid,
-1,
KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
&exception_data,
sizeof(exception_data));
kfd_smi_event_update_vmfault(dev, pasid);
} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
kfd_process_close_interrupt_drain(pasid);
}
}
static bool event_interrupt_isr_v9_4_3(struct kfd_node *node,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
{
uint16_t node_id, vmid;
/*
 * For GFX 9.4.3, process the interrupt if:
 * - the NodeID field in the IH entry matches the corresponding bit
 * set in interrupt_bitmap bits 0-15,
 * OR
 * - the partition mode is CPX and the interrupt came from
 * Node_id 0, 4, 8 or 12, in which case bit (16 + client id)
 * must be set in interrupt_bitmap bits 16-31.
 */
node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (kfd_irq_is_from_node(node, node_id, vmid))
return event_interrupt_isr_v9(node, ih_ring_entry,
patched_ihre, patched_flag);
return false;
}
const struct kfd_event_interrupt_class event_interrupt_class_v9 = {
.interrupt_isr = event_interrupt_isr_v9,
.interrupt_wq = event_interrupt_wq_v9,
};
const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3 = {
.interrupt_isr = event_interrupt_isr_v9_4_3,
.interrupt_wq = event_interrupt_wq_v9,
};
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
#include "kfd_priv.h"
#include "amdgpu_ids.h"
static unsigned int pasid_bits = 16;
static bool pasids_allocated; /* = false */
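/* Shrink pasid_bits so that the limit (1 << pasid_bits) does not exceed
 * new_limit. Fails once user PASIDs have already been allocated.
 */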
bool kfd_set_pasid_limit(unsigned int new_limit)
{
if (new_limit < 2)
return false;
if (new_limit < (1U << pasid_bits)) {
if (pasids_allocated)
/* We've already allocated user PASIDs, too late to
* change the limit
*/
return false;
while (new_limit < (1U << pasid_bits))
pasid_bits--;
}
return true;
}
unsigned int kfd_get_pasid_limit(void)
{
return 1U << pasid_bits;
}
u32 kfd_pasid_alloc(void)
{
int r = amdgpu_pasid_alloc(pasid_bits);
if (r > 0) {
pasids_allocated = true;
return r;
}
return 0;
}
void kfd_pasid_free(u32 pasid)
{
amdgpu_pasid_free(pasid);
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_pasid.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_device_queue_manager.h"
#include "vega10_enum.h"
#include "gc/gc_9_4_3_sh_mask.h"
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
void device_queue_manager_init_v9(
struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->update_qpd = update_qpd_v9;
asic_ops->init_sdma_vm = init_sdma_vm_v9;
asic_ops->mqd_manager_init = mqd_manager_init_v9;
}
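/* SH_MEM_BASES packs the upper 16 bits of the 64-bit LDS (shared) and
 * scratch (private) aperture base addresses.
 */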
static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
{
uint32_t shared_base = pdd->lds_base >> 48;
uint32_t private_base = pdd->scratch_base >> 48;
return (shared_base << SH_MEM_BASES__SHARED_BASE__SHIFT) |
private_base;
}
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
pdd = qpd_to_pdd(qpd);
/* check if the sh_mem_config register is already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
if (dqm->dev->kfd->noretry)
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {
if (!pdd->process->xnack_enabled)
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
else
qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT);
}
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
qpd->sh_mem_config);
return 0;
}
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
/* Not needed on SDMAv4 any more */
q->properties.sdma_vm_addr = 0;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
static inline struct process_queue_node *get_queue_by_qid(
struct process_queue_manager *pqm, unsigned int qid)
{
struct process_queue_node *pqn;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if ((pqn->q && pqn->q->properties.queue_id == qid) ||
(pqn->kq && pqn->kq->queue->properties.queue_id == qid))
return pqn;
}
return NULL;
}
static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
unsigned int qid)
{
if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return -EINVAL;
if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
return -ENOSPC;
}
return 0;
}
static int find_available_queue_slot(struct process_queue_manager *pqm,
unsigned int *qid)
{
unsigned long found;
found = find_first_zero_bit(pqm->queue_slot_bitmap,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("Cannot open more queues for process with pasid 0x%x\n",
pqm->process->pasid);
return -ENOMEM;
}
set_bit(found, pqm->queue_slot_bitmap);
*qid = found;
return 0;
}
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
struct kfd_node *dev = pdd->dev;
if (pdd->already_dequeued)
return;
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
pdd->already_dequeued = true;
}
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
struct kgd_mem *mem = NULL;
int ret;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
pr_err("Queue id does not match any known queue\n");
return -EINVAL;
}
if (pqn->q)
dev = pqn->q->device;
if (WARN_ON(!dev))
return -ENODEV;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return -EINVAL;
}
/* Only one queue per process can have GWS assigned */
if (gws && pdd->qpd.num_gws)
return -EBUSY;
if (!gws && pdd->qpd.num_gws == 0)
return -EINVAL;
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && !dev->kfd->shared_resources.enable_mes) {
if (gws)
ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
gws, &mem);
else
ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
pqn->q->gws);
if (unlikely(ret))
return ret;
pqn->q->gws = mem;
} else {
/*
* Intentionally set GWS to a non-NULL value
* for devices that do not use GWS for global wave
* synchronization but require the formality
* of setting GWS for cooperative groups.
*/
pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
}
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++)
kfd_process_dequeue_from_device(p->pdds[i]);
}
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
GFP_KERNEL);
if (!pqm->queue_slot_bitmap)
return -ENOMEM;
pqm->process = p;
return 0;
}
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
if (pqn->q && pqn->q->gws &&
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!pqn->q->device->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
pqn->q->gws);
kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
list_del(&pqn->process_queue_list);
kfree(pqn);
}
bitmap_free(pqm->queue_slot_bitmap);
pqm->queue_slot_bitmap = NULL;
}
static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_node *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, struct amdgpu_bo *wptr_bo,
unsigned int qid)
{
int retval;
/* Doorbell initialized in user space */
q_properties->doorbell_ptr = NULL;
q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);
/* let DQM handle it */
q_properties->vmid = 0;
q_properties->queue_id = qid;
retval = init_queue(q, q_properties);
if (retval != 0)
return retval;
(*q)->device = dev;
(*q)->process = pqm->process;
if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_GANG_CTX_SIZE,
&(*q)->gang_ctx_bo,
&(*q)->gang_ctx_gpu_addr,
&(*q)->gang_ctx_cpu_ptr,
false);
if (retval) {
pr_err("failed to allocate gang context bo\n");
goto cleanup;
}
memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
(*q)->wptr_bo = wptr_bo;
}
pr_debug("PQM After init queue");
return 0;
cleanup:
uninit_queue(*q);
*q = NULL;
return retval;
}
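/* Create a user-mode queue (or a DIQ kernel queue) for the process on
 * the given device and register it with the device queue manager. On
 * success *qid holds the assigned queue slot.
 */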
int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_node *dev,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
struct amdgpu_bo *wptr_bo,
const struct kfd_criu_queue_priv_data *q_data,
const void *restore_mqd,
const void *restore_ctl_stack,
uint32_t *p_doorbell_offset_in_process)
{
int retval;
struct kfd_process_device *pdd;
struct queue *q;
struct process_queue_node *pqn;
struct kernel_queue *kq;
enum kfd_queue_type type = properties->type;
unsigned int max_queues = 127; /* HWS limit */
/*
* On GFX 9.4.3, increase the number of queues that
* can be created to 255. No HWS limit on GFX 9.4.3.
*/
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))
max_queues = 255;
q = NULL;
kq = NULL;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return -1;
}
/*
 * For a debug process, verify that it is within the static queues limit;
 * currently the limit is set to half of the total available HQD slots.
 * If we are just about to create a DIQ, the is_debug flag is not set yet,
 * hence we also check the type.
 */
if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
max_queues = dev->kfd->device_info.max_no_of_hqd/2;
if (pdd->qpd.queue_count >= max_queues)
return -ENOSPC;
if (q_data) {
retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
*qid = q_data->q_id;
} else
retval = find_available_queue_slot(pqm, qid);
if (retval != 0)
return retval;
if (list_empty(&pdd->qpd.queues_list) &&
list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
retval = -ENOMEM;
goto err_allocate_pqn;
}
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
/* SDMA queues are always allocated statically no matter
 * which scheduler mode is used. We also do not need to
 * check whether an SDMA queue can be allocated here, because
 * allocate_sdma_queue() in create_queue() has the
 * corresponding check logic.
 */
retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
restore_mqd, restore_ctl_stack);
print_queue(q);
break;
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((dev->dqm->sched_policy ==
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
restore_mqd, restore_ctl_stack);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
if (!kq) {
retval = -ENOMEM;
goto err_create_queue;
}
kq->queue->properties.queue_id = *qid;
pqn->kq = kq;
pqn->q = NULL;
retval = kfd_process_drain_interrupts(pdd);
if (retval)
break;
retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
kq, &pdd->qpd);
break;
default:
WARN(1, "Invalid queue type %d", type);
retval = -EINVAL;
}
if (retval != 0) {
pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
if (q && p_doorbell_offset_in_process) {
/* Return the doorbell offset within the doorbell page
* to the caller so it can be passed up to user mode
* (in bytes).
* relative doorbell index = Absolute doorbell index -
* absolute index of first doorbell in the page.
*/
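/* Illustrative example: the third doorbell in the page
 * (doorbell_off == first_db_index + 2) yields an offset of
 * 2 * sizeof(uint32_t) = 8 bytes.
 */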
uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
pdd->qpd.proc_doorbells,
0);
*p_doorbell_offset_in_process = (q->properties.doorbell_off
- first_db_index) * sizeof(uint32_t);
}
pr_debug("PQM After DQM create queue\n");
list_add(&pqn->process_queue_list, &pqm->queues);
if (q) {
pr_debug("PQM done creating queue\n");
kfd_procfs_add_queue(q);
print_queue_properties(&q->properties);
}
return retval;
err_create_queue:
uninit_queue(q);
if (kq)
kernel_queue_uninit(kq, false);
kfree(pqn);
err_allocate_pqn:
/* if the queues list is empty, unregister the process from the device */
clear_bit(*qid, pqm->queue_slot_bitmap);
if (list_empty(&pdd->qpd.queues_list) &&
list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
struct device_queue_manager *dqm;
struct kfd_node *dev;
int retval;
dqm = NULL;
retval = 0;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
pr_err("Queue id does not match any known queue\n");
return -EINVAL;
}
dev = NULL;
if (pqn->kq)
dev = pqn->kq->dev;
if (pqn->q)
dev = pqn->q->device;
if (WARN_ON(!dev))
return -ENODEV;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return -1;
}
if (pqn->kq) {
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
kernel_queue_uninit(pqn->kq, false);
}
if (pqn->q) {
kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME)
goto err_destroy_queue;
}
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info,
pqn->q->gws);
pdd->qpd.num_gws = 0;
}
if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev,
pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
}
uninit_queue(pqn->q);
}
list_del(&pqn->process_queue_list);
kfree(pqn);
clear_bit(qid, pqm->queue_slot_bitmap);
if (list_empty(&pdd->qpd.queues_list) &&
list_empty(&pdd->qpd.priv_queue_list))
dqm->ops.unregister_process(dqm, &pdd->qpd);
err_destroy_queue:
return retval;
}
int pqm_update_queue_properties(struct process_queue_manager *pqm,
unsigned int qid, struct queue_properties *p)
{
int retval;
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
pr_debug("No queue %d exists for update operation\n", qid);
return -EFAULT;
}
pqn->q->properties.queue_address = p->queue_address;
pqn->q->properties.queue_size = p->queue_size;
pqn->q->properties.queue_percent = p->queue_percent;
pqn->q->properties.priority = p->priority;
pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
if (retval != 0)
return retval;
return 0;
}
int pqm_update_mqd(struct process_queue_manager *pqm,
unsigned int qid, struct mqd_update_info *minfo)
{
int retval;
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
pr_debug("No queue %d exists for update operation\n", qid);
return -EFAULT;
}
/* CUs are masked for debugger requirements, so deny the user mask */
if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
return -EBUSY;
/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
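/* Each 2-bit pair must be 0x0 (both CUs off) or 0x3 (both CUs on);
 * 0x1 and 0x2 are rejected below.
 */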
if (minfo && minfo->cu_mask.ptr &&
KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
int i;
for (i = 0; i < minfo->cu_mask.count; i += 2) {
uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;
if (cu_pair && cu_pair != 0x3) {
pr_debug("CUs must be adjacent pairwise enabled.\n");
return -EINVAL;
}
}
}
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, minfo);
if (retval != 0)
return retval;
if (minfo && minfo->cu_mask.ptr)
pqn->q->properties.is_user_cu_masked = true;
return 0;
}
struct kernel_queue *pqm_get_kernel_queue(
struct process_queue_manager *pqm,
unsigned int qid)
{
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
if (pqn && pqn->kq)
return pqn->kq;
return NULL;
}
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
unsigned int qid)
{
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
return pqn ? pqn->q : NULL;
}
int pqm_get_wave_state(struct process_queue_manager *pqm,
unsigned int qid,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
pr_debug("amdkfd: No queue %d exists for operation\n",
qid);
return -EFAULT;
}
return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
pqn->q,
ctl_stack,
ctl_stack_used_size,
save_area_used_size);
}
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
uint64_t exception_clear_mask,
void __user *buf,
int *num_qss_entries,
uint32_t *entry_size)
{
struct process_queue_node *pqn;
struct kfd_queue_snapshot_entry src;
uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
int r = 0;
*num_qss_entries = 0;
if (!(*entry_size))
return -EINVAL;
*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
mutex_lock(&pqm->process->event_mutex);
memset(&src, 0, sizeof(src));
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if (!pqn->q)
continue;
if (*num_qss_entries < tmp_qss_entries) {
set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);
if (copy_to_user(buf, &src, *entry_size)) {
r = -EFAULT;
break;
}
buf += tmp_entry_size;
}
*num_qss_entries += 1;
}
mutex_unlock(&pqm->process->event_mutex);
return r;
}
static int get_queue_data_sizes(struct kfd_process_device *pdd,
struct queue *q,
uint32_t *mqd_size,
uint32_t *ctl_stack_size)
{
int ret;
ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
q->properties.queue_id,
mqd_size,
ctl_stack_size);
if (ret)
pr_err("Failed to get queue dump info (%d)\n", ret);
return ret;
}
int kfd_process_get_queue_info(struct kfd_process *p,
uint32_t *num_queues,
uint64_t *priv_data_sizes)
{
uint32_t extra_data_sizes = 0;
struct queue *q;
int i;
int ret;
*num_queues = 0;
/* Run over all PDDs of the process */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
list_for_each_entry(q, &pdd->qpd.queues_list, list) {
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
uint32_t mqd_size, ctl_stack_size;
*num_queues = *num_queues + 1;
ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
if (ret)
return ret;
extra_data_sizes += mqd_size + ctl_stack_size;
} else {
pr_err("Unsupported queue type (%d)\n", q->properties.type);
return -EOPNOTSUPP;
}
}
}
*priv_data_sizes = extra_data_sizes +
(*num_queues * sizeof(struct kfd_criu_queue_priv_data));
return 0;
}
static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
unsigned int qid,
void *mqd,
void *ctl_stack)
{
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
pr_debug("amdkfd: No queue %d exists for operation\n", qid);
return -EFAULT;
}
if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
pr_err("amdkfd: queue dumping not supported on this device\n");
return -EOPNOTSUPP;
}
return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
pqn->q, mqd, ctl_stack);
}
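/* Fill the CRIU private data for one queue; the MQD and control stack
 * are stored immediately after the priv data struct.
 */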
static int criu_checkpoint_queue(struct kfd_process_device *pdd,
struct queue *q,
struct kfd_criu_queue_priv_data *q_data)
{
uint8_t *mqd, *ctl_stack;
int ret;
mqd = (void *)(q_data + 1);
ctl_stack = mqd + q_data->mqd_size;
q_data->gpu_id = pdd->user_gpu_id;
q_data->type = q->properties.type;
q_data->format = q->properties.format;
q_data->q_id = q->properties.queue_id;
q_data->q_address = q->properties.queue_address;
q_data->q_size = q->properties.queue_size;
q_data->priority = q->properties.priority;
q_data->q_percent = q->properties.queue_percent;
q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
q_data->doorbell_id = q->doorbell_id;
q_data->sdma_id = q->sdma_id;
q_data->eop_ring_buffer_address =
q->properties.eop_ring_buffer_address;
q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;
q_data->ctx_save_restore_area_address =
q->properties.ctx_save_restore_area_address;
q_data->ctx_save_restore_area_size =
q->properties.ctx_save_restore_area_size;
q_data->gws = !!q->gws;
ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
if (ret) {
pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
return ret;
}
pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
return ret;
}
static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
uint8_t __user *user_priv,
unsigned int *q_index,
uint64_t *queues_priv_data_offset)
{
unsigned int q_private_data_size = 0;
uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
struct queue *q;
int ret = 0;
list_for_each_entry(q, &pdd->qpd.queues_list, list) {
struct kfd_criu_queue_priv_data *q_data;
uint64_t q_data_size;
uint32_t mqd_size;
uint32_t ctl_stack_size;
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
q->properties.type != KFD_QUEUE_TYPE_SDMA &&
q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {
pr_err("Unsupported queue type (%d)\n", q->properties.type);
ret = -EOPNOTSUPP;
break;
}
ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
if (ret)
break;
q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;
/* Increase local buffer space if needed */
if (q_private_data_size < q_data_size) {
kfree(q_private_data);
q_private_data = kzalloc(q_data_size, GFP_KERNEL);
if (!q_private_data) {
ret = -ENOMEM;
break;
}
q_private_data_size = q_data_size;
}
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
/* data stored in this order: priv_data, mqd, ctl_stack */
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
ret = criu_checkpoint_queue(pdd, q, q_data);
if (ret)
break;
q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;
ret = copy_to_user(user_priv + *queues_priv_data_offset,
q_data, q_data_size);
if (ret) {
ret = -EFAULT;
break;
}
*queues_priv_data_offset += q_data_size;
*q_index = *q_index + 1;
}
kfree(q_private_data);
return ret;
}
int kfd_criu_checkpoint_queues(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_data_offset)
{
int ret = 0, pdd_index, q_index = 0;
for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
struct kfd_process_device *pdd = p->pdds[pdd_index];
/*
* criu_checkpoint_queues_device will copy data to user and update q_index and
* queues_priv_data_offset
*/
ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
priv_data_offset);
if (ret)
break;
}
return ret;
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
struct kfd_criu_queue_priv_data *q_data)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
qp->priority = q_data->priority;
qp->queue_address = q_data->q_address;
qp->queue_size = q_data->q_size;
qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
qp->ctl_stack_size = q_data->ctl_stack_size;
qp->type = q_data->type;
qp->format = q_data->format;
}
int kfd_criu_restore_queue(struct kfd_process *p,
uint8_t __user *user_priv_ptr,
uint64_t *priv_data_offset,
uint64_t max_priv_data_size)
{
uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
struct kfd_criu_queue_priv_data *q_data;
struct kfd_process_device *pdd;
uint64_t q_extra_data_size;
struct queue_properties qp;
unsigned int queue_id;
int ret = 0;
if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
return -EINVAL;
q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
if (!q_data)
return -ENOMEM;
ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
if (ret) {
ret = -EFAULT;
goto exit;
}
*priv_data_offset += sizeof(*q_data);
q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;
if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
ret = -EINVAL;
goto exit;
}
q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
if (!q_extra_data) {
ret = -ENOMEM;
goto exit;
}
ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
if (ret) {
ret = -EFAULT;
goto exit;
}
*priv_data_offset += q_extra_data_size;
pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
if (!pdd) {
pr_err("Failed to get pdd\n");
ret = -EINVAL;
goto exit;
}
/* data stored in this order: mqd, ctl_stack */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
set_queue_properties_from_criu(&qp, q_data);
print_queue_properties(&qp);
ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
goto exit;
}
if (q_data->gws)
ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
exit:
if (ret)
pr_err("Failed to restore queue (%d)\n", ret);
else
pr_debug("Queue id %d was restored successfully\n", queue_id);
kfree(q_data);
return ret;
}
int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
unsigned int qid,
uint32_t *mqd_size,
uint32_t *ctl_stack_size)
{
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
pr_debug("amdkfd: No queue %d exists for operation\n", qid);
return -EFAULT;
}
if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
pr_err("amdkfd: queue dumping not supported on this device\n");
return -EOPNOTSUPP;
}
pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
pqn->q, mqd_size,
ctl_stack_size);
return 0;
}
#if defined(CONFIG_DEBUG_FS)
int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
struct process_queue_manager *pqm = data;
struct process_queue_node *pqn;
struct queue *q;
enum KFD_MQD_TYPE mqd_type;
struct mqd_manager *mqd_mgr;
int r = 0, xcc, num_xccs = 1;
void *mqd;
uint64_t size = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if (pqn->q) {
q = pqn->q;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
seq_printf(m, " SDMA queue on device %x\n",
q->device->id);
mqd_type = KFD_MQD_TYPE_SDMA;
break;
case KFD_QUEUE_TYPE_COMPUTE:
seq_printf(m, " Compute queue on device %x\n",
q->device->id);
mqd_type = KFD_MQD_TYPE_CP;
num_xccs = NUM_XCC(q->device->xcc_mask);
break;
default:
seq_printf(m,
" Bad user queue type %d on device %x\n",
q->properties.type, q->device->id);
continue;
}
mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
size = mqd_mgr->mqd_stride(mqd_mgr,
&q->properties);
} else if (pqn->kq) {
q = pqn->kq->queue;
mqd_mgr = pqn->kq->mqd_mgr;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_DIQ:
seq_printf(m, " DIQ on device %x\n",
pqn->kq->dev->id);
break;
default:
seq_printf(m,
" Bad kernel queue type %d on device %x\n",
q->properties.type,
pqn->kq->dev->id);
continue;
}
} else {
seq_printf(m,
" Weird: Queue node with neither kernel nor user queue\n");
continue;
}
for (xcc = 0; xcc < num_xccs; xcc++) {
mqd = q->mqd + size * xcc;
r = mqd_mgr->debugfs_show_mqd(m, mqd);
if (r != 0)
break;
}
}
return r;
}
#endif
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2015-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * used in the CRAT.
 */
static uint32_t gpu_processor_id_low = 0x80001000;
/* Return the next available gpu_processor_id and increment it for the
 * next GPU
 * @total_cu_count - Total CUs present in the GPU including ones that are
 * masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
unsigned int total_cu_count)
{
int current_id = gpu_processor_id_low;
gpu_processor_id_low += total_cu_count;
return current_id;
}
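/* Per-ASIC GPU cache topology tables used to report cache information
 * for each supported GPU.
 */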
static struct kfd_gpu_cache_info kaveri_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache (in SQC module) per bank */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache (in SQC module) per bank */
.cache_size = 8,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
/* TODO: Add L2 Cache information */
};
static struct kfd_gpu_cache_info carrizo_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache (in SQC module) per bank */
.cache_size = 8,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 4,
},
{
/* Scalar L1 Data Cache (in SQC module) per bank. */
.cache_size = 4,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 4,
},
/* TODO: Add L2 Cache information */
};
#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info
/* NOTE: L1 cache information has been updated and L2/L3
 * cache information has been added for Vega10 and
 * newer ASICs. The unit for cache_size is KiB.
 * Going forward, the cache details need to be checked and
 * updated for every new ASIC.
 */
static struct kfd_gpu_cache_info vega10_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 16,
},
};
static struct kfd_gpu_cache_info raven_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 11,
},
};
static struct kfd_gpu_cache_info renoir_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
};
static struct kfd_gpu_cache_info vega12_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 5,
},
};
static struct kfd_gpu_cache_info vega20_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 3,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 8192,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 16,
},
};
static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 8192,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 14,
},
};
static struct kfd_gpu_cache_info navi10_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
};
static struct kfd_gpu_cache_info vangogh_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
};
static struct kfd_gpu_cache_info navi14_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 12,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 12,
},
};
static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
{
/* L3 Data Cache per GPU */
.cache_size = 128*1024,
.cache_level = 3,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
};
static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 3072,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
{
/* L3 Data Cache per GPU */
.cache_size = 96*1024,
.cache_level = 3,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 10,
},
};
static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
{
/* L3 Data Cache per GPU */
.cache_size = 32*1024,
.cache_level = 3,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
};
static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
{
/* L3 Data Cache per GPU */
.cache_size = 16*1024,
.cache_level = 3,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
};
static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 6,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 6,
},
};
static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
};
static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
};
static struct kfd_gpu_cache_info dummy_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 6,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 6,
},
};
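/* Note: in the kfd_gpu_cache_info tables above, cache_size is expressed in
* KiB and num_cu_shared is the number of compute units that share one
* instance of that cache.
*/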
static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
dev->node_props.cpu_cores_count = cu->num_cpu_cores;
dev->node_props.cpu_core_id_base = cu->processor_id_low;
if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
cu->processor_id_low);
}
static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
dev->node_props.simd_id_base = cu->processor_id_low;
dev->node_props.simd_count = cu->num_simd_cores;
dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
dev->node_props.max_waves_per_simd = cu->max_waves_simd;
dev->node_props.wave_front_size = cu->wave_front_size;
dev->node_props.array_count = cu->array_count;
dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
dev->node_props.simd_per_cu = cu->num_simd_per_cu;
dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}
/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
* correct topology device present in the device_list
*/
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
struct list_head *device_list)
{
struct kfd_topology_device *dev;
pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
cu->proximity_domain, cu->hsa_capability);
list_for_each_entry(dev, device_list, list) {
if (cu->proximity_domain == dev->proximity_domain) {
if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
kfd_populated_cu_info_cpu(dev, cu);
if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
kfd_populated_cu_info_gpu(dev, cu);
break;
}
}
return 0;
}
static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
struct kfd_topology_device *dev)
{
struct kfd_mem_properties *props;
list_for_each_entry(props, &dev->mem_props, list) {
if (props->heap_type == heap_type
&& props->flags == flags
&& props->width == width)
return props;
}
return NULL;
}
/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
* correct topology device present in the device_list
*/
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
struct list_head *device_list)
{
struct kfd_mem_properties *props;
struct kfd_topology_device *dev;
uint32_t heap_type;
uint64_t size_in_bytes;
uint32_t flags = 0;
uint32_t width;
pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->proximity_domain);
list_for_each_entry(dev, device_list, list) {
if (mem->proximity_domain == dev->proximity_domain) {
/* We're on GPU node */
if (dev->node_props.cpu_cores_count == 0) {
/* APU */
if (mem->visibility_type == 0)
heap_type =
HSA_MEM_HEAP_TYPE_FB_PRIVATE;
/* dGPU */
else
heap_type = mem->visibility_type;
} else
heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
flags |= HSA_MEM_FLAGS_NON_VOLATILE;
size_in_bytes =
((uint64_t)mem->length_high << 32) +
mem->length_low;
width = mem->width;
/* Multiple banks of the same type are aggregated into
* one. User mode doesn't care about multiple physical
* memory segments. It's managed as a single virtual
* heap for user mode.
*/
props = find_subtype_mem(heap_type, flags, width, dev);
if (props) {
props->size_in_bytes += size_in_bytes;
break;
}
props = kfd_alloc_struct(props);
if (!props)
return -ENOMEM;
props->heap_type = heap_type;
props->flags = flags;
props->size_in_bytes = size_in_bytes;
props->width = width;
dev->node_props.mem_banks_count++;
list_add_tail(&props->list, &dev->mem_props);
break;
}
}
return 0;
}
/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
* correct topology device present in the device_list
*/
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
struct list_head *device_list)
{
struct kfd_cache_properties *props;
struct kfd_topology_device *dev;
uint32_t id;
uint32_t total_num_of_cu;
id = cache->processor_id_low;
pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
list_for_each_entry(dev, device_list, list) {
total_num_of_cu = (dev->node_props.array_count *
dev->node_props.cu_per_simd_array);
/* Cache information in CRAT doesn't have proximity_domain
* information as it is associated with a CPU core or GPU
* Compute Unit. So map the cache using CPU core Id or SIMD
* (GPU) ID.
* TODO: This works because currently we can safely assume that
* Compute Units are parsed before caches are parsed. In the
* future, remove this dependency
*/
if ((id >= dev->node_props.cpu_core_id_base &&
id <= dev->node_props.cpu_core_id_base +
dev->node_props.cpu_cores_count) ||
(id >= dev->node_props.simd_id_base &&
id < dev->node_props.simd_id_base +
total_num_of_cu)) {
props = kfd_alloc_struct(props);
if (!props)
return -ENOMEM;
props->processor_id_low = id;
props->cache_level = cache->cache_level;
props->cache_size = cache->cache_size;
props->cacheline_size = cache->cache_line_size;
props->cachelines_per_tag = cache->lines_per_tag;
props->cache_assoc = cache->associativity;
props->cache_latency = cache->cache_latency;
memcpy(props->sibling_map, cache->sibling_map,
CRAT_SIBLINGMAP_SIZE);
/* set the sibling_map_size as 32 for CRAT from ACPI */
props->sibling_map_size = CRAT_SIBLINGMAP_SIZE;
if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
props->cache_type |= HSA_CACHE_TYPE_DATA;
if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
props->cache_type |= HSA_CACHE_TYPE_CPU;
if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
props->cache_type |= HSA_CACHE_TYPE_HSACU;
dev->node_props.caches_count++;
list_add_tail(&props->list, &dev->cache_props);
break;
}
}
return 0;
}
/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
* correct topology device present in the device_list
*/
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
struct list_head *device_list)
{
struct kfd_iolink_properties *props = NULL, *props2;
struct kfd_topology_device *dev, *to_dev;
uint32_t id_from;
uint32_t id_to;
id_from = iolink->proximity_domain_from;
id_to = iolink->proximity_domain_to;
pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
id_from, id_to);
list_for_each_entry(dev, device_list, list) {
if (id_from == dev->proximity_domain) {
props = kfd_alloc_struct(props);
if (!props)
return -ENOMEM;
props->node_from = id_from;
props->node_to = id_to;
props->ver_maj = iolink->version_major;
props->ver_min = iolink->version_minor;
props->iolink_type = iolink->io_interface_type;
if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
props->weight = 20;
else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
props->weight = iolink->weight_xgmi;
else
props->weight = node_distance(id_from, id_to);
props->min_latency = iolink->minimum_latency;
props->max_latency = iolink->maximum_latency;
props->min_bandwidth = iolink->minimum_bandwidth_mbs;
props->max_bandwidth = iolink->maximum_bandwidth_mbs;
props->rec_transfer_size =
iolink->recommended_transfer_size;
dev->node_props.io_links_count++;
list_add_tail(&props->list, &dev->io_link_props);
break;
}
}
/* CPU topology is created before GPUs are detected, so CPU->GPU
* links are not built at that time. If a PCIe type is discovered, it
* means a GPU is detected and we are adding GPU->CPU to the topology.
* At this time, also add the corresponding CPU->GPU link if the GPU
* has a large BAR.
* For xGMI, only one direction of the link is added in the CRAT
* table, so add the corresponding reversed-direction link now.
*/
if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
to_dev = kfd_topology_device_by_proximity_domain_no_lock(id_to);
if (!to_dev)
return -ENODEV;
/* same everything but the other direction */
props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
if (!props2)
return -ENOMEM;
props2->node_from = id_to;
props2->node_to = id_from;
props2->kobj = NULL;
to_dev->node_props.io_links_count++;
list_add_tail(&props2->list, &to_dev->io_link_props);
}
return 0;
}
/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
* device present in the device_list
* @sub_type_hdr - subtype section of crat_image
* @device_list - list of topology devices present in this crat_image
*/
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
struct list_head *device_list)
{
struct crat_subtype_computeunit *cu;
struct crat_subtype_memory *mem;
struct crat_subtype_cache *cache;
struct crat_subtype_iolink *iolink;
int ret = 0;
switch (sub_type_hdr->type) {
case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
cu = (struct crat_subtype_computeunit *)sub_type_hdr;
ret = kfd_parse_subtype_cu(cu, device_list);
break;
case CRAT_SUBTYPE_MEMORY_AFFINITY:
mem = (struct crat_subtype_memory *)sub_type_hdr;
ret = kfd_parse_subtype_mem(mem, device_list);
break;
case CRAT_SUBTYPE_CACHE_AFFINITY:
cache = (struct crat_subtype_cache *)sub_type_hdr;
ret = kfd_parse_subtype_cache(cache, device_list);
break;
case CRAT_SUBTYPE_TLB_AFFINITY:
/*
* For now, nothing to do here
*/
pr_debug("Found TLB entry in CRAT table (not processing)\n");
break;
case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
/*
* For now, nothing to do here
*/
pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
break;
case CRAT_SUBTYPE_IOLINK_AFFINITY:
iolink = (struct crat_subtype_iolink *)sub_type_hdr;
ret = kfd_parse_subtype_iolink(iolink, device_list);
break;
default:
pr_warn("Unknown subtype %d in CRAT\n",
sub_type_hdr->type);
}
return ret;
}
/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
* create a kfd_topology_device and add it to device_list. Also parse
* CRAT subtypes and attach them to the appropriate kfd_topology_device
* @crat_image - input image containing CRAT
* @device_list - [OUT] list of kfd_topology_device generated after
* parsing crat_image
* @proximity_domain - Proximity domain of the first device in the table
*
* Return - 0 if successful else -ve value
*/
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
uint32_t proximity_domain)
{
struct kfd_topology_device *top_dev = NULL;
struct crat_subtype_generic *sub_type_hdr;
uint16_t node_id;
int ret = 0;
struct crat_header *crat_table = (struct crat_header *)crat_image;
uint16_t num_nodes;
uint32_t image_len;
if (!crat_image)
return -EINVAL;
if (!list_empty(device_list)) {
pr_warn("Error device list should be empty\n");
return -EINVAL;
}
num_nodes = crat_table->num_domains;
image_len = crat_table->length;
pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
for (node_id = 0; node_id < num_nodes; node_id++) {
top_dev = kfd_create_topology_device(device_list);
if (!top_dev)
break;
top_dev->proximity_domain = proximity_domain++;
}
if (!top_dev) {
ret = -ENOMEM;
goto err;
}
memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
CRAT_OEMTABLEID_LENGTH);
top_dev->oem_revision = crat_table->oem_revision;
sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
((char *)crat_image) + image_len) {
if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
ret = kfd_parse_subtype(sub_type_hdr, device_list);
if (ret)
break;
}
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
}
err:
if (ret)
kfd_release_topology_device_list(device_list);
return ret;
}
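/* kfd_fill_gpu_cache_info_from_gfx_config - derive the cache table from the
* IP-discovered gfx config rather than a static per-ASIC table (used for the
* GC 11.x cases in kfd_get_gpu_cache_info below). Fills pcache_info[] in
* place and returns the number of cache entries populated.
*/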
static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
struct kfd_gpu_cache_info *pcache_info)
{
struct amdgpu_device *adev = kdev->adev;
int i = 0;
/* TCP L1 Cache per CU */
if (adev->gfx.config.gc_tcp_l1_size) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcp_l1_size;
pcache_info[i].cache_level = 1;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
i++;
}
/* Scalar L1 Instruction Cache per SQC */
if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) {
pcache_info[i].cache_size =
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
i++;
}
/* Scalar L1 Data Cache per SQC */
if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
i++;
}
/* GL1 Data Cache per SA */
if (adev->gfx.config.gc_gl1c_per_sa &&
adev->gfx.config.gc_gl1c_size_per_instance) {
pcache_info[i].cache_size = adev->gfx.config.gc_gl1c_per_sa *
adev->gfx.config.gc_gl1c_size_per_instance;
pcache_info[i].cache_level = 1;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
i++;
}
/* L2 Data Cache per GPU (Total Tex Cache) */
if (adev->gfx.config.gc_gl2c_per_gpu) {
pcache_info[i].cache_size = adev->gfx.config.gc_gl2c_per_gpu;
pcache_info[i].cache_level = 2;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
i++;
}
/* L3 Data Cache per GPU */
if (adev->gmc.mall_size) {
pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
pcache_info[i].cache_level = 3;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
i++;
}
return i;
}
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
switch (kdev->adev->asic_type) {
case CHIP_KAVERI:
*pcache_info = kaveri_cache_info;
num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
break;
case CHIP_HAWAII:
*pcache_info = hawaii_cache_info;
num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
break;
case CHIP_CARRIZO:
*pcache_info = carrizo_cache_info;
num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
break;
case CHIP_TONGA:
*pcache_info = tonga_cache_info;
num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
break;
case CHIP_FIJI:
*pcache_info = fiji_cache_info;
num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
break;
case CHIP_POLARIS10:
*pcache_info = polaris10_cache_info;
num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
break;
case CHIP_POLARIS11:
*pcache_info = polaris11_cache_info;
num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
break;
case CHIP_POLARIS12:
*pcache_info = polaris12_cache_info;
num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
break;
case CHIP_VEGAM:
*pcache_info = vegam_cache_info;
num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
break;
default:
switch (KFD_GC_VERSION(kdev)) {
case IP_VERSION(9, 0, 1):
*pcache_info = vega10_cache_info;
num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
break;
case IP_VERSION(9, 2, 1):
*pcache_info = vega12_cache_info;
num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
break;
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
*pcache_info = vega20_cache_info;
num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
break;
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
*pcache_info = aldebaran_cache_info;
num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
*pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
break;
case IP_VERSION(9, 3, 0):
*pcache_info = renoir_cache_info;
num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
*pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;
case IP_VERSION(10, 1, 1):
*pcache_info = navi14_cache_info;
num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
break;
case IP_VERSION(10, 3, 0):
*pcache_info = sienna_cichlid_cache_info;
num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
break;
case IP_VERSION(10, 3, 2):
*pcache_info = navy_flounder_cache_info;
num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
break;
case IP_VERSION(10, 3, 4):
*pcache_info = dimgrey_cavefish_cache_info;
num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
break;
case IP_VERSION(10, 3, 1):
*pcache_info = vangogh_cache_info;
num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
break;
case IP_VERSION(10, 3, 5):
*pcache_info = beige_goby_cache_info;
num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
break;
case IP_VERSION(10, 3, 3):
*pcache_info = yellow_carp_cache_info;
num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
break;
case IP_VERSION(10, 3, 6):
*pcache_info = gc_10_3_6_cache_info;
num_of_cache_types = ARRAY_SIZE(gc_10_3_6_cache_info);
break;
case IP_VERSION(10, 3, 7):
*pcache_info = gfx1037_cache_info;
num_of_cache_types = ARRAY_SIZE(gfx1037_cache_info);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
break;
default:
*pcache_info = dummy_cache_info;
num_of_cache_types = ARRAY_SIZE(dummy_cache_info);
pr_warn("dummy cache info is used temporarily and real cache info need update later.\n");
break;
}
}
return num_of_cache_types;
}
/* Memory required to create Virtual CRAT.
* Since there is no easy way to predict the amount of memory required, the
* following amount is allocated for the GPU Virtual CRAT. This is
* expected to cover all known conditions, but to be safe an additional
* check is put in the code to ensure we don't overwrite the buffer.
*/
#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE)
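/* With the common 4 KiB page size this works out to 16 KiB per GPU VCRAT. */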
/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
*
* @numa_node_id: CPU NUMA node id
* @avail_size: Available size in the memory
* @sub_type_hdr: Memory into which compute info will be filled in
*
* Return 0 if successful else return -ve value
*/
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
int proximity_domain,
struct crat_subtype_computeunit *sub_type_hdr)
{
const struct cpumask *cpumask;
*avail_size -= sizeof(struct crat_subtype_computeunit);
if (*avail_size < 0)
return -ENOMEM;
memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
/* Fill in subtype header data */
sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
cpumask = cpumask_of_node(numa_node_id);
/* Fill in CU data */
sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
sub_type_hdr->proximity_domain = proximity_domain;
sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
if (sub_type_hdr->processor_id_low == -1)
return -EINVAL;
sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
return 0;
}
/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
*
* @numa_node_id: CPU NUMA node id
* @avail_size: Available size in the memory
* @sub_type_hdr: Memory into which memory info will be filled in
*
* Return 0 if successful else return -ve value
*/
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
int proximity_domain,
struct crat_subtype_memory *sub_type_hdr)
{
uint64_t mem_in_bytes = 0;
pg_data_t *pgdat;
int zone_type;
*avail_size -= sizeof(struct crat_subtype_memory);
if (*avail_size < 0)
return -ENOMEM;
memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
/* Fill in subtype header data */
sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
sub_type_hdr->length = sizeof(struct crat_subtype_memory);
sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
/* Fill in Memory Subunit data */
/* Unlike si_meminfo, si_meminfo_node is not exported. So
* the following lines are duplicated from si_meminfo_node
* function
*/
pgdat = NODE_DATA(numa_node_id);
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
mem_in_bytes <<= PAGE_SHIFT;
sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
sub_type_hdr->proximity_domain = proximity_domain;
return 0;
}
#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
uint32_t *num_entries,
struct crat_subtype_iolink *sub_type_hdr)
{
int nid;
struct cpuinfo_x86 *c = &cpu_data(0);
uint8_t link_type;
if (c->x86_vendor == X86_VENDOR_AMD)
link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
else
link_type = CRAT_IOLINK_TYPE_QPI_1_1;
*num_entries = 0;
/* Create IO links from this node to other CPU nodes */
for_each_online_node(nid) {
if (nid == numa_node_id) /* node itself */
continue;
*avail_size -= sizeof(struct crat_subtype_iolink);
if (*avail_size < 0)
return -ENOMEM;
memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
/* Fill in subtype header data */
sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
/* Fill in IO link data */
sub_type_hdr->proximity_domain_from = numa_node_id;
sub_type_hdr->proximity_domain_to = nid;
sub_type_hdr->io_interface_type = link_type;
(*num_entries)++;
sub_type_hdr++;
}
return 0;
}
#endif
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
*
* @pcrat_image: Fill in VCRAT for CPU
* @size: [IN] allocated size of crat_image.
* [OUT] actual size of data filled in crat_image
*/
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
struct crat_header *crat_table = (struct crat_header *)pcrat_image;
struct acpi_table_header *acpi_table;
acpi_status status;
struct crat_subtype_generic *sub_type_hdr;
int avail_size = *size;
int numa_node_id;
#ifdef CONFIG_X86_64
uint32_t entries = 0;
#endif
int ret = 0;
if (!pcrat_image)
return -EINVAL;
/* Fill in CRAT Header.
* Modify length and total_entries as subunits are added.
*/
avail_size -= sizeof(struct crat_header);
if (avail_size < 0)
return -ENOMEM;
memset(crat_table, 0, sizeof(struct crat_header));
memcpy(&crat_table->signature, CRAT_SIGNATURE,
sizeof(crat_table->signature));
crat_table->length = sizeof(struct crat_header);
status = acpi_get_table("DSDT", 0, &acpi_table);
if (status != AE_OK)
pr_warn("DSDT table not found for OEM information\n");
else {
crat_table->oem_revision = acpi_table->revision;
memcpy(crat_table->oem_id, acpi_table->oem_id,
CRAT_OEMID_LENGTH);
memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
CRAT_OEMTABLEID_LENGTH);
acpi_put_table(acpi_table);
}
crat_table->total_entries = 0;
crat_table->num_domains = 0;
sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
for_each_online_node(numa_node_id) {
if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
continue;
/* Fill in Subtype: Compute Unit */
ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
crat_table->num_domains,
(struct crat_subtype_computeunit *)sub_type_hdr);
if (ret < 0)
return ret;
crat_table->length += sub_type_hdr->length;
crat_table->total_entries++;
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
/* Fill in Subtype: Memory */
ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
crat_table->num_domains,
(struct crat_subtype_memory *)sub_type_hdr);
if (ret < 0)
return ret;
crat_table->length += sub_type_hdr->length;
crat_table->total_entries++;
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
&entries,
(struct crat_subtype_iolink *)sub_type_hdr);
if (ret < 0)
return ret;
if (entries) {
crat_table->length += (sub_type_hdr->length * entries);
crat_table->total_entries += entries;
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length * entries);
}
#else
pr_info("IO link not available for non x86 platforms\n");
#endif
crat_table->num_domains++;
}
/* TODO: Add cache Subtype for CPU.
* Currently, CPU cache information is available in function
* detect_cache_attributes(cpu) defined in the file
* ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
* exported, so to get the same information the code would need to be
* duplicated.
*/
*size = crat_table->length;
pr_info("Virtual CRAT table created for CPU\n");
return 0;
}
static int kfd_fill_gpu_memory_affinity(int *avail_size,
struct kfd_node *kdev, uint8_t type, uint64_t size,
struct crat_subtype_memory *sub_type_hdr,
uint32_t proximity_domain,
const struct kfd_local_mem_info *local_mem_info)
{
*avail_size -= sizeof(struct crat_subtype_memory);
if (*avail_size < 0)
return -ENOMEM;
memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
sub_type_hdr->length = sizeof(struct crat_subtype_memory);
sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
sub_type_hdr->proximity_domain = proximity_domain;
pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
type, size);
sub_type_hdr->length_low = lower_32_bits(size);
sub_type_hdr->length_high = upper_32_bits(size);
sub_type_hdr->width = local_mem_info->vram_width;
sub_type_hdr->visibility_type = type;
return 0;
}
#ifdef CONFIG_ACPI_NUMA
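/* kfd_find_numa_node_in_srat - walk the ACPI SRAT generic affinity entries,
* match this GPU's PCI segment/bus/devfn against the device handle, and bind
* the device to the NUMA node of the matching proximity domain. Used as a
* fallback when the PCI device was not assigned a NUMA node.
*/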
static void kfd_find_numa_node_in_srat(struct kfd_node *kdev)
{
struct acpi_table_header *table_header = NULL;
struct acpi_subtable_header *sub_header = NULL;
unsigned long table_end, subtable_len;
u32 pci_id = pci_domain_nr(kdev->adev->pdev->bus) << 16 |
pci_dev_id(kdev->adev->pdev);
u32 bdf;
acpi_status status;
struct acpi_srat_cpu_affinity *cpu;
struct acpi_srat_generic_affinity *gpu;
int pxm = 0, max_pxm = 0;
int numa_node = NUMA_NO_NODE;
bool found = false;
/* Fetch the SRAT table from ACPI */
status = acpi_get_table(ACPI_SIG_SRAT, 0, &table_header);
if (status == AE_NOT_FOUND) {
pr_warn("SRAT table not found\n");
return;
} else if (ACPI_FAILURE(status)) {
const char *err = acpi_format_exception(status);
pr_err("SRAT table error: %s\n", err);
return;
}
table_end = (unsigned long)table_header + table_header->length;
/* Parse all entries looking for a match. */
sub_header = (struct acpi_subtable_header *)
((unsigned long)table_header +
sizeof(struct acpi_table_srat));
subtable_len = sub_header->length;
while (((unsigned long)sub_header) + subtable_len < table_end) {
/*
* If length is 0, break from this loop to avoid
* infinite loop.
*/
if (subtable_len == 0) {
pr_err("SRAT invalid zero length\n");
break;
}
switch (sub_header->type) {
case ACPI_SRAT_TYPE_CPU_AFFINITY:
cpu = (struct acpi_srat_cpu_affinity *)sub_header;
pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
cpu->proximity_domain_lo;
if (pxm > max_pxm)
max_pxm = pxm;
break;
case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
gpu = (struct acpi_srat_generic_affinity *)sub_header;
bdf = *((u16 *)(&gpu->device_handle[0])) << 16 |
*((u16 *)(&gpu->device_handle[2]));
if (bdf == pci_id) {
found = true;
numa_node = pxm_to_node(gpu->proximity_domain);
}
break;
default:
break;
}
if (found)
break;
sub_header = (struct acpi_subtable_header *)
((unsigned long)sub_header + subtable_len);
subtable_len = sub_header->length;
}
acpi_put_table(table_header);
/* Workaround bad cpu-gpu binding case */
if (found && (numa_node < 0 ||
numa_node > pxm_to_node(max_pxm)))
numa_node = 0;
if (numa_node != NUMA_NO_NODE)
set_dev_node(&kdev->adev->pdev->dev, numa_node);
}
#endif
#define KFD_CRAT_INTRA_SOCKET_WEIGHT 13
#define KFD_CRAT_XGMI_WEIGHT 15
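/* Link weights behave like NUMA distances: lower means a closer or cheaper
* path. Intra-socket (13) < xGMI (15) < the PCIe weight of 20 assigned in
* kfd_parse_subtype_iolink().
*/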
/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
* to its NUMA node
* @avail_size: Available size in the memory
* @kdev - [IN] GPU device
* @sub_type_hdr: Memory into which io link info will be filled in
* @proximity_domain - proximity domain of the GPU node
*
* Return 0 if successful else return -ve value
*/
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
struct kfd_node *kdev,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain)
{
*avail_size -= sizeof(struct crat_subtype_iolink);
if (*avail_size < 0)
return -ENOMEM;
memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
/* Fill in subtype header data */
sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
if (kfd_dev_is_large_bar(kdev))
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
/* Fill in IOLINK subtype.
* TODO: Fill-in other fields of iolink subtype
*/
if (kdev->adev->gmc.xgmi.connected_to_cpu ||
(KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 3) &&
kdev->adev->smuio.funcs->get_pkg_type(kdev->adev) ==
AMDGPU_PKG_TYPE_APU)) {
bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3);
int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT :
KFD_CRAT_INTRA_SOCKET_WEIGHT;
uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
kdev->adev, NULL, true) : mem_bw;
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
* io link.
*/
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->weight_xgmi = weight;
sub_type_hdr->minimum_bandwidth_mbs = bandwidth;
sub_type_hdr->maximum_bandwidth_mbs = bandwidth;
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true);
sub_type_hdr->maximum_bandwidth_mbs =
amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false);
}
sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_ACPI_NUMA
if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE &&
num_possible_nodes() > 1)
kfd_find_numa_node_in_srat(kdev);
#endif
#ifdef CONFIG_NUMA
if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE)
sub_type_hdr->proximity_domain_to = 0;
else
sub_type_hdr->proximity_domain_to = kdev->adev->pdev->dev.numa_node;
#else
sub_type_hdr->proximity_domain_to = 0;
#endif
return 0;
}
static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
struct kfd_node *kdev,
struct kfd_node *peer_kdev,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain_from,
uint32_t proximity_domain_to)
{
bool use_ta_info = kdev->kfd->num_nodes == 1;
*avail_size -= sizeof(struct crat_subtype_iolink);
if (*avail_size < 0)
return -ENOMEM;
memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->proximity_domain_from = proximity_domain_from;
sub_type_hdr->proximity_domain_to = proximity_domain_to;
if (use_ta_info) {
sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT *
amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
sub_type_hdr->maximum_bandwidth_mbs =
amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev,
peer_kdev->adev, false);
sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
} else {
bool is_single_hop = kdev->kfd == peer_kdev->kfd;
int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT :
(2 * KFD_CRAT_INTRA_SOCKET_WEIGHT) + KFD_CRAT_XGMI_WEIGHT;
int mem_bw = 819200;
sub_type_hdr->weight_xgmi = weight;
sub_type_hdr->maximum_bandwidth_mbs = is_single_hop ? mem_bw : 0;
sub_type_hdr->minimum_bandwidth_mbs = is_single_hop ? mem_bw : 0;
}
return 0;
}
/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
*
* @pcrat_image: Fill in VCRAT for GPU
* @size: [IN] allocated size of crat_image.
* [OUT] actual size of data filled in crat_image
*/
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
size_t *size, struct kfd_node *kdev,
uint32_t proximity_domain)
{
struct crat_header *crat_table = (struct crat_header *)pcrat_image;
struct crat_subtype_generic *sub_type_hdr;
struct kfd_local_mem_info local_mem_info;
struct kfd_topology_device *peer_dev;
struct crat_subtype_computeunit *cu;
struct kfd_cu_info cu_info;
int avail_size = *size;
uint32_t total_num_of_cu;
uint32_t nid = 0;
int ret = 0;
if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
return -EINVAL;
/* Fill the CRAT Header.
* Modify length and total_entries as subunits are added.
*/
avail_size -= sizeof(struct crat_header);
if (avail_size < 0)
return -ENOMEM;
memset(crat_table, 0, sizeof(struct crat_header));
memcpy(&crat_table->signature, CRAT_SIGNATURE,
sizeof(crat_table->signature));
/* Change length as we add more subtypes*/
crat_table->length = sizeof(struct crat_header);
crat_table->num_domains = 1;
crat_table->total_entries = 0;
/* Fill in Subtype: Compute Unit
* First fill in the sub type header and then sub type data
*/
avail_size -= sizeof(struct crat_subtype_computeunit);
if (avail_size < 0)
return -ENOMEM;
sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
/* Fill CU subtype data */
cu = (struct crat_subtype_computeunit *)sub_type_hdr;
cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
cu->proximity_domain = proximity_domain;
amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
cu->num_simd_cores = cu_info.simd_per_cu *
(cu_info.cu_active_number / kdev->kfd->num_nodes);
cu->max_waves_simd = cu_info.max_waves_per_simd;
cu->wave_front_size = cu_info.wave_front_size;
cu->array_count = cu_info.num_shader_arrays_per_engine *
cu_info.num_shader_engines;
total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
cu->num_cu_per_array = cu_info.num_cu_per_sh;
cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
cu->num_banks = cu_info.num_shader_engines;
cu->lds_size_in_kb = cu_info.lds_size;
cu->hsa_capability = 0;
crat_table->length += sub_type_hdr->length;
crat_table->total_entries++;
/* Fill in Subtype: Memory. Only on systems with large BAR (no
* private FB), report memory as public. On other systems
* report the total FB size (public+private) as a single
* private heap.
*/
local_mem_info = kdev->local_mem_info;
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
if (debug_largebar)
local_mem_info.local_mem_size_private = 0;
if (local_mem_info.local_mem_size_private == 0)
ret = kfd_fill_gpu_memory_affinity(&avail_size,
kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
local_mem_info.local_mem_size_public,
(struct crat_subtype_memory *)sub_type_hdr,
proximity_domain,
&local_mem_info);
else
ret = kfd_fill_gpu_memory_affinity(&avail_size,
kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
local_mem_info.local_mem_size_public +
local_mem_info.local_mem_size_private,
(struct crat_subtype_memory *)sub_type_hdr,
proximity_domain,
&local_mem_info);
if (ret < 0)
return ret;
crat_table->length += sizeof(struct crat_subtype_memory);
crat_table->total_entries++;
/* Fill in Subtype: IO_LINKS
* Only direct links are added here which is Link from GPU to
* its NUMA node. Indirect links are added by userspace.
*/
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
if (ret < 0)
return ret;
crat_table->length += sub_type_hdr->length;
crat_table->total_entries++;
/* Fill in Subtype: IO_LINKS
* Direct links from GPU to other GPUs through xGMI.
* We loop over GPUs that have already been processed (those with a
* lower proximity_domain value) and add a link from this GPU to each
* GPU with the same hive id. The reversed iolink (from the other GPU
* to this GPU) will be added in kfd_parse_subtype_iolink.
*/
if (kdev->kfd->hive_id) {
for (nid = 0; nid < proximity_domain; ++nid) {
peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
if (!peer_dev->gpu)
continue;
if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)
continue;
sub_type_hdr = (typeof(sub_type_hdr))(
(char *)sub_type_hdr +
sizeof(struct crat_subtype_iolink));
ret = kfd_fill_gpu_xgmi_link_to_gpu(
&avail_size, kdev, peer_dev->gpu,
(struct crat_subtype_iolink *)sub_type_hdr,
proximity_domain, nid);
if (ret < 0)
return ret;
crat_table->length += sub_type_hdr->length;
crat_table->total_entries++;
}
}
*size = crat_table->length;
pr_info("Virtual CRAT table created for GPU\n");
return ret;
}
/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
* creates a Virtual CRAT (VCRAT) image
*
* NOTE: Call kfd_destroy_crat_image to free CRAT image memory
*
* @crat_image: VCRAT image created because ACPI does not have a
* CRAT for this device
* @size: [OUT] size of virtual crat_image
* @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
* COMPUTE_UNIT_GPU - Create VCRAT for GPU
* (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
* -- this option is not currently implemented.
* The assumption is that all AMD APUs will have CRAT
* @kdev: Valid kfd_node required if flags contain COMPUTE_UNIT_GPU
*
* Return 0 if successful else return -ve value
*/
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
int flags, struct kfd_node *kdev,
uint32_t proximity_domain)
{
void *pcrat_image = NULL;
int ret = 0, num_nodes;
size_t dyn_size;
if (!crat_image)
return -EINVAL;
*crat_image = NULL;
/* Allocate the CPU Virtual CRAT size based on the number of online
* nodes. Allocate VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image.
* This should cover all the current conditions. A check is in place
* so the GPU image is not written beyond its allocated size.
*/
switch (flags) {
case COMPUTE_UNIT_CPU:
num_nodes = num_online_nodes();
dyn_size = sizeof(struct crat_header) +
num_nodes * (sizeof(struct crat_subtype_computeunit) +
sizeof(struct crat_subtype_memory) +
(num_nodes - 1) * sizeof(struct crat_subtype_iolink));
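/* e.g. with two online nodes: header + 2 CU + 2 memory + 2 iolink subtypes */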
pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
if (!pcrat_image)
return -ENOMEM;
*size = dyn_size;
pr_debug("CRAT size is %ld", dyn_size);
ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
break;
case COMPUTE_UNIT_GPU:
if (!kdev)
return -EINVAL;
pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
if (!pcrat_image)
return -ENOMEM;
*size = VCRAT_SIZE_FOR_GPU;
ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
proximity_domain);
break;
case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
/* TODO: */
ret = -EINVAL;
pr_err("VCRAT not implemented for APU\n");
break;
default:
ret = -EINVAL;
}
if (!ret)
*crat_image = pcrat_image;
else
kvfree(pcrat_image);
return ret;
}
/* kfd_destroy_crat_image
*
* @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
*
*/
void kfd_destroy_crat_image(void *crat_image)
{
kvfree(crat_image);
}
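/* Typical flow (illustrative): kfd_create_crat_image_virtual() allocates and
* fills a VCRAT image, kfd_parse_crat_table() converts it into
* kfd_topology_device entries on a device_list, and kfd_destroy_crat_image()
* frees the image afterwards.
*/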
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_crat.c |