python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dccg.h" #include "clk_mgr_internal.h" // For dce12_get_dp_ref_freq_khz #include "dce100/dce_clk_mgr.h" // For dcn20_update_clocks_update_dpp_dto #include "dcn20/dcn20_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" #include "dcn315_clk_mgr.h" #include "core_types.h" #include "dcn315_smu.h" #include "dm_helpers.h" #include "dc_dmub_srv.h" #include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger #include "link.h" #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) #define UNSUPPORTED_DCFCLK 10000000 #define MIN_DPP_DISP_CLK 100000 static int dcn315_get_active_display_cnt_wa( struct dc *dc, struct dc_state *context) { int i, display_count; bool tmds_present = false; display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) tmds_present = true; } for (i = 0; i < dc->link_count; i++) { const struct dc_link *link = dc->links[i]; /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ if (link->link_enc && link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } /* WA for hang on HDMI after display off back back on*/ if (display_count == 0 && tmds_present) display_count = 1; return display_count; } static bool should_disable_otg(struct pipe_ctx *pipe) { bool ret = true; if (pipe->stream->link->link_enc && pipe->stream->link->link_enc->funcs->is_dig_enabled && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)) ret = false; return ret; } static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx 
*pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || dc_is_virtual_signal(pipe->stream->signal))) { /* This w/a should not trigger when we have a dig active */ if (should_disable_otg(pipe)) { if (disable) { pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); } else pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); } } } } static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) { union dmub_rb_cmd cmd; struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; int display_count; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; if (dc->work_arounds.skip_clock_update) return; clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; /* * if it is safe to lower, but we are already in the lower state, we don't have to do anything * also if safe to lower is false, we just go in the higher state */ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; if (safe_to_lower) { /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { display_count = dcn315_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ if (display_count == 0) { union display_idle_optimization_u idle_info = { 0 }; idle_info.idle_info.df_request_disabled = 1; idle_info.idle_info.phy_ref_clk_off = 1; idle_info.idle_info.s0i2_rdy = 1; dcn315_smu_set_display_idle_optimization(clk_mgr, idle_info.data); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; } } } else { /* check that we're not already in D0 */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { union 
display_idle_optimization_u idle_info = { 0 }; dcn315_smu_set_display_idle_optimization(clk_mgr, idle_info.data); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; } } /* Lock pstate by requesting unsupported dcfclk if change is unsupported */ if (!new_clocks->p_state_change_support) new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK; if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz); } if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; dcn315_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz); } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) dpp_clock_lowered = true; clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; update_dppclk = true; } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { /* No need to apply the w/a if we haven't taken over from bios yet */ if (clk_mgr_base->clks.dispclk_khz) dcn315_disable_otg_wa(clk_mgr_base, context, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); if (clk_mgr_base->clks.dispclk_khz) dcn315_disable_otg_wa(clk_mgr_base, context, false); update_dispclk = true; } if (dpp_clock_lowered) { // increase per DPP DTO before lowering global dppclk 
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn315_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); } else { // increase global DPPCLK before lowering per DPP DTO if (update_dppclk || update_dispclk) dcn315_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); // always update dtos unless clock is lowered and not safe to lower if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } // notify DMCUB of latest clocks memset(&cmd, 0, sizeof(cmd)); cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR; cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS; cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz; cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz = clk_mgr_base->clks.dcfclk_deep_sleep_khz; cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { return; } static struct clk_bw_params dcn315_bw_params = { .vram_type = Ddr4MemType, .num_channels = 2, .clk_table = { .entries = { { .voltage = 0, .dispclk_mhz = 640, .dppclk_mhz = 640, .phyclk_mhz = 810, .phyclk_d18_mhz = 667, .dtbclk_mhz = 600, }, { .voltage = 1, .dispclk_mhz = 739, .dppclk_mhz = 739, .phyclk_mhz = 810, .phyclk_d18_mhz = 667, .dtbclk_mhz = 600, }, { .voltage = 2, .dispclk_mhz = 960, .dppclk_mhz = 960, .phyclk_mhz = 810, .phyclk_d18_mhz = 667, .dtbclk_mhz = 600, }, { .voltage = 3, .dispclk_mhz = 1200, .dppclk_mhz = 1200, .phyclk_mhz = 810, .phyclk_d18_mhz = 667, .dtbclk_mhz = 600, }, { .voltage = 4, .dispclk_mhz = 1372, .dppclk_mhz = 1372, .phyclk_mhz = 810, .phyclk_d18_mhz = 667, .dtbclk_mhz = 600, }, }, .num_entries = 5, }, }; static struct wm_table 
ddr5_wm_table = { .entries = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, } }; static struct wm_table lpddr5_wm_table = { .entries = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, } }; /* Temporary Place holder until we can get them from fuse */ static DpmClocks_315_t dummy_clocks = { 0 }; static struct dcn315_watermarks dummy_wms = { 0 }; static void dcn315_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn315_watermarks *table) { int i, num_valid_sets; num_valid_sets = 0; for (i = 0; i < WM_SET_COUNT; i++) { /* skip empty entries, the smu array has no holes*/ if (!bw_params->wm_table.entries[i].valid) continue; table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = 
bw_params->wm_table.entries[i].wm_type; /* We will not select WM based on fclk, so leave it as unconstrained */ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) { if (i == 0) table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0; else { /* add 1 to make it non-overlapping with next lvl */ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; } table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk = bw_params->clk_table.entries[i].dcfclk_mhz; } else { /* unconstrained for memory retraining */ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; /* Modify previous watermark range to cover up to max */ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; } num_valid_sets++; } ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ /* modify the min and max to make sure we cover the whole range*/ table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0; table->WatermarkRow[WM_DCFCLK][0].MinClock = 0; table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF; table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; /* This is for writeback only, does not matter currently as no writeback support*/ table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A; table->WatermarkRow[WM_SOCCLK][0].MinClock = 0; table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF; table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0; table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF; } static void dcn315_notify_wm_ranges(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr); struct dcn315_watermarks *table = clk_mgr_dcn315->smu_wm_set.wm_set; if 
(!clk_mgr->smu_ver) return; if (!table || clk_mgr_dcn315->smu_wm_set.mc_address.quad_part == 0) return; memset(table, 0, sizeof(*table)); dcn315_build_watermark_ranges(clk_mgr_base->bw_params, table); dcn315_smu_set_dram_addr_high(clk_mgr, clk_mgr_dcn315->smu_wm_set.mc_address.high_part); dcn315_smu_set_dram_addr_low(clk_mgr, clk_mgr_dcn315->smu_wm_set.mc_address.low_part); dcn315_smu_transfer_wm_table_dram_2_smu(clk_mgr); } static void dcn315_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, struct dcn315_smu_dpm_clks *smu_dpm_clks) { DpmClocks_315_t *table = smu_dpm_clks->dpm_clks; if (!clk_mgr->smu_ver) return; if (!table || smu_dpm_clks->mc_address.quad_part == 0) return; memset(table, 0, sizeof(*table)); dcn315_smu_set_dram_addr_high(clk_mgr, smu_dpm_clks->mc_address.high_part); dcn315_smu_set_dram_addr_low(clk_mgr, smu_dpm_clks->mc_address.low_part); dcn315_smu_transfer_dpm_table_smu_2_dram(clk_mgr); } static void dcn315_clk_mgr_helper_populate_bw_params( struct clk_mgr_internal *clk_mgr, struct integrated_info *bios_info, const DpmClocks_315_t *clock_table) { int i; struct clk_bw_params *bw_params = clk_mgr->base.bw_params; uint32_t max_pstate = clock_table->NumDfPstatesEnabled - 1; struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; /* For 315 we want to base clock table on dcfclk, need at least one entry regardless of pmfw table */ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) { int j; /* DF table is sorted with clocks decreasing */ for (j = clock_table->NumDfPstatesEnabled - 2; j >= 0; j--) { if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) max_pstate = j; } /* Max DCFCLK should match up with max pstate */ if (i == clock_table->NumDcfClkLevelsEnabled - 1) max_pstate = 0; /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--) if 
(bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i]) break; bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz; bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz; bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz; /* Now update clocks we do read */ bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[max_pstate].FClk; bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk; bw_params->clk_table.entries[i].voltage = clock_table->SocVoltage[i]; bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i]; bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i]; bw_params->clk_table.entries[i].dispclk_mhz = clock_table->DispClocks[i]; bw_params->clk_table.entries[i].dppclk_mhz = clock_table->DppClocks[i]; bw_params->clk_table.entries[i].wck_ratio = 1; } /* Make sure to include at least one entry */ if (i == 0) { bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[0].FClk; bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[0].MemClk; bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[0].Voltage; bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[0]; bw_params->clk_table.entries[i].wck_ratio = 1; i++; } else if (clock_table->NumDcfClkLevelsEnabled != clock_table->NumSocClkLevelsEnabled) { bw_params->clk_table.entries[i-1].voltage = clock_table->SocVoltage[clock_table->NumSocClkLevelsEnabled - 1]; bw_params->clk_table.entries[i-1].socclk_mhz = clock_table->SocClocks[clock_table->NumSocClkLevelsEnabled - 1]; bw_params->clk_table.entries[i-1].dispclk_mhz = clock_table->DispClocks[clock_table->NumDispClkLevelsEnabled - 1]; bw_params->clk_table.entries[i-1].dppclk_mhz = clock_table->DppClocks[clock_table->NumDispClkLevelsEnabled - 1]; } bw_params->clk_table.num_entries = i; /* Set any 0 
clocks to max default setting. Not an issue for * power since we aren't doing switching in such case anyway */ for (i = 0; i < bw_params->clk_table.num_entries; i++) { if (!bw_params->clk_table.entries[i].fclk_mhz) { bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz; bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz; bw_params->clk_table.entries[i].voltage = def_max.voltage; } if (!bw_params->clk_table.entries[i].dcfclk_mhz) bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz; if (!bw_params->clk_table.entries[i].socclk_mhz) bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz; if (!bw_params->clk_table.entries[i].dispclk_mhz) bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz; if (!bw_params->clk_table.entries[i].dppclk_mhz) bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz; if (!bw_params->clk_table.entries[i].phyclk_mhz) bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz; if (!bw_params->clk_table.entries[i].phyclk_d18_mhz) bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz; if (!bw_params->clk_table.entries[i].dtbclk_mhz) bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; } /* Make sure all highest default clocks are included*/ ASSERT(bw_params->clk_table.entries[i-1].phyclk_mhz == def_max.phyclk_mhz); ASSERT(bw_params->clk_table.entries[i-1].phyclk_d18_mhz == def_max.phyclk_d18_mhz); ASSERT(bw_params->clk_table.entries[i-1].dtbclk_mhz == def_max.dtbclk_mhz); ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz); bw_params->vram_type = bios_info->memory_type; bw_params->num_channels = bios_info->ma_channel_number; bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 
8 : 4; for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; if (i >= bw_params->clk_table.num_entries) { bw_params->wm_table.entries[i].valid = false; continue; } bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG; bw_params->wm_table.entries[i].valid = true; } } static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); dcn315_smu_enable_pme_wa(clk_mgr); } static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, .are_clock_states_equal = dcn31_are_clock_states_equal, .notify_wm_ranges = dcn315_notify_wm_ranges }; extern struct clk_mgr_funcs dcn3_fpga_funcs; void dcn315_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn315 *clk_mgr, struct pp_smu_funcs *pp_smu, struct dccg *dccg) { struct dcn315_smu_dpm_clks smu_dpm_clks = { 0 }; struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn315_funcs; clk_mgr->base.pp_smu = pp_smu; clk_mgr->base.dccg = dccg; clk_mgr->base.dfs_bypass_disp_clk = 0; clk_mgr->base.dprefclk_ss_percentage = 0; clk_mgr->base.dprefclk_ss_divider = 1000; clk_mgr->base.ss_on_dprefclk = false; clk_mgr->base.dfs_ref_freq_khz = 48000; clk_mgr->smu_wm_set.wm_set = (struct dcn315_watermarks *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, sizeof(struct dcn315_watermarks), &clk_mgr->smu_wm_set.mc_address.quad_part); if (!clk_mgr->smu_wm_set.wm_set) { clk_mgr->smu_wm_set.wm_set = &dummy_wms; clk_mgr->smu_wm_set.mc_address.quad_part = 0; } ASSERT(clk_mgr->smu_wm_set.wm_set); smu_dpm_clks.dpm_clks = (DpmClocks_315_t *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, sizeof(DpmClocks_315_t), 
&smu_dpm_clks.mc_address.quad_part); if (smu_dpm_clks.dpm_clks == NULL) { smu_dpm_clks.dpm_clks = &dummy_clocks; smu_dpm_clks.mc_address.quad_part = 0; } ASSERT(smu_dpm_clks.dpm_clks); clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); if (clk_mgr->base.smu_ver > 0) clk_mgr->base.smu_present = true; if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { dcn315_bw_params.wm_table = lpddr5_wm_table; } else { dcn315_bw_params.wm_table = ddr5_wm_table; } /* Saved clocks configured at boot for debug purposes */ dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn315_bw_params; if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { int i; dcn315_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n" "NumDispClkLevelsEnabled: %d\n" "NumSocClkLevelsEnabled: %d\n" "VcnClkLevelsEnabled: %d\n" "NumDfPst atesEnabled: %d\n" "MinGfxClk: %d\n" "MaxGfxClk: %d\n", smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled, smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled, smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled, smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled, smu_dpm_clks.dpm_clks->NumDfPstatesEnabled, smu_dpm_clks.dpm_clks->MinGfxClk, smu_dpm_clks.dpm_clks->MaxGfxClk); for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n", i, smu_dpm_clks.dpm_clks->DcfClocks[i]); } for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n", i, 
smu_dpm_clks.dpm_clks->DispClocks[i]); } for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n", i, smu_dpm_clks.dpm_clks->SocClocks[i]); } for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n", i, smu_dpm_clks.dpm_clks->SocVoltage[i]); for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n" "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n" "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n", i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk, i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk, i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage); } if (ctx->dc_bios && ctx->dc_bios->integrated_info) { dcn315_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, smu_dpm_clks.dpm_clks); } } if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, smu_dpm_clks.dpm_clks); } void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) { struct clk_mgr_dcn315 *clk_mgr = TO_CLK_MGR_DCN315(clk_mgr_int); if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0) dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, clk_mgr->smu_wm_set.wm_set); }
linux-master
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
// SPDX-License-Identifier: MIT /* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" #include "dm_helpers.h" #include "dcn314_smu.h" #include "mp/mp_13_0_5_offset.h" /* TODO: Use the real headers when they're correct */ #define MP1_BASE__INST0_SEG0 0x00016000 #define MP1_BASE__INST0_SEG1 0x0243FC00 #define MP1_BASE__INST0_SEG2 0x00DC0000 #define MP1_BASE__INST0_SEG3 0x00E00000 #define MP1_BASE__INST0_SEG4 0x00E40000 #define MP1_BASE__INST0_SEG5 0 #ifdef BASE_INNER #undef BASE_INNER #endif #define BASE_INNER(seg) MP1_BASE__INST0_SEG ## seg #define BASE(seg) BASE_INNER(seg) #define REG(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name) #define FN(reg_name, field) \ FD(reg_name##__##field) #include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ CTX->logger #define smu_print(str, ...) 
{DC_LOG_SMU(str, ##__VA_ARGS__); } #define VBIOSSMC_MSG_TestMessage 0x1 #define VBIOSSMC_MSG_GetSmuVersion 0x2 #define VBIOSSMC_MSG_PowerUpGfx 0x3 #define VBIOSSMC_MSG_SetDispclkFreq 0x4 #define VBIOSSMC_MSG_SetDprefclkFreq 0x5 //Not used. DPRef is constant #define VBIOSSMC_MSG_SetDppclkFreq 0x6 #define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x7 #define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x8 #define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x9 //Keep it in case VMIN dees not support phy clk #define VBIOSSMC_MSG_GetFclkFrequency 0xA #define VBIOSSMC_MSG_SetDisplayCount 0xB //Not used anymore #define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xC //Not used anymore #define VBIOSSMC_MSG_UpdatePmeRestore 0xD #define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0xE //Used for WM table txfr #define VBIOSSMC_MSG_SetVbiosDramAddrLow 0xF #define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10 #define VBIOSSMC_MSG_TransferTableDram2Smu 0x11 #define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12 #define VBIOSSMC_MSG_GetDprefclkFreq 0x13 #define VBIOSSMC_MSG_GetDtbclkFreq 0x14 #define VBIOSSMC_MSG_AllowZstatesEntry 0x15 #define VBIOSSMC_MSG_DisallowZstatesEntry 0x16 #define VBIOSSMC_MSG_SetDtbClk 0x17 #define VBIOSSMC_Message_Count 0x18 #define VBIOSSMC_Status_BUSY 0x0 #define VBIOSSMC_Result_OK 0x1 #define VBIOSSMC_Result_Failed 0xFF #define VBIOSSMC_Result_UnknownCmd 0xFE #define VBIOSSMC_Result_CmdRejectedPrereq 0xFD #define VBIOSSMC_Result_CmdRejectedBusy 0xFC /* * Function to be used instead of REG_WAIT macro because the wait ends when * the register is NOT EQUAL to zero, and because the translation in msg_if.h * won't work with REG_WAIT. 
*/ static uint32_t dcn314_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { uint32_t res_val = VBIOSSMC_Status_BUSY; do { res_val = REG_READ(MP1_SMN_C2PMSG_91); if (res_val != VBIOSSMC_Status_BUSY) break; if (delay_us >= 1000) msleep(delay_us/1000); else if (delay_us > 0) udelay(delay_us); } while (max_retries--); return res_val; } static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param) { uint32_t result; result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000); if (result != VBIOSSMC_Result_OK) smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result); if (result == VBIOSSMC_Status_BUSY) return -1; /* First clear response register */ REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY); /* Set the parameter register for the SMU message, unit is Mhz */ REG_WRITE(MP1_SMN_C2PMSG_83, param); /* Trigger the message transaction by writing the message ID */ REG_WRITE(MP1_SMN_C2PMSG_67, msg_id); result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000); if (result == VBIOSSMC_Result_Failed) { if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && param == TABLE_WATERMARKS) DC_LOG_DEBUG("Watermarks table not configured properly by SMU"); else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq || msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk) DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS"); else ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); return -1; } if (IS_SMU_TIMEOUT(result)) { ASSERT(0); dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); } return REG_READ(MP1_SMN_C2PMSG_83); } int dcn314_smu_get_smu_version(struct clk_mgr_internal *clk_mgr) { return dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_GetSmuVersion, 0); } int dcn314_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) { int actual_dispclk_set_mhz = -1; if (!clk_mgr->smu_present) return requested_dispclk_khz; /* 
Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDispclkFreq, khz_to_mhz_ceil(requested_dispclk_khz)); return actual_dispclk_set_mhz * 1000; } int dcn314_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr) { int actual_dprefclk_set_mhz = -1; if (!clk_mgr->smu_present) return clk_mgr->base.dprefclk_khz; actual_dprefclk_set_mhz = dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDprefclkFreq, khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz)); /* TODO: add code for programing DP DTO, currently this is down by command table */ return actual_dprefclk_set_mhz * 1000; } int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz) { int actual_dcfclk_set_mhz = -1; if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) return -1; if (!clk_mgr->smu_present) return requested_dcfclk_khz; actual_dcfclk_set_mhz = dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); #ifdef DBG smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); #endif return actual_dcfclk_set_mhz * 1000; } int dcn314_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz) { int actual_min_ds_dcfclk_mhz = -1; if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) return -1; if (!clk_mgr->smu_present) return requested_min_ds_dcfclk_khz; actual_min_ds_dcfclk_mhz = dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetMinDeepSleepDcfclk, khz_to_mhz_ceil(requested_min_ds_dcfclk_khz)); return actual_min_ds_dcfclk_mhz * 1000; } int dcn314_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz) { int actual_dppclk_set_mhz = -1; if (!clk_mgr->smu_present) return requested_dpp_khz; actual_dppclk_set_mhz = dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, khz_to_mhz_ceil(requested_dpp_khz)); return actual_dppclk_set_mhz * 1000; } void 
dcn314_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info) { if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) return; if (!clk_mgr->smu_present) return; //TODO: Work with smu team to define optimization options. dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info); } void dcn314_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) { union display_idle_optimization_u idle_info = { 0 }; if (!clk_mgr->smu_present) return; if (enable) { idle_info.idle_info.df_request_disabled = 1; idle_info.idle_info.phy_ref_clk_off = 1; } dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info.data); } void dcn314_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) { if (!clk_mgr->smu_present) return; dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_UpdatePmeRestore, 0); } void dcn314_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) { if (!clk_mgr->smu_present) return; dcn314_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high); } void dcn314_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low) { if (!clk_mgr->smu_present) return; dcn314_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low); } void dcn314_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr) { if (!clk_mgr->smu_present) return; dcn314_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS); } void dcn314_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) { if (!clk_mgr->smu_present) return; dcn314_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS); } void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) { unsigned int msg_id, param; if (!clk_mgr->smu_present) return; switch (support) { case DCN_ZSTATE_SUPPORT_ALLOW: msg_id = 
VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 9) | (1 << 8); break; case DCN_ZSTATE_SUPPORT_DISALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = 0; break; case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 8); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 8); break; default: //DCN_ZSTATE_SUPPORT_UNKNOWN msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = 0; break; } dcn314_smu_send_msg_with_param( clk_mgr, msg_id, param); } /* Arg = 1: Turn DTB on; 0: Turn DTB CLK OFF. when it is on, it is 600MHZ */ void dcn314_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable) { if (!clk_mgr->smu_present) return; dcn314_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDtbClk, enable); }
linux-master
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
// SPDX-License-Identifier: MIT /* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dcn314_clk_mgr.h" #include "dccg.h" #include "clk_mgr_internal.h" // For dce12_get_dp_ref_freq_khz #include "dce100/dce_clk_mgr.h" // For dcn20_update_clocks_update_dpp_dto #include "dcn20/dcn20_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" /* TODO: remove this include once we ported over remaining clk mgr functions*/ #include "dcn30/dcn30_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" #include "link.h" #include "dcn314_smu.h" #include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger #define MAX_INSTANCE 7 #define MAX_SEGMENT 8 struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } }, { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } }, { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } }, { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } }; #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 #define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 #define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc #define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 #define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L #define REG(reg_name) \ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) #define TO_CLK_MGR_DCN314(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn314, base) static int dcn314_get_active_display_cnt_wa( struct dc *dc, struct dc_state *context) { int i, display_count; bool tmds_present = false; display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state 
*stream = context->streams[i]; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) tmds_present = true; /* Checking stream / link detection ensuring that PHY is active*/ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off) display_count++; } for (i = 0; i < dc->link_count; i++) { const struct dc_link *link = dc->links[i]; /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ if (link->link_enc && link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } /* WA for hang on HDMI after display off back on*/ if (display_count == 0 && tmds_present) display_count = 1; return display_count; } static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { struct stream_encoder *stream_enc = pipe->stream_res.stream_enc; if (disable) { if (stream_enc && stream_enc->funcs->disable_fifo) pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc); pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); } else { pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); if (stream_enc && stream_enc->funcs->enable_fifo) pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc); } } } } void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) { union dmub_rb_cmd cmd; struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; int 
display_count; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; if (dc->work_arounds.skip_clock_update) return; /* * if it is safe to lower, but we are already in the lower state, we don't have to do anything * also if safe to lower is false, we just go in the higher state */ if (safe_to_lower) { if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { dcn314_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support); dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true); clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { dcn314_smu_set_dtbclk(clk_mgr, false); clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { display_count = dcn314_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ if (display_count == 0) { union display_idle_optimization_u idle_info = { 0 }; idle_info.idle_info.df_request_disabled = 1; idle_info.idle_info.phy_ref_clk_off = 1; idle_info.idle_info.s0i2_rdy = 1; dcn314_smu_set_display_idle_optimization(clk_mgr, idle_info.data); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; } } } else { if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { dcn314_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW); dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false); clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { dcn314_smu_set_dtbclk(clk_mgr, true); clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } /* check that we're not already in D0 */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { union 
display_idle_optimization_u idle_info = { 0 }; dcn314_smu_set_display_idle_optimization(clk_mgr, idle_info.data); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; } } if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; dcn314_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz); } if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; dcn314_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz); } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. if (new_clocks->dppclk_khz < 100000) new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) dpp_clock_lowered = true; clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; update_dppclk = true; } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { dcn314_disable_otg_wa(clk_mgr_base, context, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); dcn314_disable_otg_wa(clk_mgr_base, context, false); update_dispclk = true; } if (dpp_clock_lowered) { // increase per DPP DTO before lowering global dppclk dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn314_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); } else { // increase global DPPCLK before lowering per DPP DTO if (update_dppclk || update_dispclk) dcn314_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); // always update dtos unless clock is lowered and not safe to lower if (new_clocks->dppclk_khz >= 
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } // notify DMCUB of latest clocks memset(&cmd, 0, sizeof(cmd)); cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR; cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS; cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz; cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz = clk_mgr_base->clks.dcfclk_deep_sleep_khz; cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) { /* get FbMult value */ struct fixed31_32 pll_req; unsigned int fbmult_frac_val = 0; unsigned int fbmult_int_val = 0; /* * Register value of fbmult is in 8.16 format, we are converting to 314.32 * to leverage the fix point operations available in driver */ REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/ REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ pll_req = dc_fixpt_from_int(fbmult_int_val); /* * since fractional part is only 16 bit in register definition but is 32 bit * in our fix point definiton, need to shift left by 16 to obtain correct value */ pll_req.value |= fbmult_frac_val << 16; /* multiply by REFCLK period */ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); /* integer part is now VCO frequency in kHz */ return dc_fixpt_floor(pll_req); } static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); dcn314_smu_enable_pme_wa(clk_mgr); } bool dcn314_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b) { if (a->dispclk_khz != b->dispclk_khz) return false; else if (a->dppclk_khz != b->dppclk_khz) return false; else if (a->dcfclk_khz != b->dcfclk_khz) return false; 
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) return false; else if (a->zstate_support != b->zstate_support) return false; else if (a->dtbclk_en != b->dtbclk_en) return false; return true; } static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { return; } static struct clk_bw_params dcn314_bw_params = { .vram_type = Ddr4MemType, .num_channels = 1, .clk_table = { .num_entries = 4, }, }; static struct wm_table ddr5_wm_table = { .entries = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, } }; static struct wm_table lpddr5_wm_table = { .entries = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 30.0, .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 30.0, .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 30.0, .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, .sr_exit_time_us = 30.0, .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, } }; static DpmClocks314_t dummy_clocks; static struct dcn314_watermarks dummy_wms = { 0 }; static void 
dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table) { int i, num_valid_sets; num_valid_sets = 0; for (i = 0; i < WM_SET_COUNT; i++) { /* skip empty entries, the smu array has no holes*/ if (!bw_params->wm_table.entries[i].valid) continue; table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type; /* We will not select WM based on fclk, so leave it as unconstrained */ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) { if (i == 0) table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0; else { /* add 1 to make it non-overlapping with next lvl */ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; } table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk = bw_params->clk_table.entries[i].dcfclk_mhz; } else { /* unconstrained for memory retraining */ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; /* Modify previous watermark range to cover up to max */ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; } num_valid_sets++; } ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ /* modify the min and max to make sure we cover the whole range*/ table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0; table->WatermarkRow[WM_DCFCLK][0].MinClock = 0; table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF; table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; /* This is for writeback only, does not matter currently as no writeback support*/ table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A; table->WatermarkRow[WM_SOCCLK][0].MinClock = 0; 
table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF; table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0; table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF; } static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr); struct dcn314_watermarks *table = clk_mgr_dcn314->smu_wm_set.wm_set; if (!clk_mgr->smu_ver) return; if (!table || clk_mgr_dcn314->smu_wm_set.mc_address.quad_part == 0) return; memset(table, 0, sizeof(*table)); dcn314_build_watermark_ranges(clk_mgr_base->bw_params, table); dcn314_smu_set_dram_addr_high(clk_mgr, clk_mgr_dcn314->smu_wm_set.mc_address.high_part); dcn314_smu_set_dram_addr_low(clk_mgr, clk_mgr_dcn314->smu_wm_set.mc_address.low_part); dcn314_smu_transfer_wm_table_dram_2_smu(clk_mgr); } static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, struct dcn314_smu_dpm_clks *smu_dpm_clks) { DpmClocks314_t *table = smu_dpm_clks->dpm_clks; if (!clk_mgr->smu_ver) return; if (!table || smu_dpm_clks->mc_address.quad_part == 0) return; memset(table, 0, sizeof(*table)); dcn314_smu_set_dram_addr_high(clk_mgr, smu_dpm_clks->mc_address.high_part); dcn314_smu_set_dram_addr_low(clk_mgr, smu_dpm_clks->mc_address.low_part); dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr); } static inline bool is_valid_clock_value(uint32_t clock_value) { return clock_value > 1 && clock_value < 100000; } static unsigned int convert_wck_ratio(uint8_t wck_ratio) { switch (wck_ratio) { case WCK_RATIO_1_2: return 2; case WCK_RATIO_1_4: return 4; default: break; } return 1; } static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks) { uint32_t max = 0; int i; for (i = 0; i < num_clocks; ++i) { if (clocks[i] > max) max = clocks[i]; } return max; } static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr, struct integrated_info *bios_info, const DpmClocks314_t *clock_table) { 
struct clk_bw_params *bw_params = clk_mgr->base.bw_params; struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0; int i; /* Find highest valid fclk pstate */ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) { if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) && clock_table->DfPstateTable[i].FClk > max_fclk) { max_fclk = clock_table->DfPstateTable[i].FClk; max_pstate = i; } } /* We expect the table to contain at least one valid fclk entry. */ ASSERT(is_valid_clock_value(max_fclk)); /* Dispclk and dppclk can be max at any voltage, same number of levels for both */ if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS && clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) { max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled); max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); } else { /* Invalid number of entries in the table from PMFW. 
*/ ASSERT(0); } /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) { uint32_t min_fclk = clock_table->DfPstateTable[0].FClk; int j; for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) { if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) && clock_table->DfPstateTable[j].FClk < min_fclk && clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) { min_fclk = clock_table->DfPstateTable[j].FClk; min_pstate = j; } } /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--) if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i]) break; bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz; bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz; bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz; /* Now update clocks we do read */ bw_params->clk_table.entries[i].fclk_mhz = min_fclk; bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk; bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage; bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i]; bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i]; bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio( clock_table->DfPstateTable[min_pstate].WckRatio); } /* Make sure to include at least one entry at highest pstate */ if (max_pstate != min_pstate || i == 0) { if (i > MAX_NUM_DPM_LVL - 1) i = MAX_NUM_DPM_LVL - 1; bw_params->clk_table.entries[i].fclk_mhz = max_fclk; bw_params->clk_table.entries[i].memclk_mhz = 
clock_table->DfPstateTable[max_pstate].MemClk; bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage; bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS); bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS); bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio( clock_table->DfPstateTable[max_pstate].WckRatio); i++; } bw_params->clk_table.num_entries = i--; /* Make sure all highest clocks are included*/ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS); bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS); bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS); ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS)); bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz; bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz; bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; /* * Set any 0 clocks to max default setting. 
Not an issue for * power since we aren't doing switching in such case anyway */ for (i = 0; i < bw_params->clk_table.num_entries; i++) { if (!bw_params->clk_table.entries[i].fclk_mhz) { bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz; bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz; bw_params->clk_table.entries[i].voltage = def_max.voltage; } if (!bw_params->clk_table.entries[i].dcfclk_mhz) bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz; if (!bw_params->clk_table.entries[i].socclk_mhz) bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz; if (!bw_params->clk_table.entries[i].dispclk_mhz) bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz; if (!bw_params->clk_table.entries[i].dppclk_mhz) bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz; if (!bw_params->clk_table.entries[i].phyclk_mhz) bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz; if (!bw_params->clk_table.entries[i].phyclk_d18_mhz) bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz; if (!bw_params->clk_table.entries[i].dtbclk_mhz) bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; } ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz); bw_params->vram_type = bios_info->memory_type; bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4; bw_params->num_channels = bios_info->ma_channel_number ? 
bios_info->ma_channel_number : 4; for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; if (i >= bw_params->clk_table.num_entries) { bw_params->wm_table.entries[i].valid = false; continue; } bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG; bw_params->wm_table.entries[i].valid = true; } } static struct clk_mgr_funcs dcn314_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn314_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn314_enable_pme_wa, .are_clock_states_equal = dcn314_are_clock_states_equal, .notify_wm_ranges = dcn314_notify_wm_ranges }; extern struct clk_mgr_funcs dcn3_fpga_funcs; void dcn314_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn314 *clk_mgr, struct pp_smu_funcs *pp_smu, struct dccg *dccg) { struct dcn314_smu_dpm_clks smu_dpm_clks = { 0 }; struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn314_funcs; clk_mgr->base.pp_smu = pp_smu; clk_mgr->base.dccg = dccg; clk_mgr->base.dfs_bypass_disp_clk = 0; clk_mgr->base.dprefclk_ss_percentage = 0; clk_mgr->base.dprefclk_ss_divider = 1000; clk_mgr->base.ss_on_dprefclk = false; clk_mgr->base.dfs_ref_freq_khz = 48000; clk_mgr->smu_wm_set.wm_set = (struct dcn314_watermarks *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, sizeof(struct dcn314_watermarks), &clk_mgr->smu_wm_set.mc_address.quad_part); if (!clk_mgr->smu_wm_set.wm_set) { clk_mgr->smu_wm_set.wm_set = &dummy_wms; clk_mgr->smu_wm_set.mc_address.quad_part = 0; } ASSERT(clk_mgr->smu_wm_set.wm_set); smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, sizeof(DpmClocks314_t), &smu_dpm_clks.mc_address.quad_part); if (smu_dpm_clks.dpm_clks == NULL) { smu_dpm_clks.dpm_clks = &dummy_clocks; smu_dpm_clks.mc_address.quad_part = 0; } 
ASSERT(smu_dpm_clks.dpm_clks); clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base); if (clk_mgr->base.smu_ver) clk_mgr->base.smu_present = true; /* TODO: Check we get what we expect during bringup */ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) dcn314_bw_params.wm_table = lpddr5_wm_table; else dcn314_bw_params.wm_table = ddr5_wm_table; /* Saved clocks configured at boot for debug purposes */ dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ clk_mgr->base.base.bw_params = &dcn314_bw_params; if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { int i; dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n" "NumDispClkLevelsEnabled: %d\n" "NumSocClkLevelsEnabled: %d\n" "VcnClkLevelsEnabled: %d\n" "NumDfPst atesEnabled: %d\n" "MinGfxClk: %d\n" "MaxGfxClk: %d\n", smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled, smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled, smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled, smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled, smu_dpm_clks.dpm_clks->NumDfPstatesEnabled, smu_dpm_clks.dpm_clks->MinGfxClk, smu_dpm_clks.dpm_clks->MaxGfxClk); for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n", i, smu_dpm_clks.dpm_clks->DcfClocks[i]); } for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n", i, smu_dpm_clks.dpm_clks->DispClocks[i]); } for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n", i, 
smu_dpm_clks.dpm_clks->SocClocks[i]); } for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n", i, smu_dpm_clks.dpm_clks->SocVoltage[i]); for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) { DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n" "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n" "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n", i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk, i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk, i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage); } if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) { dcn314_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, smu_dpm_clks.dpm_clks); } } if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, smu_dpm_clks.dpm_clks); } void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) { struct clk_mgr_dcn314 *clk_mgr = TO_CLK_MGR_DCN314(clk_mgr_int); if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0) dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, clk_mgr->smu_wm_set.wm_set); }
linux-master
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
/* * Copyright 2020 Mauro Rossi <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" /* include DCE6 register header files */ #include "dce/dce_6_0_d.h" #include "dce/dce_6_0_sh_mask.h" #include "dc_types.h" #include "include/grph_object_id.h" #include "include/logger_interface.h" #include "../dce110/dce110_timing_generator.h" #include "dce60_timing_generator.h" #include "timing_generator.h" enum black_color_format { BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */ BLACK_COLOR_FORMAT_RGB_LIMITED, BLACK_COLOR_FORMAT_YUV_TV, BLACK_COLOR_FORMAT_YUV_CV, BLACK_COLOR_FORMAT_YUV_SUPER_AA, BLACK_COLOR_FORMAT_COUNT }; static const struct dce110_timing_generator_offsets reg_offsets[] = { { .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), } }; #define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10 #define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1) #define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASKhw + 1) #define CRTC_REG(reg) (reg + tg110->offsets.crtc) #define DCP_REG(reg) (reg + tg110->offsets.dcp) #define DMIF_REG(reg) (reg + tg110->offsets.dmif) static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz) { uint64_t pix_dur; uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 + 
DCE110TG_FROM_TG(tg)->offsets.dmif; uint32_t value = dm_read_reg(tg->ctx, addr); if (pix_clk_100hz == 0) return; pix_dur = div_u64(10000000000ull, pix_clk_100hz); set_reg_field_value( value, pix_dur, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION); dm_write_reg(tg->ctx, addr, value); } static void program_timing(struct timing_generator *tg, const struct dc_crtc_timing *timing, int vready_offset, int vstartup_start, int vupdate_offset, int vupdate_width, const enum signal_type signal, bool use_vbios) { if (!use_vbios) program_pix_dur(tg, timing->pix_clk_100hz); dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios); } static void dce60_timing_generator_enable_advanced_request( struct timing_generator *tg, bool enable, const struct dc_crtc_timing *timing) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL); uint32_t value = dm_read_reg(tg->ctx, addr); /* DCE6 has CRTC_PREFETCH_EN bit in CRTC_CONTROL register */ uint32_t addr2 = CRTC_REG(mmCRTC_CONTROL); uint32_t value2 = dm_read_reg(tg->ctx, addr2); /* DCE6 does not support CRTC_LEGACY_REQUESTOR_EN bit so here is not possible to set bit based on enable argument */ if ((timing->v_sync_width + timing->v_front_porch) <= 3) { set_reg_field_value( value, 3, CRTC_START_LINE_CONTROL, CRTC_ADVANCED_START_LINE_POSITION); set_reg_field_value( value2, 0, CRTC_CONTROL, CRTC_PREFETCH_EN); } else { set_reg_field_value( value, 4, CRTC_START_LINE_CONTROL, CRTC_ADVANCED_START_LINE_POSITION); set_reg_field_value( value2, 1, CRTC_CONTROL, CRTC_PREFETCH_EN); } set_reg_field_value( value, 1, CRTC_START_LINE_CONTROL, CRTC_PROGRESSIVE_START_LINE_EARLY); set_reg_field_value( value, 1, CRTC_START_LINE_CONTROL, CRTC_INTERLACE_START_LINE_EARLY); dm_write_reg(tg->ctx, addr, value); dm_write_reg(tg->ctx, addr2, value2); } static bool dce60_is_tg_enabled(struct timing_generator *tg) { uint32_t addr = 0; uint32_t value = 0; uint32_t field = 0; struct dce110_timing_generator 
*tg110 = DCE110TG_FROM_TG(tg); addr = CRTC_REG(mmCRTC_CONTROL); value = dm_read_reg(tg->ctx, addr); field = get_reg_field_value(value, CRTC_CONTROL, CRTC_CURRENT_MASTER_EN_STATE); return field == 1; } static bool dce60_configure_crc(struct timing_generator *tg, const struct crc_params *params) { /* Cannot configure crc on a CRTC that is disabled */ if (!dce60_is_tg_enabled(tg)) return false; /* DCE6 has no CRTC_CRC_CNTL register, nothing to do */ return true; } static const struct timing_generator_funcs dce60_tg_funcs = { .validate_timing = dce110_tg_validate_timing, .program_timing = program_timing, .enable_crtc = dce110_timing_generator_enable_crtc, .disable_crtc = dce110_timing_generator_disable_crtc, .is_counter_moving = dce110_timing_generator_is_counter_moving, .get_position = dce110_timing_generator_get_position, .get_frame_count = dce110_timing_generator_get_vblank_counter, .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos, .set_early_control = dce110_timing_generator_set_early_control, .wait_for_state = dce110_tg_wait_for_state, .set_blank = dce110_tg_set_blank, .is_blanked = dce110_tg_is_blanked, .set_colors = dce110_tg_set_colors, .set_overscan_blank_color = dce110_timing_generator_set_overscan_color_black, .set_blank_color = dce110_timing_generator_program_blank_color, .disable_vga = dce110_timing_generator_disable_vga, .did_triggered_reset_occur = dce110_timing_generator_did_triggered_reset_occur, .setup_global_swap_lock = dce110_timing_generator_setup_global_swap_lock, .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger, .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger, .tear_down_global_swap_lock = dce110_timing_generator_tear_down_global_swap_lock, .set_drr = dce110_timing_generator_set_drr, .set_static_screen_control = dce110_timing_generator_set_static_screen_control, .set_test_pattern = dce110_timing_generator_set_test_pattern, .arm_vert_intr = dce110_arm_vert_intr, /* DCE6.0 overrides */ 
.enable_advanced_request = dce60_timing_generator_enable_advanced_request, .configure_crc = dce60_configure_crc, .get_crc = dce110_get_crc, }; void dce60_timing_generator_construct( struct dce110_timing_generator *tg110, struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { tg110->controller_id = CONTROLLER_ID_D0 + instance; tg110->base.inst = instance; tg110->offsets = *offsets; tg110->derived_offsets = reg_offsets[instance]; tg110->base.funcs = &dce60_tg_funcs; tg110->base.ctx = ctx; tg110->base.bp = ctx->dc_bios; tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1; tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1; tg110->min_h_blank = 56; tg110->min_h_front_porch = 4; tg110->min_h_back_porch = 4; }
linux-master
drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
/* * Copyright 2020 Mauro Rossi <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
#include "dce60_hw_sequencer.h"

#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dce100/dce100_hw_sequencer.h"

/* include DCE6 register header files */
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"

#define DC_LOGGER_INIT()

/*******************************************************************************
 * Private definitions
 ******************************************************************************/

/***************************PIPE_CONTROL***********************************/

/*
 * Check if FBC can be enabled.
 *
 * On success *pipe_idx is set to the first non-underlay pipe carrying a
 * stream.  Returns true only when every precondition holds: FBC memory is
 * allocated, exactly one stream is active, the link is eDP, PSR is off,
 * a plane exists and the surface is not linear-tiled.
 */
static bool dce60_should_enable_fbc(struct dc *dc,
		struct dc_state *context,
		uint32_t *pipe_idx)
{
	uint32_t i;
	struct pipe_ctx *pipe_ctx = NULL;
	struct resource_context *res_ctx = &context->res_ctx;
	unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;

	ASSERT(dc->fbc_compressor);

	/* FBC memory should be allocated */
	if (!dc->ctx->fbc_gpu_addr)
		return false;

	/* Only supports single display */
	if (context->stream_count != 1)
		return false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (res_ctx->pipe_ctx[i].stream) {
			pipe_ctx = &res_ctx->pipe_ctx[i];
			if (!pipe_ctx)
				continue;
			/* fbc not applicable on underlay pipe */
			if (pipe_ctx->pipe_idx != underlay_idx) {
				*pipe_idx = i;
				break;
			}
		}
	}

	/* no eligible pipe was found */
	if (i == dc->res_pool->pipe_count)
		return false;

	if (!pipe_ctx->stream->link)
		return false;

	/* Only supports eDP */
	if (pipe_ctx->stream->link->connector_signal != SIGNAL_TYPE_EDP)
		return false;

	/* PSR should not be enabled */
	if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
		return false;

	/* Nothing to compress */
	if (!pipe_ctx->plane_state)
		return false;

	/* Only for non-linear tiling */
	if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
		return false;

	return true;
}

/*
 * Enable FBC on the pipe selected by dce60_should_enable_fbc(), programming
 * the compressor's surface address/pitch and invalidation triggers first.
 */
static void dce60_enable_fbc(
		struct dc *dc,
		struct dc_state *context)
{
	uint32_t pipe_idx = 0;

	if (dce60_should_enable_fbc(dc, context, &pipe_idx)) {
		/* Program GRPH COMPRESSED ADDRESS and PITCH */
		struct compr_addr_and_pitch_params params = {0, 0, 0};
		struct compressor *compr = dc->fbc_compressor;
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];

		params.source_view_width = pipe_ctx->stream->timing.h_addressable;
		params.source_view_height = pipe_ctx->stream->timing.v_addressable;
		params.inst = pipe_ctx->stream_res.tg->inst;
		compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;

		compr->funcs->surface_address_and_pitch(compr, &params);
		compr->funcs->set_fbc_invalidation_triggers(compr, 1);

		compr->funcs->enable_fbc(compr, &params);
	}
}

/*******************************************************************************
 * Front End programming
 ******************************************************************************/

/*
 * Program the transform's default CSC from the pipe's current color-space,
 * pixel-format and depth settings (software-computed adjustment).
 */
static void dce60_set_default_colors(struct pipe_ctx *pipe_ctx)
{
	struct default_adjustment default_adjust = { 0 };

	default_adjust.force_hw_default = false;
	default_adjust.in_color_space = pipe_ctx->plane_state->color_space;
	default_adjust.out_color_space = pipe_ctx->stream->output_color_space;
	default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
	default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;

	/* display color depth */
	default_adjust.color_depth =
		pipe_ctx->stream->timing.display_color_depth;

	/* Lb color depth */
	default_adjust.lb_color_depth = pipe_ctx->plane_res.scl_data.lb_params.depth;

	pipe_ctx->plane_res.xfm->funcs->opp_set_csc_default(
					pipe_ctx->plane_res.xfm, &default_adjust);
}

/*******************************************************************************
 * In order to turn on surface we will program
 * CRTC
 *
 * DCE6 has no bottom_pipe and no Blender HW
 * We need to set 'blank_target' to false in order to turn on the display
 *
 * |-----------|------------|---------|
 * |curr pipe  | set_blank  |         |
 * |Surface    |blank_target|  CRTC   |
 * |visibility |  argument  |         |
 * |-----------|------------|---------|
 * |    off    |   true     | blank   |
 * |    on     |   false    | unblank |
 * |-----------|------------|---------|
 *
 ******************************************************************************/
static void dce60_program_surface_visibility(const struct dc *dc,
		struct pipe_ctx *pipe_ctx)
{
	bool blank_target = false;

	/* DCE6 has no bottom_pipe and no Blender HW */

	if (!pipe_ctx->plane_state->visible)
		blank_target = true;

	/* DCE6 skip dce_set_blender_mode() but then proceed to 'unblank' CRTC */
	pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
}

/*
 * Map the pipe's pixel format to a debug "visual confirm" border color.
 * Intensity scales down with the TG instance so overlapping pipes can be
 * told apart.
 */
static void dce60_get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;

	switch (pipe_ctx->plane_res.scl_data.format) {
	case PIXEL_FORMAT_ARGB8888:
		/* set border color to red */
		color->color_r_cr = color_value;
		break;

	case PIXEL_FORMAT_ARGB2101010:
		/* set border color to blue */
		color->color_b_cb = color_value;
		break;
	case PIXEL_FORMAT_420BPP8:
		/* set border color to green */
		color->color_g_y = color_value;
		break;
	case PIXEL_FORMAT_420BPP10:
		/* set border color to yellow */
		color->color_g_y = color_value;
		color->color_r_cr = color_value;
		break;
	case PIXEL_FORMAT_FP16:
		/* set border color to white */
		color->color_r_cr = color_value;
		color->color_b_cb = color_value;
		color->color_g_y = color_value;
		break;
	default:
		break;
	}
}

/*
 * Program the pipe's scaler: pick the overscan/blank color (visual-confirm
 * or black for the output color space), set the pixel storage depth, then
 * apply the scaler configuration.
 */
static void dce60_program_scaler(const struct dc *dc,
		const struct pipe_ctx *pipe_ctx)
{
	struct tg_color color = {0};

	/* DCE6 skips DCN TOFPGA check for transform_set_pixel_storage_depth == NULL */

	if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
		dce60_get_surface_visual_confirm_color(pipe_ctx, &color);
	else
		color_space_to_black_color(dc,
				pipe_ctx->stream->output_color_space,
				&color);

	pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
		pipe_ctx->plane_res.xfm,
		pipe_ctx->plane_res.scl_data.lb_params.depth,
		&pipe_ctx->stream->bit_depth_params);

	if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
		/*
		 * The way 420 is packed, 2 channels carry Y component, 1 channel
		 * alternate between Cb and Cr, so both channels need the pixel
		 * value for Y
		 */
		if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
			color.color_r_cr = color.color_g_y;

		pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
				pipe_ctx->stream_res.tg,
				&color);
	}

	pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
		&pipe_ctx->plane_res.scl_data);
}

/*
 * Program the full front end for one pipe: FE clock, default/adjusted CSC,
 * gamut remap, scaler, surface configuration, optional PTE/VM setup and
 * transfer functions.  Ordering of the register programming below is
 * significant and is kept exactly as inherited from the DCE110 sequencer.
 */
static void dce60_program_front_end_for_pipe(
		struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct mem_input *mi = pipe_ctx->plane_res.mi;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct xfm_grph_csc_adjustment adjust;
	struct out_csc_color_matrix tbl_entry;
	unsigned int i;
	struct dce_hwseq *hws = dc->hwseq;

	DC_LOGGER_INIT();
	memset(&tbl_entry, 0, sizeof(tbl_entry));

	memset(&adjust, 0, sizeof(adjust));
	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;

	dce_enable_fe_clock(dc->hwseq, mi->inst, true);

	dce60_set_default_colors(pipe_ctx);
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		tbl_entry.color_space = pipe_ctx->stream->output_color_space;

		for (i = 0; i < 12; i++)
			tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];

		pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
				(pipe_ctx->plane_res.xfm, &tbl_entry);
	}

	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;

		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
	}

	pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);

	/* no Blender HW on DCE6: alpha only matters when a bottom pipe exists */
	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;

	dce60_program_scaler(dc, pipe_ctx);

	mi->funcs->mem_input_program_surface_config(
			mi,
			plane_state->format,
			&plane_state->tiling_info,
			&plane_state->plane_size,
			plane_state->rotation,
			NULL,
			false);
	if (mi->funcs->set_blank)
		mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);

	if (dc->config.gpu_vm_support)
		mi->funcs->mem_input_program_pte_vm(
				pipe_ctx->plane_res.mi,
				plane_state->format,
				&plane_state->tiling_info,
				plane_state->rotation);

	/* Moved programming gamma from dc to hwss */
	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	DC_LOG_SURFACE(
			"Pipe:%d %p: addr hi:0x%x, "
			"addr low:0x%x, "
			"src: %d, %d, %d,"
			" %d; dst: %d, %d, %d, %d;"
			"clip: %d, %d, %d, %d\n",
			pipe_ctx->pipe_idx,
			(void *) pipe_ctx->plane_state,
			pipe_ctx->plane_state->address.grph.addr.high_part,
			pipe_ctx->plane_state->address.grph.addr.low_part,
			pipe_ctx->plane_state->src_rect.x,
			pipe_ctx->plane_state->src_rect.y,
			pipe_ctx->plane_state->src_rect.width,
			pipe_ctx->plane_state->src_rect.height,
			pipe_ctx->plane_state->dst_rect.x,
			pipe_ctx->plane_state->dst_rect.y,
			pipe_ctx->plane_state->dst_rect.width,
			pipe_ctx->plane_state->dst_rect.height,
			pipe_ctx->plane_state->clip_rect.x,
			pipe_ctx->plane_state->clip_rect.y,
			pipe_ctx->plane_state->clip_rect.width,
			pipe_ctx->plane_state->clip_rect.height);

	DC_LOG_SURFACE(
			"Pipe %d: width, height, x, y\n"
			"viewport:%d, %d, %d, %d\n"
			"recout: %d, %d, %d, %d\n",
			pipe_ctx->pipe_idx,
			pipe_ctx->plane_res.scl_data.viewport.width,
			pipe_ctx->plane_res.scl_data.viewport.height,
			pipe_ctx->plane_res.scl_data.viewport.x,
			pipe_ctx->plane_res.scl_data.viewport.y,
			pipe_ctx->plane_res.scl_data.recout.width,
			pipe_ctx->plane_res.scl_data.recout.height,
			pipe_ctx->plane_res.scl_data.recout.x,
			pipe_ctx->plane_res.scl_data.recout.y);
}

/*
 * apply_ctx_for_surface hook: for every pipe carrying 'stream', allocate
 * DMIF memory, program the front end, update the plane address and apply
 * surface visibility.  FBC is disabled up front and conditionally
 * re-enabled afterwards.
 */
static void dce60_apply_ctx_for_surface(
		struct dc *dc,
		const struct dc_stream_state *stream,
		int num_planes,
		struct dc_state *context)
{
	int i;

	if (num_planes == 0)
		return;

	if (dc->fbc_compressor)
		dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream != stream)
			continue;

		/* Need to allocate mem before program front end for Fiji */
		pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
				pipe_ctx->plane_res.mi,
				pipe_ctx->stream->timing.h_total,
				pipe_ctx->stream->timing.v_total,
				pipe_ctx->stream->timing.pix_clk_100hz / 10,
				context->stream_count);

		dce60_program_front_end_for_pipe(dc, pipe_ctx);

		dc->hwss.update_plane_addr(dc, pipe_ctx);

		dce60_program_surface_visibility(dc, pipe_ctx);

	}

	if (dc->fbc_compressor)
		dce60_enable_fbc(dc, context);
}

/*
 * Build the DCE6 hardware sequencer: start from the DCE110 sequencer and
 * override the hooks that differ on DCE6 hardware.
 */
void dce60_hw_sequencer_construct(struct dc *dc)
{
	dce110_hw_sequencer_construct(dc);

	dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
	dc->hwss.apply_ctx_for_surface = dce60_apply_ctx_for_surface;
	dc->hwss.cursor_lock = dce60_pipe_control_lock;
	dc->hwss.pipe_control_lock = dce60_pipe_control_lock;
	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
linux-master
drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
/* * Copyright 2020 Mauro Rossi <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include <linux/slab.h> #include "dce/dce_6_0_d.h" #include "dce/dce_6_0_sh_mask.h" #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "irq/dce60/irq_service_dce60.h" #include "dce110/dce110_timing_generator.h" #include "dce110/dce110_resource.h" #include "dce60/dce60_timing_generator.h" #include "dce/dce_mem_input.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "dce60/dce60_hw_sequencer.h" #include "dce100/dce100_resource.h" #include "dce/dce_panel_cntl.h" #include "reg_helper.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" /* TODO remove this include */ #include "dce60_resource.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_6_0_d.h" #include "gmc/gmc_6_0_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x1CCE #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE #endif #ifndef mmHPD_DC_HPD_CONTROL #define mmHPD_DC_HPD_CONTROL 0x189A #define mmHPD0_DC_HPD_CONTROL 0x189A #define 
mmHPD1_DC_HPD_CONTROL 0x18A2 #define mmHPD2_DC_HPD_CONTROL 0x18AA #define mmHPD3_DC_HPD_CONTROL 0x18B2 #define mmHPD4_DC_HPD_CONTROL 0x18BA #define mmHPD5_DC_HPD_CONTROL 0x18C2 #endif #define DCE11_DIG_FE_CNTL 0x4a00 #define DCE11_DIG_BE_CNTL 0x4a47 #define DCE11_DP_SEC 0x4ac3 static const struct dce110_timing_generator_offsets dce60_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL3 - mmDPG_PIPE_ARBITRATION_CONTROL3), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL3 - mmDPG_PIPE_ARBITRATION_CONTROL3), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL3 - mmDPG_PIPE_ARBITRATION_CONTROL3), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL3 - mmDPG_PIPE_ARBITRATION_CONTROL3), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL3 - mmDPG_PIPE_ARBITRATION_CONTROL3), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL3 - mmDPG_PIPE_ARBITRATION_CONTROL3), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name #define ipp_regs(id)\ [id] = {\ IPP_COMMON_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct 
dce_ipp_mask ipp_mask = { IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE60(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE60(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE60(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE60_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5) }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST_DCE_BASE(id),\ .AFMT_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCE_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_60_REG_LIST(id),\ } static const struct 
dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_60(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_60(_MASK) }; static const struct dce110_aux_registers_shift aux_shift = { DCE10_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCE10_AUX_MASK_SH_LIST(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), }; static const struct dce_audio_shift audio_shift = { AUD_DCE60_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_DCE60_MASK_SH_LIST(_MASK) }; #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_80(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 2, .num_ddc = 6, }; static const struct resource_caps res_cap_61 = { .num_timing_generator = 4, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, }; static const struct resource_caps res_cap_64 = { 
.num_timing_generator = 2, .num_audio = 2, .num_stream_encoder = 2, .num_pll = 2, .num_ddc = 2, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE60_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE60(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE60(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x1918 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static int map_transmitter_id_to_phy_instance( enum transmitter transmitter) { switch (transmitter) { case TRANSMITTER_UNIPHY_A: return 0; case TRANSMITTER_UNIPHY_B: return 1; case TRANSMITTER_UNIPHY_C: return 2; case TRANSMITTER_UNIPHY_D: return 3; case TRANSMITTER_UNIPHY_E: return 4; case TRANSMITTER_UNIPHY_F: return 5; case TRANSMITTER_UNIPHY_G: return 6; default: ASSERT(0); return 0; } } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce60_audio_create(ctx, 
inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce60_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce60_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct output_pixel_processor *dce60_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce60_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } static struct dce_aux *dce60_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static struct dce_i2c_hw *dce60_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct dce_i2c_sw *dce60_i2c_sw_create( struct dc_context *ctx) { struct dce_i2c_sw 
*dce_i2c_sw = kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL); if (!dce_i2c_sw) return NULL; dce_i2c_sw_construct(dce_i2c_sw, ctx); return dce_i2c_sw; } static struct stream_encoder *dce60_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE6_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE6_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE6_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce60_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce60_stream_encoder_create, .create_hwseq = dce60_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE6_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE6_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE6_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce60_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input 
*dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce60_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 2; return &dce_mi->base; } static void dce60_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce60_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce60_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->prescaler_on = false; return &transform->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 297000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; static struct link_encoder *dce60_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; if (!enc110) return NULL; link_regs_id = map_transmitter_id_to_phy_instance(enc_init_data->transmitter); dce60_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct panel_cntl *dce60_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } static struct clock_source *dce60_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, 
const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void dce60_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static struct input_pixel_processor *dce60_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce60_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static void dce60_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce60_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != 
NULL) { dce60_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.dp_clock_source != NULL) dce60_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } static bool dce60_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { int i; bool at_least_one_pipe = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].stream) at_least_one_pipe = true; } if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { context->bw_ctx.bw.dce.dispclk_khz = 0; context->bw_ctx.bw.dce.yclk_khz = 0; } return true; } static bool dce60_validate_surface_sets( struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 1) return false; if (context->stream_status[i].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; } return true; } static enum dc_status dce60_validate_global( struct dc *dc, struct dc_state *context) { if (!dce60_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static void dce60_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); dce60_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce60_res_pool_funcs = { .destroy = dce60_destroy_resource_pool, .link_enc_create = dce60_link_encoder_create, .panel_cntl_create = 
dce60_panel_cntl_create, .validate_bandwidth = dce60_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, .validate_global = dce60_validate_global, .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; static bool dce60_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce60_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = res_cap.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 64; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clk_src_count = 1; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock 
source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce60_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce60_timing_generator_create( ctx, i, &dce60_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce60_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce60_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce60_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce60_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce60_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( 
"DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce60_hw_sequencer_construct(dc); return true; res_create_fail: dce60_resource_destruct(pool); return false; } struct resource_pool *dce60_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce60_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } static bool dce61_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_61; pool->base.funcs = &dce60_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap_61.num_timing_generator; pool->base.timing_generator_count = res_cap_61.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 64; dc->caps.is_apu = true; /************************************************* * Create resources * 
*************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce60_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = 
dce60_timing_generator_create( ctx, i, &dce60_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce60_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce60_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce60_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce60_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce60_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce60_hw_sequencer_construct(dc); return true; res_create_fail: dce60_resource_destruct(pool); return false; } struct resource_pool *dce61_create_resource_pool( 
uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce61_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } static bool dce64_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_64; pool->base.funcs = &dce60_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap_64.num_timing_generator; pool->base.timing_generator_count = res_cap_64.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 64; dc->caps.is_apu = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); pool->base.clk_src_count = 1; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for 
(i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce60_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce60_timing_generator_create( ctx, i, &dce60_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce60_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce60_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce60_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce60_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce60_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } 
pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce60_hw_sequencer_construct(dc); return true; res_create_fail: dce60_resource_destruct(pool); return false; } struct resource_pool *dce64_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce64_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "../virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_mem_input.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "dce100/dce100_hw_sequencer.h" #include "dce/dce_panel_cntl.h" #include "reg_helper.h" #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" #include "dce100_resource.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" #include "gmc/gmc_8_2_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef 
mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name #define ipp_regs(id)\ [id] = {\ IPP_DCE100_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE100(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static 
const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE100_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST_DCE_BASE(id),\ .AFMT_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), stream_enc_regs(6) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCE_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_100_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } 
static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_100_110(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, 
.fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults = { .enable_legacy_fast_update = true, }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x1918 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static int map_transmitter_id_to_phy_instance( enum transmitter transmitter) { switch (transmitter) { case TRANSMITTER_UNIPHY_A: return 0; case TRANSMITTER_UNIPHY_B: return 1; case TRANSMITTER_UNIPHY_C: return 2; case TRANSMITTER_UNIPHY_D: return 3; case TRANSMITTER_UNIPHY_E: return 4; case TRANSMITTER_UNIPHY_F: return 5; case TRANSMITTER_UNIPHY_G: return 6; default: ASSERT(0); return 0; } } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce100_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce110_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct stream_encoder *dce100_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, 
ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE10_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE10_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE10_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce100_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce100_stream_encoder_create, .create_hwseq = dce100_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE8_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE8_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE8_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static const struct dce110_aux_registers_shift aux_shift = { DCE10_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCE10_AUX_MASK_SH_LIST(_MASK) }; static struct mem_input *dce100_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); 
dce_mi->wa.single_head_rdreq_dmif_limit = 2; return &dce_mi->base; } static void dce100_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce100_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); return &transform->base; } static struct input_pixel_processor *dce100_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; static struct link_encoder *dce100_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; if (!enc110) return NULL; link_regs_id = map_transmitter_id_to_phy_instance(enc_init_data->transmitter); dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } static struct output_pixel_processor 
*dce100_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } static struct dce_aux *dce100_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static struct dce_i2c_hw *dce100_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct clock_source *dce100_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void dce100_clock_source_destroy(struct clock_source 
**clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static void dce100_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce100_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) dce100_clock_source_destroy(&pool->base.clock_sources[i]); } if (pool->base.dp_clock_source != NULL) dce100_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) dce_aud_destroy(&pool->base.audios[i]); } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) dal_irq_service_destroy(&pool->base.irqs); } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return 
DC_ERROR_UNEXPECTED; dce110_resource_build_pipe_hw_param(pipe_ctx); resource_build_info_frame(pipe_ctx); return DC_OK; } static bool dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { int i; bool at_least_one_pipe = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].stream) at_least_one_pipe = true; } if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { context->bw_ctx.bw.dce.dispclk_khz = 0; context->bw_ctx.bw.dce.yclk_khz = 0; } return true; } static bool dce100_validate_surface_sets( struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 1) return false; if (context->stream_status[i].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; } return true; } static enum dc_status dce100_validate_global( struct dc *dc, struct dc_state *context) { if (!dce100_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } enum dc_status dce100_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static void dce100_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); dce100_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (plane_state->format < 
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return DC_OK; return DC_FAIL_SURFACE_VALIDATE; } struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * below can happen in cases when stream encoder is acquired: * 1) for second MST display in chain, so preferred engine already * acquired; * 2) for another link, which preferred engine already acquired by any * MST configuration. * * If signal is of DP type and preferred engine not found, return last available * * TODO - This is just a patch up and a generic solution is * required for non DP connectors. 
*/ if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) return pool->stream_enc[j]; return NULL; } static const struct resource_funcs dce100_res_pool_funcs = { .destroy = dce100_destroy_resource_pool, .link_enc_create = dce100_link_encoder_create, .panel_cntl_create = dce100_panel_cntl_create, .validate_bandwidth = dce100_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, .validate_global = dce100_validate_global, .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; static bool dce100_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce100_res_pool_funcs; pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); 
BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( ctx, i, &dce100_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce100_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce100_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); 
dm_error( "DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce100_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce100_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce100_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce100_hw_sequencer_construct(dc); return true; res_create_fail: dce100_resource_destruct(pool); return false; } struct resource_pool *dce100_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce100_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
/* source: linux-master — drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
#include "clk_mgr.h"
#include "dce100_hw_sequencer.h"
#include "resource.h"

#include "dce110/dce110_hw_sequencer.h"

/* include DCE10 register header files */
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

/* Per-controller register offsets relative to the instance-0 registers. */
struct dce100_hw_seq_reg_offsets {
	uint32_t blnd;
	uint32_t crtc;
};

static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
{
	.crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
},
{
	.crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
},
{
	.crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
},
{
	.crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
},
{
	.crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
},
{
	.crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
}
};

/* Translate an instance-0 CRTC register address to controller `id`'s copy. */
#define HW_REG_CRTC(reg, id)\
	(reg + reg_offsets[id].crtc)

/*******************************************************************************
 * Private definitions
 ******************************************************************************/
/***************************PIPE_CONTROL***********************************/

/*
 * Drive pipe power gating through the VBIOS command table.
 *
 * Maps the requested gating control to a BIOS pipe action and invokes
 * enable_disp_power_gating for the controller (BIOS controllers are
 * 1-based, hence controller_id + 1). The INIT action is only issued for
 * controller 0. Returns true when the BIOS call reports BP_RESULT_OK.
 */
bool dce100_enable_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	enum bp_result bp_result = BP_RESULT_OK;
	enum bp_pipe_control_action cntl;
	struct dc_context *ctx = dc->ctx;

	if (power_gating == PIPE_GATING_CONTROL_INIT)
		cntl = ASIC_PIPE_INIT;
	else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
		cntl = ASIC_PIPE_ENABLE;
	else
		cntl = ASIC_PIPE_DISABLE;

	if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)){
		bp_result = dcb->funcs->enable_disp_power_gating(
						dcb, controller_id + 1, cntl);

		/* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
		 * by default when command table is called
		 */
		dm_write_reg(ctx,
			HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
			0);
	}

	if (bp_result == BP_RESULT_OK)
		return true;
	else
		return false;
}

/*
 * Before a mode change: program safe (maximal) display marks, then let the
 * clock manager raise clocks for the new context (pre-change path).
 */
void dce100_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);
}

/*
 * After a mode change: same safe-marks programming, but allow the clock
 * manager to lower clocks (post-change path, safe_to_lower = true).
 */
void dce100_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
}

/**************************************************************************/

/* Install the DCE110 sequencer, then override the DCE10-specific hooks. */
void dce100_hw_sequencer_construct(struct dc *dc)
{
	dce110_hw_sequencer_construct(dc);

	dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
/* source: linux-master — drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c */
// SPDX-License-Identifier: MIT /* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "dcn32/dcn32_init.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn32/dcn32_resource.h" #include "dcn321_resource.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" #include "dml/dcn321/dcn321_fpu.h" #include "dcn10/dcn10_ipp.h" #include "dcn30/dcn30_hubbub.h" #include "dcn31/dcn31_hubbub.h" #include "dcn32/dcn32_hubbub.h" #include "dcn32/dcn32_mpc.h" #include "dcn32/dcn32_hubp.h" #include "irq/dcn32/irq_service_dcn32.h" #include "dcn32/dcn32_dpp.h" #include "dcn32/dcn32_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn30/dcn30_opp.h" #include "dcn20/dcn20_dsc.h" #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn32/dcn32_dio_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" #include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dcn321_dio_link_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" #include "virtual/virtual_stream_encoder.h" #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" #include "link.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" #include "dcn32/dcn32_mmhubbub.h" #include "dcn/dcn_3_2_1_offset.h" #include "dcn/dcn_3_2_1_sh_mask.h" #include "nbio/nbio_4_3_0_offset.h" #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" #define DC_LOGGER_INIT(logger) enum dcn321_clk_src_array_id { DCN321_CLK_SRC_PLL0, 
DCN321_CLK_SRC_PLL1, DCN321_CLK_SRC_PLL2, DCN321_CLK_SRC_PLL3, DCN321_CLK_SRC_PLL4, DCN321_CLK_SRC_TOTAL }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ #define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SR_ARR(reg_name, id)\ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SR_ARR_INIT(reg_name, id, value)\ REG_STRUCT[id].reg_name = value #define SRI(reg_name, block, id)\ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI_ARR(reg_name, block, id)\ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SR_ARR_I2C(reg_name, id) \ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name #define SRI_ARR_I2C(reg_name, block, id)\ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI_ARR_ALPHABET(reg_name, block, index, id)\ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI2(reg_name, block, id)\ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SRI2_ARR(reg_name, block, id)\ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SRIR(var_name, reg_name, block, id)\ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_ARR_2(reg_name, block, id, inst)\ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## 
reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_MPC_RMU(reg_name, block, id)\ .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_DWB(reg_name, temp_name, block, id)\ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## temp_name #define DCCG_SRII(reg_name, block, id)\ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SF_DWB2(reg_name, block, id, field_name, post_fix) \ .field_name = reg_name ## __ ## field_name ## post_fix #define VUPDATE_SRII(reg_name, block, id)\ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ reg ## reg_name ## _ ## block ## id /* NBIO */ #define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ regBIF_BX0_ ## reg_name #define NBIO_SR_ARR(reg_name, id)\ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ regBIF_BX0_ ## reg_name #define CTX ctx #define REG(reg_name) \ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) static struct bios_registers bios_regs; #define bios_regs_init() \ ( \ NBIO_SR(BIOS_SCRATCH_3),\ NBIO_SR(BIOS_SCRATCH_6)\ ) #define clk_src_regs_init(index, pllid)\ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) static struct dce110_clk_src_regs clk_src_regs[5]; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK) }; #define abm_regs_init(id)\ ABM_DCN32_REG_LIST_RI(id) static struct dce_abm_registers abm_regs[4]; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dce_abm_mask 
abm_mask = { ABM_MASK_SH_LIST_DCN32(_MASK) }; #define audio_regs_init(id)\ AUD_COMMON_REG_LIST_RI(id) static struct dce_audio_registers audio_regs[5]; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define vpg_regs_init(id)\ VPG_DCN3_REG_LIST_RI(id) static struct dcn30_vpg_registers vpg_regs[10]; static const struct dcn30_vpg_shift vpg_shift = { DCN3_VPG_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_vpg_mask vpg_mask = { DCN3_VPG_MASK_SH_LIST(_MASK) }; #define afmt_regs_init(id)\ AFMT_DCN3_REG_LIST_RI(id) static struct dcn30_afmt_registers afmt_regs[6]; static const struct dcn30_afmt_shift afmt_shift = { DCN3_AFMT_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_afmt_mask afmt_mask = { DCN3_AFMT_MASK_SH_LIST(_MASK) }; #define apg_regs_init(id)\ APG_DCN31_REG_LIST_RI(id) static struct dcn31_apg_registers apg_regs[4]; static const struct dcn31_apg_shift apg_shift = { DCN31_APG_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_apg_mask apg_mask = { DCN31_APG_MASK_SH_LIST(_MASK) }; #define stream_enc_regs_init(id)\ SE_DCN32_REG_LIST_RI(id) static struct dcn10_stream_enc_registers stream_enc_regs[5]; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN32(_MASK) }; #define aux_regs_init(id)\ DCN2_AUX_REG_LIST_RI(id) static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; #define hpd_regs_init(id)\ HPD_REG_LIST_RI(id) static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; #define link_regs_init(id, phyid)\ ( \ 
LE_DCN31_REG_LIST_RI(id), \ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ ) /*DPCS_DCN31_REG_LIST(id),*/ \ static struct dcn10_link_enc_registers link_enc_regs[5]; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ // DPCS_DCN31_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ // DPCS_DCN31_MASK_SH_LIST(_MASK) }; #define hpo_dp_stream_encoder_reg_init(id)\ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) }; #define hpo_dp_link_encoder_reg_init(id)\ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) /*DCN3_1_RDPCSTX_REG_LIST(0),*/ /*DCN3_1_RDPCSTX_REG_LIST(1),*/ /*DCN3_1_RDPCSTX_REG_LIST(2),*/ /*DCN3_1_RDPCSTX_REG_LIST(3),*/ static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) }; #define dpp_regs_init(id)\ DPP_REG_LIST_DCN30_COMMON_RI(id) static struct dcn3_dpp_registers dpp_regs[4]; static const struct dcn3_dpp_shift tf_shift = { DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT) }; static const struct dcn3_dpp_mask tf_mask = { DPP_REG_LIST_SH_MASK_DCN30_COMMON(_MASK) }; #define opp_regs_init(id)\ OPP_REG_LIST_DCN30_RI(id) static struct dcn20_opp_registers opp_regs[4]; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; #define aux_engine_regs_init(id) \ ( \ AUX_COMMON_REG_LIST0_RI(id), SR_ARR_INIT(AUXN_IMPCAL, 
id, 0), \ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\ ) static struct dce110_aux_registers aux_engine_regs[5]; static const struct dce110_aux_registers_shift aux_shift = { DCN_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCN_AUX_MASK_SH_LIST(_MASK) }; #define dwbc_regs_dcn3_init(id)\ DWBC_COMMON_REG_LIST_DCN30_RI(id) static struct dcn30_dwbc_registers dwbc30_regs[1]; static const struct dcn30_dwbc_shift dwbc30_shift = { DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_dwbc_mask dwbc30_mask = { DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define mcif_wb_regs_dcn3_init(id)\ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id) static struct dcn30_mmhubbub_registers mcif_wb30_regs[1]; static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK) }; #define dsc_regsDCN20_init(id)\ DSC_REG_LIST_DCN20_RI(id) static struct dcn20_dsc_registers dsc_regs[4]; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; static struct dcn30_mpc_registers mpc_regs; #define dcn_mpc_regs_init()\ MPC_REG_LIST_DCN3_2_RI(0),\ MPC_REG_LIST_DCN3_2_RI(1),\ MPC_REG_LIST_DCN3_2_RI(2),\ MPC_REG_LIST_DCN3_2_RI(3),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) static const struct dcn30_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn30_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) }; #define optc_regs_init(id)\ OPTC_COMMON_REG_LIST_DCN3_2_RI(id) static struct dcn_optc_registers 
optc_regs[4];

static const struct dcn_optc_shift optc_shift = {
	OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
};

static const struct dcn_optc_mask optc_mask = {
	OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};

#define hubp_regs_init(id) \
	HUBP_REG_LIST_DCN32_RI(id)

static struct dcn_hubp2_registers hubp_regs[4];

static const struct dcn_hubp2_shift hubp_shift = {
	HUBP_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dcn_hubp2_mask hubp_mask = {
	HUBP_MASK_SH_LIST_DCN32(_MASK)
};

static struct dcn_hubbub_registers hubbub_reg;

#define hubbub_reg_init()\
	HUBBUB_REG_LIST_DCN32_RI(0)

static const struct dcn_hubbub_shift hubbub_shift = {
	HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
	HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};

static struct dccg_registers dccg_regs;

#define dccg_regs_init()\
	DCCG_REG_LIST_DCN32_RI()

static const struct dccg_shift dccg_shift = {
	DCCG_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dccg_mask dccg_mask = {
	DCCG_MASK_SH_LIST_DCN32(_MASK)
};

#define SRII2(reg_name_pre, reg_name_post, id)\
	.reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
	## id ## _ ## reg_name_post ## _BASE_IDX) + \
	reg ## reg_name_pre ## id ## _ ## reg_name_post

/* HW sequencer register list: per-OTG pixel-rate controls (4 instances)
 * plus DOMAIN* power-gating config/status and misc display-core registers.
 */
#define HWSEQ_DCN32_REG_LIST()\
	SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
	SR(DIO_MEM_PWR_CTRL), \
	SR(ODM_MEM_PWR_CTRL3), \
	SR(MMHUBBUB_MEM_PWR_CNTL), \
	SR(DCCG_GATE_DISABLE_CNTL), \
	SR(DCCG_GATE_DISABLE_CNTL2), \
	SR(DCFCLK_CNTL),\
	SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
	SRII(PIXEL_RATE_CNTL, OTG, 0), \
	SRII(PIXEL_RATE_CNTL, OTG, 1),\
	SRII(PIXEL_RATE_CNTL, OTG, 2),\
	SRII(PIXEL_RATE_CNTL, OTG, 3),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
	SR(MICROSECOND_TIME_BASE_DIV), \
	SR(MILLISECOND_TIME_BASE_DIV), \
	SR(DISPCLK_FREQ_CHANGE_CNTL), \
	SR(RBBMIF_TIMEOUT_DIS), \
	SR(RBBMIF_TIMEOUT_DIS_2), \
	SR(DCHUBBUB_CRC_CTRL), \
	SR(DPP_TOP0_DPP_CRC_CTRL), \
	SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
	SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
	SR(MPC_CRC_CTRL), \
	SR(MPC_CRC_RESULT_GB), \
	SR(MPC_CRC_RESULT_C), \
	SR(MPC_CRC_RESULT_AR), \
	SR(DOMAIN0_PG_CONFIG), \
	SR(DOMAIN1_PG_CONFIG), \
	SR(DOMAIN2_PG_CONFIG), \
	SR(DOMAIN3_PG_CONFIG), \
	SR(DOMAIN16_PG_CONFIG), \
	SR(DOMAIN17_PG_CONFIG), \
	SR(DOMAIN18_PG_CONFIG), \
	SR(DOMAIN19_PG_CONFIG), \
	SR(DOMAIN0_PG_STATUS), \
	SR(DOMAIN1_PG_STATUS), \
	SR(DOMAIN2_PG_STATUS), \
	SR(DOMAIN3_PG_STATUS), \
	SR(DOMAIN16_PG_STATUS), \
	SR(DOMAIN17_PG_STATUS), \
	SR(DOMAIN18_PG_STATUS), \
	SR(DOMAIN19_PG_STATUS), \
	SR(D1VGA_CONTROL), \
	SR(D2VGA_CONTROL), \
	SR(D3VGA_CONTROL), \
	SR(D4VGA_CONTROL), \
	SR(D5VGA_CONTROL), \
	SR(D6VGA_CONTROL), \
	SR(DC_IP_REQUEST_CNTL), \
	SR(AZALIA_AUDIO_DTO), \
	SR(AZALIA_CONTROLLER_CLOCK_GATING)

static struct dce_hwseq_registers hwseq_reg;

#define hwseq_reg_init()\
	HWSEQ_DCN32_REG_LIST()

#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
	HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
	HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
	HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
	HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh)

static const struct dce_hwseq_shift hwseq_shift = {
	HWSEQ_DCN32_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
	HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};

#define vmid_regs_init(id)\
	DCN20_VMID_REG_LIST_RI(id)

static struct dcn_vmid_registers vmid_regs[16];

static const struct dcn20_vmid_shift vmid_shifts = {
	DCN20_VMID_MASK_SH_LIST(__SHIFT)
};

static const struct dcn20_vmid_mask vmid_masks = {
	DCN20_VMID_MASK_SH_LIST(_MASK)
};

/* Instance counts for the DCN 3.2.1 resource pool; these bound the static
 * register tables above and the create loops below.
 */
static const struct resource_caps res_cap_dcn321 = {
	.num_timing_generator = 4,
	.num_opp = 4,
	.num_video_plane = 4,
	.num_audio = 5,
	.num_stream_encoder = 5,
	.num_hpo_dp_stream_encoder = 4,
	.num_hpo_dp_link_encoder = 2,
	.num_pll = 5,
	.num_dwb = 1,
	.num_ddc = 5,
	.num_vmid = 16,
	.num_mpc_3dlut = 4,
	.num_dsc = 4,
};

static const struct dc_plane_cap plane_cap = {
	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
	.per_pixel_alpha = true,

	.pixel_format_support = {
			.argb8888 = true,
			.nv12 = true,
			.fp16 = true,
			.p010 = true,
			.ayuv = false,
	},

	.max_upscale_factor = {
			.argb8888 = 16000,
			.nv12 = 16000,
			.fp16 = 16000
	},

	// 6:1 downscaling ratio: 1000/6 = 166.666
	.max_downscale_factor = {
			.argb8888 = 167,
			.nv12 = 167,
			.fp16 = 167
	},
	64,
	64
};

/* Driver-side debug/feature defaults applied at pool construction. */
static const struct dc_debug_options debug_defaults_drv = {
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = false,
	.pipe_split_policy = MPC_SPLIT_AVOID,
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,
	.performance_trace = false,
	.max_downscale_src_width = 7680,/*upto 8K*/
	.disable_pplib_wm_range = false,
	.scl_reset_length10 = true,
	.sanity_checks = false,
	.underflow_assert_delay_us = 0xFFFFFFFF,
	.dwb_fi_phase = -1, // -1 = disable,
	.dmub_command_table = true,
	.enable_mem_low_power = {
		.bits = {
			.vga = false,
			.i2c = false,
			.dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
			.dscl = false,
			.cm = false,
			.mpc = false,
			.optc = true,
		}
	},
	.use_max_lb = true,
	.force_disable_subvp = false,
	.exit_idle_opt_for_cursor_updates = true,
	.enable_single_display_2to1_odm_policy = true,

	/*must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
	.enable_double_buffered_dsc_pg_support = true,
	.enable_dp_dig_pixel_rate_div_policy = 1,
	.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
	.alloc_extra_way_for_cursor = true,
	.min_prefetch_in_strobe_ns = 60000, // 60us
	.disable_unbounded_requesting = false,
	.override_dispclk_programming = true,
	.disable_fpo_optimizations = false,
	.fpo_vactive_margin_us = 2000, // 2000us
	.disable_fpo_vactive = false,
	.disable_boot_optimizations = false,
	.disable_subvp_high_refresh = false,
	.fpo_vactive_min_active_margin_us = 200,
	.fpo_vactive_max_blank_us = 1000,
	.enable_legacy_fast_update = false,
	.disable_dc_mode_overwrite = true,
};

/* Allocate and construct one DCE110-style AUX engine instance.
 * Returns NULL on allocation failure.
 */
static struct dce_aux *dcn321_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct aux_engine_dce110 *aux_engine =
		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);

	if (!aux_engine)
		return NULL;

#undef REG_STRUCT
#define REG_STRUCT aux_engine_regs
	aux_engine_regs_init(0),
	aux_engine_regs_init(1),
	aux_engine_regs_init(2),
	aux_engine_regs_init(3),
	aux_engine_regs_init(4);

	dce110_aux_engine_construct(aux_engine, ctx, inst,
				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
				    &aux_engine_regs[inst],
					&aux_mask,
					&aux_shift,
					ctx->dc->caps.extended_aux_timeout_support);

	return &aux_engine->base;
}

#define i2c_inst_regs_init(id)\
	I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)

static struct dce_i2c_registers i2c_hw_regs[5];

static const struct dce_i2c_shift i2c_shifts = {
	I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dce_i2c_mask i2c_masks = {
	I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};

/* Allocate and construct one HW I2C engine.  Note the register tables are
 * initialized with 1-based instance ids (1..5), matching the RI macro usage.
 */
static struct dce_i2c_hw *dcn321_i2c_hw_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce_i2c_hw *dce_i2c_hw =
		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);

	if (!dce_i2c_hw)
		return NULL;

#undef REG_STRUCT
#define REG_STRUCT i2c_hw_regs
	i2c_inst_regs_init(1),
	i2c_inst_regs_init(2),
	i2c_inst_regs_init(3),
	i2c_inst_regs_init(4),
	i2c_inst_regs_init(5);

	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
				    &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);

	return dce_i2c_hw;
}

/* Create a clock source; frees the allocation and returns NULL if the
 * DCN31 construct step fails.
 */
static struct clock_source *dcn321_clock_source_create(
		struct dc_context *ctx,
		struct dc_bios *bios,
		enum clock_source_id id,
		const struct dce110_clk_src_regs *regs,
		bool dp_clk_src)
{
	struct dce110_clk_src *clk_src =
		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);

	if (!clk_src)
		return NULL;

	if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		clk_src->base.dp_clk_src = dp_clk_src;
		return &clk_src->base;
	}

	kfree(clk_src);
	BREAK_TO_DEBUGGER();
	return NULL;
}

/* Create the HUBBUB block and wire up its per-VMID register views. */
static struct hubbub *dcn321_hubbub_create(struct dc_context *ctx)
{
	int i;

	struct dcn20_hubbub *hubbub2 = kzalloc(sizeof(struct dcn20_hubbub),
					  GFP_KERNEL);

	if (!hubbub2)
		return NULL;

#undef REG_STRUCT
#define REG_STRUCT hubbub_reg
	hubbub_reg_init();

#undef REG_STRUCT
#define REG_STRUCT vmid_regs
	vmid_regs_init(0),
	vmid_regs_init(1),
	vmid_regs_init(2),
vmid_regs_init(3), vmid_regs_init(4), vmid_regs_init(5), vmid_regs_init(6), vmid_regs_init(7), vmid_regs_init(8), vmid_regs_init(9), vmid_regs_init(10), vmid_regs_init(11), vmid_regs_init(12), vmid_regs_init(13), vmid_regs_init(14), vmid_regs_init(15); hubbub32_construct(hubbub2, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask, ctx->dc->dml.ip.det_buffer_size_kbytes, ctx->dc->dml.ip.pixel_chunk_size_kbytes, ctx->dc->dml.ip.config_return_buffer_size_in_kbytes); for (i = 0; i < res_cap_dcn321.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub2->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } return &hubbub2->base; } static struct hubp *dcn321_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; #undef REG_STRUCT #define REG_STRUCT hubp_regs hubp_regs_init(0), hubp_regs_init(1), hubp_regs_init(2), hubp_regs_init(3); if (hubp32_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } static void dcn321_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN30_DPP(*dpp)); *dpp = NULL; } static struct dpp *dcn321_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp3 = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); if (!dpp3) return NULL; #undef REG_STRUCT #define REG_STRUCT dpp_regs dpp_regs_init(0), dpp_regs_init(1), dpp_regs_init(2), dpp_regs_init(3); if (dpp32_construct(dpp3, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) return &dpp3->base; BREAK_TO_DEBUGGER(); kfree(dpp3); return NULL; } static struct mpc *dcn321_mpc_create( struct dc_context *ctx, int num_mpcc, int num_rmu) { struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); if (!mpc30) return NULL; #undef REG_STRUCT #define REG_STRUCT mpc_regs dcn_mpc_regs_init(); dcn32_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, 
num_mpcc, num_rmu); return &mpc30->base; } static struct output_pixel_processor *dcn321_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp2 = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp2) { BREAK_TO_DEBUGGER(); return NULL; } #undef REG_STRUCT #define REG_STRUCT opp_regs opp_regs_init(0), opp_regs_init(1), opp_regs_init(2), opp_regs_init(3); dcn20_opp_construct(opp2, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp2->base; } static struct timing_generator *dcn321_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; #undef REG_STRUCT #define REG_STRUCT optc_regs optc_regs_init(0), optc_regs_init(1), optc_regs_init(2), optc_regs_init(3); tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &optc_regs[instance]; tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; dcn32_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; static struct link_encoder *dcn321_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; #undef REG_STRUCT #define REG_STRUCT link_enc_aux_regs aux_regs_init(0), aux_regs_init(1), aux_regs_init(2), aux_regs_init(3), aux_regs_init(4); #undef REG_STRUCT #define REG_STRUCT link_enc_hpd_regs hpd_regs_init(0), hpd_regs_init(1), hpd_regs_init(2), hpd_regs_init(3), hpd_regs_init(4); #undef REG_STRUCT #define REG_STRUCT link_enc_regs link_regs_init(0, A), 
link_regs_init(1, B), link_regs_init(2, C), link_regs_init(3, D), link_regs_init(4, E); dcn321_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc20->enc10.base; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS, FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *dcn321_create_audio( struct dc_context *ctx, unsigned int inst) { #undef REG_STRUCT #define REG_STRUCT audio_regs audio_regs_init(0), audio_regs_init(1), audio_regs_init(2), audio_regs_init(3), audio_regs_init(4); return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct vpg *dcn321_vpg_create( struct dc_context *ctx, uint32_t inst) { struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); if (!vpg3) return NULL; #undef REG_STRUCT #define REG_STRUCT vpg_regs vpg_regs_init(0), vpg_regs_init(1), vpg_regs_init(2), vpg_regs_init(3), vpg_regs_init(4), vpg_regs_init(5), vpg_regs_init(6), vpg_regs_init(7), vpg_regs_init(8), vpg_regs_init(9); vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); return &vpg3->base; } static struct afmt *dcn321_afmt_create( struct dc_context *ctx, uint32_t inst) { struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); if (!afmt3) return NULL; #undef REG_STRUCT #define REG_STRUCT afmt_regs afmt_regs_init(0), afmt_regs_init(1), afmt_regs_init(2), afmt_regs_init(3), afmt_regs_init(4), afmt_regs_init(5); afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); return &afmt3->base; } static struct apg *dcn321_apg_create( struct dc_context *ctx, uint32_t inst) { struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), 
GFP_KERNEL); if (!apg31) return NULL; #undef REG_STRUCT #define REG_STRUCT apg_regs apg_regs_init(0), apg_regs_init(1), apg_regs_init(2), apg_regs_init(3); apg31_construct(apg31, ctx, inst, &apg_regs[inst], &apg_shift, &apg_mask); return &apg31->base; } static struct stream_encoder *dcn321_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; struct afmt *afmt; int vpg_inst; int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ if (eng_id <= ENGINE_ID_DIGF) { vpg_inst = eng_id; afmt_inst = eng_id; } else return NULL; enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn321_vpg_create(ctx, vpg_inst); afmt = dcn321_afmt_create(ctx, afmt_inst); if (!enc1 || !vpg || !afmt) { kfree(enc1); kfree(vpg); kfree(afmt); return NULL; } #undef REG_STRUCT #define REG_STRUCT stream_enc_regs stream_enc_regs_init(0), stream_enc_regs_init(1), stream_enc_regs_init(2), stream_enc_regs_init(3), stream_enc_regs_init(4); dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static struct hpo_dp_stream_encoder *dcn321_hpo_dp_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; struct vpg *vpg; struct apg *apg; uint32_t hpo_dp_inst; uint32_t vpg_inst; uint32_t apg_inst; ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; /* Mapping of VPG register blocks to HPO DP block instance: * VPG[6] -> HPO_DP[0] * VPG[7] -> HPO_DP[1] * VPG[8] -> HPO_DP[2] * VPG[9] -> HPO_DP[3] */ vpg_inst = hpo_dp_inst + 6; /* Mapping of APG register blocks to HPO DP block instance: * APG[0] -> HPO_DP[0] * APG[1] -> HPO_DP[1] * APG[2] -> HPO_DP[2] * APG[3] -> HPO_DP[3] */ apg_inst = hpo_dp_inst; /* allocate HPO stream encoder and create VPG sub-block */ hpo_dp_enc31 = 
kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); vpg = dcn321_vpg_create(ctx, vpg_inst); apg = dcn321_apg_create(ctx, apg_inst); if (!hpo_dp_enc31 || !vpg || !apg) { kfree(hpo_dp_enc31); kfree(vpg); kfree(apg); return NULL; } #undef REG_STRUCT #define REG_STRUCT hpo_dp_stream_enc_regs hpo_dp_stream_encoder_reg_init(0), hpo_dp_stream_encoder_reg_init(1), hpo_dp_stream_encoder_reg_init(2), hpo_dp_stream_encoder_reg_init(3); dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, hpo_dp_inst, eng_id, vpg, apg, &hpo_dp_stream_enc_regs[hpo_dp_inst], &hpo_dp_se_shift, &hpo_dp_se_mask); return &hpo_dp_enc31->base; } static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create( uint8_t inst, struct dc_context *ctx) { struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); #undef REG_STRUCT #define REG_STRUCT hpo_dp_link_enc_regs hpo_dp_link_encoder_reg_init(0), hpo_dp_link_encoder_reg_init(1); hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], &hpo_dp_le_shift, &hpo_dp_le_mask); return &hpo_dp_enc31->base; } static struct dce_hwseq *dcn321_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); #undef REG_STRUCT #define REG_STRUCT hwseq_reg hwseq_reg_init(); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn321_create_audio, .create_stream_encoder = dcn321_stream_encoder_create, .create_hpo_dp_stream_encoder = dcn321_hpo_dp_stream_encoder_create, .create_hpo_dp_link_encoder = dcn321_hpo_dp_link_encoder_create, .create_hwseq = dcn321_hwseq_create, }; static void dcn321_resource_destruct(struct dcn321_resource_pool *pool) { unsigned int i; for (i = 0; i < 
pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			/* stream encoders own their VPG/AFMT sub-blocks; free those first */
			if (pool->base.stream_enc[i]->vpg != NULL) {
				kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
				pool->base.stream_enc[i]->vpg = NULL;
			}
			if (pool->base.stream_enc[i]->afmt != NULL) {
				kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
				pool->base.stream_enc[i]->afmt = NULL;
			}
			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
			pool->base.stream_enc[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
		if (pool->base.hpo_dp_stream_enc[i] != NULL) {
			/* HPO stream encoders own VPG/APG sub-blocks */
			if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
				kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
				pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
			}
			if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
				kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
				pool->base.hpo_dp_stream_enc[i]->apg = NULL;
			}
			kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
			pool->base.hpo_dp_stream_enc[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
		if (pool->base.hpo_dp_link_enc[i] != NULL) {
			kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
			pool->base.hpo_dp_link_enc[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		if (pool->base.dscs[i] != NULL)
			dcn20_dsc_destroy(&pool->base.dscs[i]);
	}

	if (pool->base.mpc != NULL) {
		kfree(TO_DCN20_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}

	if (pool->base.hubbub != NULL) {
		kfree(TO_DCN20_HUBBUB(pool->base.hubbub));
		pool->base.hubbub = NULL;
	}

	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.dpps[i] != NULL)
			dcn321_dpp_destroy(&pool->base.dpps[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.hubps[i] != NULL) {
			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
			pool->base.hubps[i] = NULL;
		}

		/* NOTE(review): irq-service teardown sits inside the per-pipe
		 * loop; the NULL guard keeps repeated iterations harmless
		 * assuming dal_irq_service_destroy() NULLs the handle —
		 * TODO confirm, it reads like it belongs outside the loop.
		 */
		if (pool->base.irqs != NULL)
			dal_irq_service_destroy(&pool->base.irqs);
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
			kfree(pool->base.hw_i2cs[i]);
			pool->base.hw_i2cs[i] = NULL;
		}
		if (pool->base.sw_i2cs[i] != NULL) {
			kfree(pool->base.sw_i2cs[i]);
			pool->base.sw_i2cs[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
		if (pool->base.dwbc[i] != NULL) {
			kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
			pool->base.dwbc[i] = NULL;
		}
		if (pool->base.mcif_wb[i] != NULL) {
			kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
			pool->base.mcif_wb[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(&pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
		if (pool->base.mpc_lut[i] != NULL) {
			dc_3dlut_func_release(pool->base.mpc_lut[i]);
			pool->base.mpc_lut[i] = NULL;
		}
		if (pool->base.mpc_shaper[i] != NULL) {
			dc_transfer_func_release(pool->base.mpc_shaper[i]);
			pool->base.mpc_shaper[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		if (pool->base.multiple_abms[i] != NULL)
			dce_abm_destroy(&pool->base.multiple_abms[i]);
	}

	if (pool->base.psr != NULL)
		dmub_psr_destroy(&pool->base.psr);

	if (pool->base.dccg != NULL)
		dcn_dccg_destroy(&pool->base.dccg);

	if
(pool->base.oem_device != NULL) { struct dc *dc = pool->base.oem_device->ctx->dc; dc->link_srv->destroy_ddc_service(&pool->base.oem_device); } } static bool dcn321_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t dwb_count = pool->res_cap->num_dwb; for (i = 0; i < dwb_count; i++) { struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); if (!dwbc30) { dm_error("DC: failed to create dwbc30!\n"); return false; } #undef REG_STRUCT #define REG_STRUCT dwbc30_regs dwbc_regs_dcn3_init(0); dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); pool->dwbc[i] = &dwbc30->base; } return true; } static bool dcn321_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t dwb_count = pool->res_cap->num_dwb; for (i = 0; i < dwb_count; i++) { struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); if (!mcif_wb30) { dm_error("DC: failed to create mcif_wb30!\n"); return false; } #undef REG_STRUCT #define REG_STRUCT mcif_wb30_regs mcif_wb_regs_dcn3_init(0); dcn32_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); pool->mcif_wb[i] = &mcif_wb30->base; } return true; } static struct display_stream_compressor *dcn321_dsc_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); return NULL; } #undef REG_STRUCT #define REG_STRUCT dsc_regs dsc_regsDCN20_init(0), dsc_regsDCN20_init(1), dsc_regsDCN20_init(2), dsc_regsDCN20_init(3); dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); dsc->max_image_width = 6016; return &dsc->base; } static void dcn321_destroy_resource_pool(struct resource_pool **pool) { struct dcn321_resource_pool *dcn321_pool = TO_DCN321_RES_POOL(*pool); dcn321_resource_destruct(dcn321_pool); kfree(dcn321_pool); *pool = NULL; } static struct dc_cap_funcs cap_funcs = { 
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); DC_FP_END(); } static struct resource_funcs dcn321_res_pool_funcs = { .destroy = dcn321_destroy_resource_pool, .link_enc_create = dcn321_link_encoder_create, .link_enc_create_minimal = NULL, .panel_cntl_create = dcn32_panel_cntl_create, .validate_bandwidth = dcn32_validate_bandwidth, .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, .set_mcif_arb_params = dcn30_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut, .release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut, .update_bw_bounding_box = dcn321_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, .remove_phantom_pipes = dcn32_remove_phantom_pipes, .retain_phantom_pipes = dcn32_retain_phantom_pipes, .save_mall_state = dcn32_save_mall_state, .restore_mall_state = dcn32_restore_mall_state, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = REG_READ(CC_DC_PIPE_DIS); /* DCN321 support max 4 pipes */ value = value & 0xf; return value; } static bool dcn321_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn321_resource_pool *pool) { int i, j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; 
	/* continuation of dcn321_resource_construct() locals */
	struct ddc_service_init_data ddc_init_data = {0};
	uint32_t pipe_fuses = 0;
	uint32_t num_pipes = 4;

	/* init register tables; *_init() macros target the REG_STRUCT table */
#undef REG_STRUCT
#define REG_STRUCT bios_regs
	bios_regs_init();

#undef REG_STRUCT
#define REG_STRUCT clk_src_regs
	clk_src_regs_init(0, A), clk_src_regs_init(1, B),
		clk_src_regs_init(2, C), clk_src_regs_init(3, D),
		clk_src_regs_init(4, E);

#undef REG_STRUCT
#define REG_STRUCT abm_regs
	abm_regs_init(0), abm_regs_init(1), abm_regs_init(2), abm_regs_init(3);

#undef REG_STRUCT
#define REG_STRUCT dccg_regs
	dccg_regs_init();

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap_dcn321;
	/* max number of pipes for ASIC before checking for pipe fuses */
	num_pipes = pool->base.res_cap->num_timing_generator;
	pipe_fuses = read_pipe_fuses(ctx);

	/* each set fuse bit removes one usable pipe */
	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
		if (pipe_fuses & 1 << i)
			num_pipes--;

	if (pipe_fuses & 1)
		ASSERT(0); //Unexpected - Pipe 0 should always be fully functional!

	if (pipe_fuses & CC_DC_PIPE_DIS__DC_FULL_DIS_MASK)
		ASSERT(0); //Entire DCN is harvested!

	/* within dml lib, initial value is hard coded, if ASIC pipe is fused, the
	 * value will be changed, update max_num_dpp and max_num_otg for dml.
	 */
	dcn3_21_ip.max_num_dpp = num_pipes;
	dcn3_21_ip.max_num_otg = num_pipes;

	pool->base.funcs = &dcn321_res_pool_funcs;

	/*************************************************
	 *  Resource + asic cap harcoding                *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
	pool->base.timing_generator_count = num_pipes;
	pool->base.pipe_count = num_pipes;
	pool->base.mpcc_count = num_pipes;
	dc->caps.max_downscale_ratio = 600;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
	/* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
	dc->caps.max_cursor_size = 64;
	dc->caps.min_horizontal_blanking_period = 80;
	dc->caps.dmdata_alloc_size = 2048;
	dc->caps.mall_size_per_mem_channel = 4;
	dc->caps.mall_size_total = 0;
	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
	dc->caps.cache_line_size = 64;
	dc->caps.cache_num_ways = 16;

	/* Calculate the available MALL space */
	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
		dc, dc->ctx->dc_bios->vram_info.num_chans) *
		dc->caps.mall_size_per_mem_channel * 1024 * 1024;
	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;

	/* SubVP (sub-viewport) tuning parameters, in microseconds/lines */
	dc->caps.subvp_fw_processing_delay_us = 15;
	dc->caps.subvp_drr_max_vblank_margin_us = 40;
	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
	dc->caps.subvp_swath_height_margin_lines = 16;
	dc->caps.subvp_pstate_allow_width_us = 20;
	dc->caps.subvp_vertical_int_margin_us = 30;
	dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin
	dc->caps.max_slave_planes = 2;
	dc->caps.max_slave_yuv_planes = 2;
	dc->caps.max_slave_rgb_planes = 2;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	dc->caps.dp_hpo = true;
	dc->caps.dp_hdmi21_pcon_support = true;
	dc->caps.edp_dsc_support = true;
	dc->caps.extended_aux_timeout_support = true;
	dc->caps.dmcub_support = true;
	dc->caps.max_v_total = (1 << 15) - 1;

	/* Color pipeline capabilities */
	dc->caps.color.dpp.dcn_arch = 1;
	dc->caps.color.dpp.input_lut_shared = 0;
	dc->caps.color.dpp.icsc = 1;
	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
	dc->caps.color.dpp.post_csc = 1;
	dc->caps.color.dpp.gamma_corr = 1;
	dc->caps.color.dpp.dgam_rom_for_yuv = 0;

	dc->caps.color.dpp.hw_3d_lut = 1;
	dc->caps.color.dpp.ogam_ram = 1;
	// no OGAM ROM on DCN2 and later ASICs
	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
	dc->caps.color.dpp.ocsc = 0;

	dc->caps.color.mpc.gamut_remap = 1;
	dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //4, configurable to be before or after BLND in MPCC
	dc->caps.color.mpc.ogam_ram = 1;
	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
	dc->caps.color.mpc.ocsc = 1;

	dc->config.dc_mode_clk_limit_support = true;

	/* read VBIOS LTTPR caps */
	{
		if (ctx->dc_bios->funcs->get_lttpr_caps) {
			enum bp_result bp_query_result;
			uint8_t is_vbios_lttpr_enable = 0;

			bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
			dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
		}

		/* interop bit is implicit */
		{
			dc->caps.vbios_lttpr_aware = true;
		}
	}

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;

	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(dc->vm_helper, 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	/* Clock Sources for Pixel Clock*/
	pool->base.clock_sources[DCN321_CLK_SRC_PLL0] =
			dcn321_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN321_CLK_SRC_PLL1] =
			dcn321_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN321_CLK_SRC_PLL2] =
			dcn321_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN321_CLK_SRC_PLL3] =
			dcn321_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);
	pool->base.clock_sources[DCN321_CLK_SRC_PLL4] =
			dcn321_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL4,
				&clk_src_regs[4], false);

	pool->base.clk_src_count = DCN321_CLK_SRC_TOTAL;

	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source =
			dcn321_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	/* DCCG */
	pool->base.dccg = dccg32_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	/* DML */
	dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);

	/* IRQ Service */
	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn32_create(&init_data);
	if (!pool->base.irqs)
		goto create_fail;

	/* HUBBUB */
	pool->base.hubbub = dcn321_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto create_fail;
	}

	/* HUBPs, DPPs, OPPs, TGs, ABMs */
	for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) {

		/* if pipe is disabled, skip instance of HW pipe,
		 * i.e, skip ASIC register instance
		 */
		if (pipe_fuses & 1 << i)
			continue;

		/* i = ASIC instance, j = index into the pool arrays */
		pool->base.hubps[j] = dcn321_hubp_create(ctx, i);
		if (pool->base.hubps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create hubps!\n");
			goto create_fail;
		}

		pool->base.dpps[j] = dcn321_dpp_create(ctx, i);
		if (pool->base.dpps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpps!\n");
			goto create_fail;
		}

		pool->base.opps[j] = dcn321_opp_create(ctx, i);
		if (pool->base.opps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto create_fail;
		}

		pool->base.timing_generators[j] = dcn321_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto create_fail;
		}

		pool->base.multiple_abms[j] = dmub_abm_create(ctx,
				&abm_regs[i],
				&abm_shift,
				&abm_mask);
		if (pool->base.multiple_abms[j] == NULL) {
			dm_error("DC: failed to create abm for pipe %d!\n", i);
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}

		/* index for resource pool arrays for next valid pipe */
		j++;
	}

	/* PSR */
	pool->base.psr = dmub_psr_create(ctx);
	if (pool->base.psr == NULL) {
		dm_error("DC: failed to create psr obj!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	/* MPCCs */
	pool->base.mpc = dcn321_mpc_create(ctx, pool->base.res_cap->num_timing_generator,
			pool->base.res_cap->num_mpc_3dlut);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto create_fail;
	}

	/* DSCs */
	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn321_dsc_create(ctx, i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n", i);
			goto create_fail;
		}
	}

	/* DWB */
	if (!dcn321_dwbc_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n");
		goto create_fail;
	}

	/* MMHUBBUB */
	if (!dcn321_mmhubbub_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n");
		goto create_fail;
	}

	/* AUX and I2C */
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn321_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create aux engine!!\n");
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn321_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create hw i2c!!\n");
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	/* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */
	if (!resource_construct(num_virtual_links, dc, &pool->base,
			&res_create_funcs))
		goto create_fail;

	/* HW Sequencer init functions and Plane caps */
	dcn32_hw_sequencer_init_functions(dc);

	dc->caps.max_planes = pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;

	/* optional OEM I2C engine exposed by VBIOS firmware info */
	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
		ddc_init_data.ctx = dc->ctx;
		ddc_init_data.link = NULL;
		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
		ddc_init_data.id.enum_id = 0;
		ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
		pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
	} else {
		pool->base.oem_device = NULL;
	}

	return true;

create_fail:

	/* destruct tolerates partially-constructed pools (NULL members) */
	dcn321_resource_destruct(pool);

	return false;
}

/*
 * Public entry point: allocate and construct the DCN3.2.1 resource pool.
 * Returns the embedded base pool on success, NULL on failure.
 */
struct resource_pool *dcn321_create_resource_pool(
		const struct dc_init_data *init_data,
		struct dc *dc)
{
	struct dcn321_resource_pool *pool =
		kzalloc(sizeof(struct dcn321_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (dcn321_resource_construct(init_data->num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	kfree(pool);
	return NULL;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"

#include "core_types.h"
#include "link_encoder.h"
#include "dcn321_dio_link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"

#include "gpio_service_interface.h"

#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif

/* register access helpers resolve against the local "enc10" variable */
#define CTX \
	enc10->base.ctx
#define DC_LOGGER \
	enc10->base.ctx->logger

#define REG(reg)\
	(enc10->link_regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc10->link_shift->field_name, enc10->link_mask->field_name

#define AUX_REG(reg)\
	(enc10->aux_regs->reg)

#define AUX_REG_READ(reg_name) \
	dm_read_reg(CTX, AUX_REG(reg_name))

#define AUX_REG_WRITE(reg_name, val) \
	dm_write_reg(CTX, AUX_REG(reg_name), val)

/* Link-encoder vtable for DCN3.2.1; reuses DCN10/20/30/31/32 callbacks. */
static const struct link_encoder_funcs dcn321_link_enc_funcs = {
	.read_state = link_enc2_read_state,
	.validate_output_with_stream =
			dcn30_link_encoder_validate_output_with_stream,
	.hw_init = enc32_hw_init,
	.setup = dcn10_link_encoder_setup,
	.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
	.enable_dp_output = dcn32_link_encoder_enable_dp_output,
	.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
	.disable_output = dcn10_link_encoder_disable_output,
	.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
	.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
	.update_mst_stream_allocation_table =
		dcn10_link_encoder_update_mst_stream_allocation_table,
	.psr_program_dp_dphy_fast_training =
			dcn10_psr_program_dp_dphy_fast_training,
	.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
	.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
	.enable_hpd = dcn10_link_encoder_enable_hpd,
	.disable_hpd = dcn10_link_encoder_disable_hpd,
	.is_dig_enabled = dcn10_is_dig_enabled,
	.destroy = dcn10_link_encoder_destroy,
	.fec_set_enable = enc2_fec_set_enable,
	.fec_set_ready = enc2_fec_set_ready,
	.fec_is_active = enc2_fec_is_active,
	.get_dig_frontend = dcn10_get_dig_frontend,
	.get_dig_mode = dcn10_get_dig_mode,
	.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
	.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
	.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
};

/*
 * Initialize a DCN3.2.1 link encoder: wire up the vtable, register tables,
 * preferred DIG engine for the transmitter, and capability flags obtained
 * from the VBIOS connector-speed-cap query.
 */
void dcn321_link_encoder_construct(
	struct dcn20_link_encoder *enc20,
	const struct encoder_init_data *init_data,
	const struct encoder_feature_support *enc_features,
	const struct dcn10_link_enc_registers *link_regs,
	const struct dcn10_link_enc_aux_registers *aux_regs,
	const struct dcn10_link_enc_hpd_registers *hpd_regs,
	const struct dcn10_link_enc_shift *link_shift,
	const struct dcn10_link_enc_mask *link_mask)
{
	struct bp_connector_speed_cap_info bp_cap_info = {0};
	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
	enum bp_result result = BP_RESULT_OK;
	struct dcn10_link_encoder *enc10 = &enc20->enc10;

	enc10->base.funcs = &dcn321_link_enc_funcs;
	enc10->base.ctx = init_data->ctx;
	enc10->base.id = init_data->encoder;

	enc10->base.hpd_source = init_data->hpd_source;
	enc10->base.connector = init_data->connector;

	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
		enc10->base.features.flags.bits.DP_IS_USB_C = 1;

	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

	/* NOTE(review): this whole-struct copy overwrites the DP_IS_USB_C bit
	 * set just above from init_data->connector.  Looks like the USB-C
	 * check should run after this assignment - confirm against upstream
	 * fixes for sibling DCN encoders before relying on the flag. */
	enc10->base.features = *enc_features;

	enc10->base.transmitter = init_data->transmitter;

	/* set the flag to indicate whether driver poll the I2C data pin
	 * while doing the DP sink detect
	 */

/*	if (dal_adapter_service_is_feature_supported(as,
		FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
		enc10->base.features.flags.bits.
			DP_SINK_DETECT_POLL_DATA_PIN = true;*/

	enc10->base.output_signals =
		SIGNAL_TYPE_DVI_SINGLE_LINK |
		SIGNAL_TYPE_DVI_DUAL_LINK |
		SIGNAL_TYPE_LVDS |
		SIGNAL_TYPE_DISPLAY_PORT |
		SIGNAL_TYPE_DISPLAY_PORT_MST |
		SIGNAL_TYPE_EDP |
		SIGNAL_TYPE_HDMI_TYPE_A;

	enc10->link_regs = link_regs;
	enc10->aux_regs = aux_regs;
	enc10->hpd_regs = hpd_regs;
	enc10->link_shift = link_shift;
	enc10->link_mask = link_mask;

	/* each UNIPHY transmitter is paired with a fixed DIG backend */
	switch (enc10->base.transmitter) {
	case TRANSMITTER_UNIPHY_A:
		enc10->base.preferred_engine = ENGINE_ID_DIGA;
		break;
	case TRANSMITTER_UNIPHY_B:
		enc10->base.preferred_engine = ENGINE_ID_DIGB;
		break;
	case TRANSMITTER_UNIPHY_C:
		enc10->base.preferred_engine = ENGINE_ID_DIGC;
		break;
	case TRANSMITTER_UNIPHY_D:
		enc10->base.preferred_engine = ENGINE_ID_DIGD;
		break;
	case TRANSMITTER_UNIPHY_E:
		enc10->base.preferred_engine = ENGINE_ID_DIGE;
		break;
	default:
		ASSERT_CRITICAL(false);
		enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
	}

	/* default to one to mirror Windows behavior */
	enc10->base.features.flags.bits.HDMI_6GB_EN = 1;

	if (bp_funcs->get_connector_speed_cap_info)
		result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
						enc10->base.connector, &bp_cap_info);

	/* Override features with DCE-specific values */
	if (result == BP_RESULT_OK) {
		enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
				bp_cap_info.DP_HBR2_EN;
		enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
				bp_cap_info.DP_HBR3_EN;
		enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
		enc10->base.features.flags.bits.IS_DP2_CAPABLE = 1;
		enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
		enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
		enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
	} else {
		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
				__func__,
				result);
	}
	if (enc10->base.ctx->dc->debug.hdmi20_disable) {
		enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
	}
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"

#include "dce110_transform_v.h"

/*
 * Power the input-gamma and/or gamma-correction LUT memories on or off via
 * the MEM_PWR_DIS bits, then poll (up to 3 x 2us) for both disable bits.
 * NOTE(review): the poll checks BOTH bits even when only one LUT was
 * requested - presumably intentional, but confirm against the DCE11 regspec.
 */
static void power_on_lut(struct transform *xfm,
	bool power_on, bool inputgamma, bool regamma)
{
	uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
	int i;

	if (power_on) {
		if (inputgamma)
			set_reg_field_value(
				value,
				1,
				DCFEV_MEM_PWR_CTRL,
				COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
		if (regamma)
			set_reg_field_value(
				value,
				1,
				DCFEV_MEM_PWR_CTRL,
				COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
	} else {
		if (inputgamma)
			set_reg_field_value(
				value,
				0,
				DCFEV_MEM_PWR_CTRL,
				COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
		if (regamma)
			set_reg_field_value(
				value,
				0,
				DCFEV_MEM_PWR_CTRL,
				COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
	}

	dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);

	for (i = 0; i < 3; i++) {
		value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
		if (get_reg_field_value(value,
				DCFEV_MEM_PWR_CTRL,
				COL_MAN_INPUT_GAMMA_MEM_PWR_DIS) &&
			get_reg_field_value(value,
					DCFEV_MEM_PWR_CTRL,
					COL_MAN_GAMMA_CORR_MEM_PWR_DIS))
			break;

		udelay(2);
	}
}

/* Set INPUT_GAMMA_MODE = 0 (bypass) so only the regamma stage applies. */
static void set_bypass_input_gamma(struct dce_transform *xfm_dce)
{
	uint32_t value;

	value = dm_read_reg(xfm_dce->base.ctx,
			mmCOL_MAN_INPUT_GAMMA_CONTROL1);

	set_reg_field_value(
				value,
				0,
				COL_MAN_INPUT_GAMMA_CONTROL1,
				INPUT_GAMMA_MODE);

	dm_write_reg(xfm_dce->base.ctx,
			mmCOL_MAN_INPUT_GAMMA_CONTROL1, value);
}

/*
 * Select the gamma-correction mode.
 * NOTE(review): @mode is folded into "value" but the register write below
 * stores literal 0, so GAMMA_CORR_MODE is always programmed to 0 and "value"
 * is dead.  This matches long-standing behavior here, but it looks
 * unintended - confirm before changing.
 */
static void configure_regamma_mode(struct dce_transform *xfm_dce,
	uint32_t mode)
{
	uint32_t value = 0;

	set_reg_field_value(
				value,
				mode,
				GAMMA_CORR_CONTROL,
				GAMMA_CORR_MODE);

	dm_write_reg(xfm_dce->base.ctx, mmGAMMA_CORR_CONTROL, 0);
}

/*
 *****************************************************************************
 *  Function: regamma_config_regions_and_segments
 *
 *     build regamma curve by using predefined hw points
 *     uses interface parameters ,like EDID coeff.
 *
 * @param   : parameters   interface parameters
 *  @return void
 *
 *  @note
 *
 *  @see
 *
 *****************************************************************************
 */
static void regamma_config_regions_and_segments(
	struct dce_transform *xfm_dce, const struct pwl_params *params)
{
	const struct gamma_curve *curve;
	uint32_t value = 0;

	/* curve start point and its segment */
	{
		set_reg_field_value(
			value,
			params->arr_points[0].custom_float_x,
			GAMMA_CORR_CNTLA_START_CNTL,
			GAMMA_CORR_CNTLA_EXP_REGION_START);

		set_reg_field_value(
			value,
			0,
			GAMMA_CORR_CNTLA_START_CNTL,
			GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_START_CNTL,
				value);
	}
	{
		value = 0;
		set_reg_field_value(
			value,
			params->arr_points[0].custom_float_slope,
			GAMMA_CORR_CNTLA_SLOPE_CNTL,
			GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_SLOPE_CNTL, value);
	}
	{
		value = 0;
		set_reg_field_value(
			value,
			params->arr_points[1].custom_float_x,
			GAMMA_CORR_CNTLA_END_CNTL1,
			GAMMA_CORR_CNTLA_EXP_REGION_END);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_END_CNTL1, value);
	}
	{
		/* NOTE(review): slope goes to ..._END_BASE and y to
		 * ..._END_SLOPE - the field naming looks swapped relative to
		 * the values; verify against the DCE11 register spec. */
		value = 0;
		set_reg_field_value(
			value,
			params->arr_points[1].custom_float_slope,
			GAMMA_CORR_CNTLA_END_CNTL2,
			GAMMA_CORR_CNTLA_EXP_REGION_END_BASE);

		set_reg_field_value(
			value,
			params->arr_points[1].custom_float_y,
			GAMMA_CORR_CNTLA_END_CNTL2,
			GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_END_CNTL2, value);
	}

	/* program the 16 exponential regions, two per register */
	curve = params->arr_curve_points;
	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_0_1,
			GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_0_1,
			GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_0_1,
			GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_0_1,
			GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS);

		dm_write_reg(
			xfm_dce->base.ctx,
			mmGAMMA_CORR_CNTLA_REGION_0_1,
			value);
	}

	curve += 2;

	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_2_3,
			GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_2_3,
			GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_2_3,
			GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_2_3,
			GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_REGION_2_3,
			value);
	}

	curve += 2;

	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_4_5,
			GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_4_5,
			GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_4_5,
			GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_4_5,
			GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_REGION_4_5,
			value);
	}

	curve += 2;

	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_6_7,
			GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_6_7,
			GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_6_7,
			GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_6_7,
			GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_REGION_6_7,
			value);
	}

	curve += 2;

	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_8_9,
			GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_8_9,
			GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_8_9,
			GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_8_9,
			GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_REGION_8_9,
			value);
	}

	curve += 2;

	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_10_11,
			GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_10_11,
			GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_10_11,
			GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_10_11,
			GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_REGION_10_11,
			value);
	}

	curve += 2;

	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_12_13,
			GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_12_13,
			GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_12_13,
			GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_12_13,
			GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_REGION_12_13,
			value);
	}

	curve += 2;

	{
		value = 0;
		set_reg_field_value(
			value,
			curve[0].offset,
			GAMMA_CORR_CNTLA_REGION_14_15,
			GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[0].segments_num,
			GAMMA_CORR_CNTLA_REGION_14_15,
			GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS);

		set_reg_field_value(
			value,
			curve[1].offset,
			GAMMA_CORR_CNTLA_REGION_14_15,
			GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET);

		set_reg_field_value(
			value,
			curve[1].segments_num,
			GAMMA_CORR_CNTLA_REGION_14_15,
			GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS);

		dm_write_reg(xfm_dce->base.ctx,
				mmGAMMA_CORR_CNTLA_REGION_14_15,
			value);
	}
}

/*
 * Stream the PWL result data into the gamma LUT: enable writes for all three
 * channels (mask 7), reset the index, then write 6 words per point
 * (R/G/B value + R/G/B delta) to the auto-incrementing DATA register.
 */
static void program_pwl(struct dce_transform *xfm_dce,
		const struct pwl_params *params)
{
	uint32_t value = 0;

	set_reg_field_value(
		value,
		7,
		GAMMA_CORR_LUT_WRITE_EN_MASK,
		GAMMA_CORR_LUT_WRITE_EN_MASK);

	dm_write_reg(xfm_dce->base.ctx,
		mmGAMMA_CORR_LUT_WRITE_EN_MASK, value);

	dm_write_reg(xfm_dce->base.ctx,
		mmGAMMA_CORR_LUT_INDEX, 0);

	/* Program REGAMMA_LUT_DATA */
	{
		const uint32_t addr = mmGAMMA_CORR_LUT_DATA;
		uint32_t i = 0;
		const struct pwl_result_data *rgb = params->rgb_resulted;

		while (i != params->hw_points_num) {
			dm_write_reg(xfm_dce->base.ctx, addr, rgb->red_reg);
			dm_write_reg(xfm_dce->base.ctx, addr, rgb->green_reg);
			dm_write_reg(xfm_dce->base.ctx, addr, rgb->blue_reg);

			dm_write_reg(xfm_dce->base.ctx, addr,
				rgb->delta_red_reg);
			dm_write_reg(xfm_dce->base.ctx, addr,
				rgb->delta_green_reg);
			dm_write_reg(xfm_dce->base.ctx, addr,
				rgb->delta_blue_reg);

			++rgb;
			++i;
		}
	}
}

/*
 * Full regamma PWL programming sequence for the underlay (V) pipe:
 * regions/segments, input-gamma bypass, LUT power-on, PWL data, mode select,
 * then return the LUT memory to automatic power management.
 */
void dce110_opp_program_regamma_pwl_v(
	struct transform *xfm,
	const struct pwl_params *params)
{
	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);

	/* Setup regions */
	regamma_config_regions_and_segments(xfm_dce, params);

	set_bypass_input_gamma(xfm_dce);

	/* Power on gamma LUT memory */
	power_on_lut(xfm, true, false, true);

	/* Program PWL */
	program_pwl(xfm_dce, params);

	/* program regamma config */
	configure_regamma_mode(xfm_dce, 1);

	/* Power return to auto back */
	power_on_lut(xfm, false, false, true);
}

/*
 * Clear the MEM_PWR_FORCE bits and program both gamma LUT power-disable bits
 * from @power_on (1 = keep powered, 0 = allow auto power-down).
 */
void dce110_opp_power_on_regamma_lut_v(
	struct transform *xfm,
	bool power_on)
{
	uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);

	set_reg_field_value(
		value,
		0,
		DCFEV_MEM_PWR_CTRL,
		COL_MAN_GAMMA_CORR_MEM_PWR_FORCE);

	set_reg_field_value(
		value,
		power_on,
		DCFEV_MEM_PWR_CTRL,
		COL_MAN_GAMMA_CORR_MEM_PWR_DIS);

	set_reg_field_value(
		value,
		0,
		DCFEV_MEM_PWR_CTRL,
		COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE);

	set_reg_field_value(
		value,
		power_on,
		DCFEV_MEM_PWR_CTRL,
		COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);

	dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);
}

/* Intentionally a no-op for the underlay pipe. */
void dce110_opp_set_regamma_mode_v(
	struct transform *xfm,
	enum opp_regamma mode)
{
	// TODO: need to implement the function
}
linux-master
drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"

#include "dce/dce_opp.h"
#include "dce110_opp_v.h"

/*****************************************/
/* Constructor, Destructor               */
/*****************************************/

/* OPP vtable for the DCE11 underlay (V) pipe; reuses the shared DCE110
 * implementations. */
static const struct opp_funcs funcs = {
		.opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
		.opp_destroy = dce110_opp_destroy,
		.opp_program_fmt = dce110_opp_program_fmt,
		.opp_program_bit_depth_reduction =
				dce110_opp_program_bit_depth_reduction
};

/* Initialize the underlay OPP wrapper: just vtable and context. */
void dce110_opp_v_construct(struct dce110_opp *opp110,
	struct dc_context *ctx)
{
	opp110->base.funcs = &funcs;

	opp110->base.ctx = ctx;
}
linux-master
drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "dce110_transform_v.h"
#include "basics/conversion.h"

/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
#include "dce/dce_11_0_enum.h"

enum {
	OUTPUT_CSC_MATRIX_SIZE = 12
};

/* constrast:0 - 2.0, default 1.0 */
#define UNDERLAY_CONTRAST_DEFAULT 100
#define UNDERLAY_CONTRAST_MAX     200
#define UNDERLAY_CONTRAST_MIN       0
#define UNDERLAY_CONTRAST_STEP      1
#define UNDERLAY_CONTRAST_DIVIDER 100

/* Saturation: 0 - 2.0; default 1.0 */
#define UNDERLAY_SATURATION_DEFAULT   100 /*1.00*/
#define UNDERLAY_SATURATION_MIN         0
#define UNDERLAY_SATURATION_MAX       200 /* 2.00 */
#define UNDERLAY_SATURATION_STEP        1 /* 0.01 */
/*actual max overlay saturation
 * value = UNDERLAY_SATURATION_MAX /UNDERLAY_SATURATION_DIVIDER
 */

/* Hue */
#define UNDERLAY_HUE_DEFAULT      0
#define UNDERLAY_HUE_MIN       -300
#define UNDERLAY_HUE_MAX        300
#define UNDERLAY_HUE_STEP         5
#define UNDERLAY_HUE_DIVIDER     10 /* HW range: -30 ~ +30 */
#define UNDERLAY_SATURATION_DIVIDER 100

/* Brightness: in DAL usually -.25 ~ .25.
 * In MMD is -100 to +100 in 16-235 range; which when scaled to full range is
 * ~-116 to +116. When normalized this is about 0.4566.
 * With 100 divider this becomes 46, but we may use another for better
 * precision
 * The ideal one is 100/219 ((100/255)*(255/219)),
 * i.e. min/max = +-100, divider = 219
 * default 0.0
 */
#define UNDERLAY_BRIGHTNESS_DEFAULT    0
#define UNDERLAY_BRIGHTNESS_MIN      -46 /* ~116/255 */
#define UNDERLAY_BRIGHTNESS_MAX       46
#define UNDERLAY_BRIGHTNESS_STEP       1 /*  .01 */
#define UNDERLAY_BRIGHTNESS_DIVIDER  100

/* Hard-coded output CSC matrices per color space (S2.13 fixed point). */
static const struct out_csc_color_matrix global_color_matrix[] = {
	{ COLOR_SPACE_SRGB,
		{ 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
	{ COLOR_SPACE_SRGB_LIMITED,
		{ 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
	{ COLOR_SPACE_YCBCR601,
		{ 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200,
				0xFB47, 0xF6B9, 0xE00, 0x1000} },
	{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394,
				0x1FA, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
	/* TODO: correct values below */
	{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
				0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
	{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE,
				0x16E3, 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
};

enum csc_color_mode {
	/* 00 - BITS2:0 Bypass */
	CSC_COLOR_MODE_GRAPHICS_BYPASS,
	/* 01 - hard coded coefficient TV RGB */
	CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
	/* 04 - programmable OUTPUT CSC coefficient */
	CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
};

enum grph_color_adjust_option {
	GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
	GRPH_COLOR_MATRIX_SW
};

/*
 * Program the output CSC coefficient registers from @tbl_entry, double
 * buffered: whichever coefficient set (A or B) is NOT currently selected by
 * OUTPUT_CSC_MODE gets written, then the mode is flipped to it.
 * NOTE: this chunk is truncated mid-function; the B-set programming and the
 * function tail continue beyond the visible region.
 */
static void program_color_matrix_v(
	struct dce_transform *xfm_dce,
	const struct out_csc_color_matrix *tbl_entry,
	enum grph_color_adjust_option options)
{
	struct dc_context *ctx = xfm_dce->base.ctx;
	uint32_t cntl_value = dm_read_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL);
	bool use_set_a = (get_reg_field_value(cntl_value,
			COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE) != 4);

	set_reg_field_value(
		cntl_value,
		0,
		COL_MAN_OUTPUT_CSC_CONTROL,
		OUTPUT_CSC_MODE);

	if (use_set_a) {
		{
			uint32_t value = 0;
			uint32_t addr = mmOUTPUT_CSC_C11_C12_A;
			/* fixed S2.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[0],
				OUTPUT_CSC_C11_C12_A,
				OUTPUT_CSC_C11_A);

			set_reg_field_value(
				value,
				tbl_entry->regval[1],
				OUTPUT_CSC_C11_C12_A,
				OUTPUT_CSC_C12_A);

			dm_write_reg(ctx, addr, value);
		}
		{
			uint32_t value = 0;
			uint32_t addr = mmOUTPUT_CSC_C13_C14_A;
			/* fixed S2.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[2],
				OUTPUT_CSC_C13_C14_A,
				OUTPUT_CSC_C13_A);
			/* fixed S0.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[3],
				OUTPUT_CSC_C13_C14_A,
				OUTPUT_CSC_C14_A);

			dm_write_reg(ctx, addr, value);
		}
		{
			uint32_t value = 0;
			uint32_t addr = mmOUTPUT_CSC_C21_C22_A;
			/* fixed S2.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[4],
				OUTPUT_CSC_C21_C22_A,
				OUTPUT_CSC_C21_A);
			/* fixed S2.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[5],
				OUTPUT_CSC_C21_C22_A,
				OUTPUT_CSC_C22_A);

			dm_write_reg(ctx, addr, value);
		}
		{
			uint32_t value = 0;
			uint32_t addr = mmOUTPUT_CSC_C23_C24_A;
			/* fixed S2.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[6],
				OUTPUT_CSC_C23_C24_A,
				OUTPUT_CSC_C23_A);
			/* fixed S0.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[7],
				OUTPUT_CSC_C23_C24_A,
				OUTPUT_CSC_C24_A);

			dm_write_reg(ctx, addr, value);
		}
		{
			uint32_t value = 0;
			uint32_t addr = mmOUTPUT_CSC_C31_C32_A;
			/* fixed S2.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[8],
				OUTPUT_CSC_C31_C32_A,
				OUTPUT_CSC_C31_A);
			/* fixed S0.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[9],
				OUTPUT_CSC_C31_C32_A,
				OUTPUT_CSC_C32_A);

			dm_write_reg(ctx, addr, value);
		}
		{
			uint32_t value = 0;
			uint32_t addr = mmOUTPUT_CSC_C33_C34_A;
			/* fixed S2.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[10],
				OUTPUT_CSC_C33_C34_A,
				OUTPUT_CSC_C33_A);
			/* fixed S0.13 format */
			set_reg_field_value(
				value,
				tbl_entry->regval[11],
				OUTPUT_CSC_C33_C34_A,
				OUTPUT_CSC_C34_A);

			dm_write_reg(ctx, addr, value);
		}

		set_reg_field_value(
			cntl_value,
			4,
			COL_MAN_OUTPUT_CSC_CONTROL,
			OUTPUT_CSC_MODE);
	} else {
		{
			uint32_t value = 0;
			uint32_t addr = mmOUTPUT_CSC_C11_C12_B;
			/* fixed S2.13 format */
			set_reg_field_value(
				value, /* truncated: the rest of this call and function continue beyond the visible chunk */
tbl_entry->regval[0], OUTPUT_CSC_C11_C12_B, OUTPUT_CSC_C11_B); set_reg_field_value( value, tbl_entry->regval[1], OUTPUT_CSC_C11_C12_B, OUTPUT_CSC_C12_B); dm_write_reg(ctx, addr, value); } { uint32_t value = 0; uint32_t addr = mmOUTPUT_CSC_C13_C14_B; /* fixed S2.13 format */ set_reg_field_value( value, tbl_entry->regval[2], OUTPUT_CSC_C13_C14_B, OUTPUT_CSC_C13_B); /* fixed S0.13 format */ set_reg_field_value( value, tbl_entry->regval[3], OUTPUT_CSC_C13_C14_B, OUTPUT_CSC_C14_B); dm_write_reg(ctx, addr, value); } { uint32_t value = 0; uint32_t addr = mmOUTPUT_CSC_C21_C22_B; /* fixed S2.13 format */ set_reg_field_value( value, tbl_entry->regval[4], OUTPUT_CSC_C21_C22_B, OUTPUT_CSC_C21_B); /* fixed S2.13 format */ set_reg_field_value( value, tbl_entry->regval[5], OUTPUT_CSC_C21_C22_B, OUTPUT_CSC_C22_B); dm_write_reg(ctx, addr, value); } { uint32_t value = 0; uint32_t addr = mmOUTPUT_CSC_C23_C24_B; /* fixed S2.13 format */ set_reg_field_value( value, tbl_entry->regval[6], OUTPUT_CSC_C23_C24_B, OUTPUT_CSC_C23_B); /* fixed S0.13 format */ set_reg_field_value( value, tbl_entry->regval[7], OUTPUT_CSC_C23_C24_B, OUTPUT_CSC_C24_B); dm_write_reg(ctx, addr, value); } { uint32_t value = 0; uint32_t addr = mmOUTPUT_CSC_C31_C32_B; /* fixed S2.13 format */ set_reg_field_value( value, tbl_entry->regval[8], OUTPUT_CSC_C31_C32_B, OUTPUT_CSC_C31_B); /* fixed S0.13 format */ set_reg_field_value( value, tbl_entry->regval[9], OUTPUT_CSC_C31_C32_B, OUTPUT_CSC_C32_B); dm_write_reg(ctx, addr, value); } { uint32_t value = 0; uint32_t addr = mmOUTPUT_CSC_C33_C34_B; /* fixed S2.13 format */ set_reg_field_value( value, tbl_entry->regval[10], OUTPUT_CSC_C33_C34_B, OUTPUT_CSC_C33_B); /* fixed S0.13 format */ set_reg_field_value( value, tbl_entry->regval[11], OUTPUT_CSC_C33_C34_B, OUTPUT_CSC_C34_B); dm_write_reg(ctx, addr, value); } set_reg_field_value( cntl_value, 5, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); } dm_write_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL, cntl_value); } static bool 
configure_graphics_mode_v( struct dce_transform *xfm_dce, enum csc_color_mode config, enum graphics_csc_adjust_type csc_adjust_type, enum dc_color_space color_space) { struct dc_context *ctx = xfm_dce->base.ctx; uint32_t addr = mmCOL_MAN_OUTPUT_CSC_CONTROL; uint32_t value = dm_read_reg(ctx, addr); set_reg_field_value( value, 0, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) { if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC) return true; switch (color_space) { case COLOR_SPACE_SRGB: /* by pass */ set_reg_field_value( value, 0, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); break; case COLOR_SPACE_SRGB_LIMITED: /* not supported for underlay on CZ */ return false; case COLOR_SPACE_YCBCR601_LIMITED: /* YCbCr601 */ set_reg_field_value( value, 2, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: /* YCbCr709 */ set_reg_field_value( value, 3, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); break; default: return false; } } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) { switch (color_space) { case COLOR_SPACE_SRGB: /* by pass */ set_reg_field_value( value, 0, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); break; case COLOR_SPACE_SRGB_LIMITED: /* not supported for underlay on CZ */ return false; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: /* YCbCr601 */ set_reg_field_value( value, 2, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: /* YCbCr709 */ set_reg_field_value( value, 3, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); break; default: return false; } } else /* by pass */ set_reg_field_value( value, 0, COL_MAN_OUTPUT_CSC_CONTROL, OUTPUT_CSC_MODE); addr = mmCOL_MAN_OUTPUT_CSC_CONTROL; dm_write_reg(ctx, addr, value); return true; } /*TODO: color depth is not correct when this is called*/ static void set_Denormalization(struct transform *xfm, enum dc_color_depth 
color_depth) { uint32_t value = dm_read_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL); switch (color_depth) { case COLOR_DEPTH_888: /* 255/256 for 8 bit output color depth */ set_reg_field_value( value, 1, DENORM_CLAMP_CONTROL, DENORM_MODE); break; case COLOR_DEPTH_101010: /* 1023/1024 for 10 bit output color depth */ set_reg_field_value( value, 2, DENORM_CLAMP_CONTROL, DENORM_MODE); break; case COLOR_DEPTH_121212: /* 4095/4096 for 12 bit output color depth */ set_reg_field_value( value, 3, DENORM_CLAMP_CONTROL, DENORM_MODE); break; default: /* not valid case */ break; } set_reg_field_value( value, 1, DENORM_CLAMP_CONTROL, DENORM_10BIT_OUT); dm_write_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL, value); } struct input_csc_matrix { enum dc_color_space color_space; uint32_t regval[12]; }; static const struct input_csc_matrix input_csc_matrix[] = { {COLOR_SPACE_SRGB, /*1_1 1_2 1_3 1_4 2_1 2_2 2_3 2_4 3_1 3_2 3_3 3_4 */ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, {COLOR_SPACE_SRGB_LIMITED, {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, {COLOR_SPACE_YCBCR601, {0x2cdd, 0x2000, 0x0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef, 0x0, 0x2000, 0x38b4, 0xe3a6} }, {COLOR_SPACE_YCBCR601_LIMITED, {0x3353, 0x2568, 0x0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108, 0x0, 0x2568, 0x40de, 0xdd3a} }, {COLOR_SPACE_YCBCR709, {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0, 0x2000, 0x3b61, 0xe24f} }, {COLOR_SPACE_YCBCR709_LIMITED, {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, 0x2568, 0x43ee, 0xdbb2} } }; static void program_input_csc( struct transform *xfm, enum dc_color_space color_space) { int arr_size = sizeof(input_csc_matrix)/sizeof(struct input_csc_matrix); struct dc_context *ctx = xfm->ctx; const uint32_t *regval = NULL; bool use_set_a; uint32_t value; int i; for (i = 0; i < arr_size; i++) if (input_csc_matrix[i].color_space == color_space) { regval = input_csc_matrix[i].regval; break; } if (regval == NULL) { BREAK_TO_DEBUGGER(); return; } /* * 1 == set A, the 
logic is 'if currently we're not using set A, * then use set A, otherwise use set B' */ value = dm_read_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL); use_set_a = get_reg_field_value( value, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_MODE) != 1; if (use_set_a) { /* fixed S2.13 format */ value = 0; set_reg_field_value( value, regval[0], INPUT_CSC_C11_C12_A, INPUT_CSC_C11_A); set_reg_field_value( value, regval[1], INPUT_CSC_C11_C12_A, INPUT_CSC_C12_A); dm_write_reg(ctx, mmINPUT_CSC_C11_C12_A, value); value = 0; set_reg_field_value( value, regval[2], INPUT_CSC_C13_C14_A, INPUT_CSC_C13_A); set_reg_field_value( value, regval[3], INPUT_CSC_C13_C14_A, INPUT_CSC_C14_A); dm_write_reg(ctx, mmINPUT_CSC_C13_C14_A, value); value = 0; set_reg_field_value( value, regval[4], INPUT_CSC_C21_C22_A, INPUT_CSC_C21_A); set_reg_field_value( value, regval[5], INPUT_CSC_C21_C22_A, INPUT_CSC_C22_A); dm_write_reg(ctx, mmINPUT_CSC_C21_C22_A, value); value = 0; set_reg_field_value( value, regval[6], INPUT_CSC_C23_C24_A, INPUT_CSC_C23_A); set_reg_field_value( value, regval[7], INPUT_CSC_C23_C24_A, INPUT_CSC_C24_A); dm_write_reg(ctx, mmINPUT_CSC_C23_C24_A, value); value = 0; set_reg_field_value( value, regval[8], INPUT_CSC_C31_C32_A, INPUT_CSC_C31_A); set_reg_field_value( value, regval[9], INPUT_CSC_C31_C32_A, INPUT_CSC_C32_A); dm_write_reg(ctx, mmINPUT_CSC_C31_C32_A, value); value = 0; set_reg_field_value( value, regval[10], INPUT_CSC_C33_C34_A, INPUT_CSC_C33_A); set_reg_field_value( value, regval[11], INPUT_CSC_C33_C34_A, INPUT_CSC_C34_A); dm_write_reg(ctx, mmINPUT_CSC_C33_C34_A, value); } else { /* fixed S2.13 format */ value = 0; set_reg_field_value( value, regval[0], INPUT_CSC_C11_C12_B, INPUT_CSC_C11_B); set_reg_field_value( value, regval[1], INPUT_CSC_C11_C12_B, INPUT_CSC_C12_B); dm_write_reg(ctx, mmINPUT_CSC_C11_C12_B, value); value = 0; set_reg_field_value( value, regval[2], INPUT_CSC_C13_C14_B, INPUT_CSC_C13_B); set_reg_field_value( value, regval[3], INPUT_CSC_C13_C14_B, INPUT_CSC_C14_B); 
dm_write_reg(ctx, mmINPUT_CSC_C13_C14_B, value); value = 0; set_reg_field_value( value, regval[4], INPUT_CSC_C21_C22_B, INPUT_CSC_C21_B); set_reg_field_value( value, regval[5], INPUT_CSC_C21_C22_B, INPUT_CSC_C22_B); dm_write_reg(ctx, mmINPUT_CSC_C21_C22_B, value); value = 0; set_reg_field_value( value, regval[6], INPUT_CSC_C23_C24_B, INPUT_CSC_C23_B); set_reg_field_value( value, regval[7], INPUT_CSC_C23_C24_B, INPUT_CSC_C24_B); dm_write_reg(ctx, mmINPUT_CSC_C23_C24_B, value); value = 0; set_reg_field_value( value, regval[8], INPUT_CSC_C31_C32_B, INPUT_CSC_C31_B); set_reg_field_value( value, regval[9], INPUT_CSC_C31_C32_B, INPUT_CSC_C32_B); dm_write_reg(ctx, mmINPUT_CSC_C31_C32_B, value); value = 0; set_reg_field_value( value, regval[10], INPUT_CSC_C33_C34_B, INPUT_CSC_C33_B); set_reg_field_value( value, regval[11], INPUT_CSC_C33_C34_B, INPUT_CSC_C34_B); dm_write_reg(ctx, mmINPUT_CSC_C33_C34_B, value); } /* KK: leave INPUT_CSC_CONVERSION_MODE at default */ value = 0; /* * select 8.4 input type instead of default 12.0. From the discussion * with HW team, this format depends on the UNP surface format, so for * 8-bit we should select 8.4 (4 bits truncated). For 10 it should be * 10.2. For Carrizo we only support 8-bit surfaces on underlay pipe * so we can always keep this at 8.4 (input_type=2). If the later asics * start supporting 10+ bits, we will have a problem: surface * programming including UNP_GRPH* is being done in DalISR after this, * so either we pass surface format to here, or move this logic to ISR */ set_reg_field_value( value, 2, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_INPUT_TYPE); set_reg_field_value( value, use_set_a ? 
1 : 2, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_MODE); dm_write_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL, value); } void dce110_opp_v_set_csc_default( struct transform *xfm, const struct default_adjustment *default_adjust) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_PREDEFINED; if (default_adjust->force_hw_default == false) { const struct out_csc_color_matrix *elm; /* currently parameter not in use */ enum grph_color_adjust_option option; uint32_t i; /* * HW default false we program locally defined matrix * HW default true we use predefined hw matrix and we * do not need to program matrix * OEM wants the HW default via runtime parameter. */ option = GRPH_COLOR_MATRIX_SW; for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) { elm = &global_color_matrix[i]; if (elm->color_space != default_adjust->out_color_space) continue; /* program the matrix with default values from this * file */ program_color_matrix_v(xfm_dce, elm, option); config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC; break; } } program_input_csc(xfm, default_adjust->in_color_space); /* configure the what we programmed : * 1. Default values from this file * 2. Use hardware default from ROM_A and we do not need to program * matrix */ configure_graphics_mode_v(xfm_dce, config, default_adjust->csc_adjust_type, default_adjust->out_color_space); set_Denormalization(xfm, default_adjust->color_depth); } void dce110_opp_v_set_csc_adjustment( struct transform *xfm, const struct out_csc_color_matrix *tbl_entry) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC; program_color_matrix_v( xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW); /* We did everything ,now program DxOUTPUT_CSC_CONTROL */ configure_graphics_mode_v(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW, tbl_entry->color_space); /*TODO: Check if denormalization is needed*/ /*set_Denormalization(opp, adjust->color_depth);*/ }
/* ---- end of dce110_opp_csc_v.c
 * (linux-master: drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c)
 * a second file, dce110_mem_input_v.c, follows below ---- */
/* * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
/* TODO: this needs to be looked at, used by Stella's workaround*/
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"

#include "include/logger_interface.h"
#include "inc/dce_calcs.h"

#include "dce/dce_mem_input.h"
#include "dce110_mem_input_v.h"

/*
 * set_flip_control
 *
 * Prepare the UNP (underlay) pipe for a surface flip by forcing
 * GRPH_SURFACE_UPDATE_PENDING_MODE to 1 in UNP_FLIP_CONTROL.
 *
 * NOTE(review): the 'immediate' argument is not consumed here — the same
 * pending mode is programmed for both immediate and vsync flips. Confirm
 * whether immediate flips are meant to be handled elsewhere.
 */
static void set_flip_control(
	struct dce_mem_input *mem_input110,
	bool immediate)
{
	uint32_t value = 0;

	value = dm_read_reg(
			mem_input110->base.ctx,
			mmUNP_FLIP_CONTROL);

	set_reg_field_value(value, 1,
			UNP_FLIP_CONTROL,
			GRPH_SURFACE_UPDATE_PENDING_MODE);

	dm_write_reg(
			mem_input110->base.ctx,
			mmUNP_FLIP_CONTROL,
			value);
}

/* chroma part */
/*
 * program_pri_addr_c
 *
 * Program the primary surface address of the chroma (C) plane. The low
 * part is written pre-shifted by the register field's __SHIFT, i.e. only
 * the aligned upper bits of the low dword are stored.
 */
static void program_pri_addr_c(
	struct dce_mem_input *mem_input110,
	PHYSICAL_ADDRESS_LOC address)
{
	uint32_t value = 0;
	uint32_t temp = 0;

	/*high register MUST be programmed first*/
	temp = address.high_part &
UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK;

	set_reg_field_value(value, temp,
			UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
			GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C);

	dm_write_reg(
		mem_input110->base.ctx,
		mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
		value);

	temp = 0;
	value = 0;
	temp = address.low_part >>
UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT;

	set_reg_field_value(value, temp,
			UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
			GRPH_PRIMARY_SURFACE_ADDRESS_C);

	dm_write_reg(
		mem_input110->base.ctx,
		mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
		value);
}

/* luma part */
/*
 * program_pri_addr_l
 *
 * Program the primary surface address of the luma (L) plane; same
 * high-before-low ordering requirement as the chroma variant above.
 */
static void program_pri_addr_l(
	struct dce_mem_input *mem_input110,
	PHYSICAL_ADDRESS_LOC address)
{
	uint32_t value = 0;
	uint32_t temp = 0;

	/*high register MUST be programmed first*/
	temp = address.high_part &
UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MASK;

	set_reg_field_value(value, temp,
			UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
			GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L);

	dm_write_reg(
		mem_input110->base.ctx,
mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L, value); temp = 0; value = 0; temp = address.low_part >> UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT; set_reg_field_value(value, temp, UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L, GRPH_PRIMARY_SURFACE_ADDRESS_L); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_L, value); } static void program_addr( struct dce_mem_input *mem_input110, const struct dc_plane_address *addr) { switch (addr->type) { case PLN_ADDR_TYPE_GRAPHICS: program_pri_addr_l( mem_input110, addr->grph.addr); break; case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: program_pri_addr_c( mem_input110, addr->video_progressive.chroma_addr); program_pri_addr_l( mem_input110, addr->video_progressive.luma_addr); break; default: /* not supported */ BREAK_TO_DEBUGGER(); } } static void enable(struct dce_mem_input *mem_input110) { uint32_t value = 0; value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_ENABLE); set_reg_field_value(value, 1, UNP_GRPH_ENABLE, GRPH_ENABLE); dm_write_reg(mem_input110->base.ctx, mmUNP_GRPH_ENABLE, value); } static void program_tiling( struct dce_mem_input *mem_input110, const union dc_tiling_info *info, const enum surface_pixel_format pixel_format) { uint32_t value = 0; set_reg_field_value(value, info->gfx8.num_banks, UNP_GRPH_CONTROL, GRPH_NUM_BANKS); set_reg_field_value(value, info->gfx8.bank_width, UNP_GRPH_CONTROL, GRPH_BANK_WIDTH_L); set_reg_field_value(value, info->gfx8.bank_height, UNP_GRPH_CONTROL, GRPH_BANK_HEIGHT_L); set_reg_field_value(value, info->gfx8.tile_aspect, UNP_GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT_L); set_reg_field_value(value, info->gfx8.tile_split, UNP_GRPH_CONTROL, GRPH_TILE_SPLIT_L); set_reg_field_value(value, info->gfx8.tile_mode, UNP_GRPH_CONTROL, GRPH_MICRO_TILE_MODE_L); set_reg_field_value(value, info->gfx8.pipe_config, UNP_GRPH_CONTROL, GRPH_PIPE_CONFIG); set_reg_field_value(value, info->gfx8.array_mode, UNP_GRPH_CONTROL, GRPH_ARRAY_MODE); set_reg_field_value(value, 1, 
UNP_GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE); set_reg_field_value(value, 0, UNP_GRPH_CONTROL, GRPH_Z); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL, value); value = 0; set_reg_field_value(value, info->gfx8.bank_width_c, UNP_GRPH_CONTROL_C, GRPH_BANK_WIDTH_C); set_reg_field_value(value, info->gfx8.bank_height_c, UNP_GRPH_CONTROL_C, GRPH_BANK_HEIGHT_C); set_reg_field_value(value, info->gfx8.tile_aspect_c, UNP_GRPH_CONTROL_C, GRPH_MACRO_TILE_ASPECT_C); set_reg_field_value(value, info->gfx8.tile_split_c, UNP_GRPH_CONTROL_C, GRPH_TILE_SPLIT_C); set_reg_field_value(value, info->gfx8.tile_mode_c, UNP_GRPH_CONTROL_C, GRPH_MICRO_TILE_MODE_C); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL_C, value); } static void program_size_and_rotation( struct dce_mem_input *mem_input110, enum dc_rotation_angle rotation, const struct plane_size *plane_size) { uint32_t value = 0; struct plane_size local_size = *plane_size; if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) { swap(local_size.surface_size.x, local_size.surface_size.y); swap(local_size.surface_size.width, local_size.surface_size.height); swap(local_size.chroma_size.x, local_size.chroma_size.y); swap(local_size.chroma_size.width, local_size.chroma_size.height); } value = 0; set_reg_field_value(value, local_size.surface_pitch, UNP_GRPH_PITCH_L, GRPH_PITCH_L); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_PITCH_L, value); value = 0; set_reg_field_value(value, local_size.chroma_pitch, UNP_GRPH_PITCH_C, GRPH_PITCH_C); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_PITCH_C, value); value = 0; set_reg_field_value(value, 0, UNP_GRPH_X_START_L, GRPH_X_START_L); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_X_START_L, value); value = 0; set_reg_field_value(value, 0, UNP_GRPH_X_START_C, GRPH_X_START_C); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_X_START_C, value); value = 0; set_reg_field_value(value, 0, UNP_GRPH_Y_START_L, GRPH_Y_START_L); dm_write_reg( mem_input110->base.ctx, 
mmUNP_GRPH_Y_START_L, value); value = 0; set_reg_field_value(value, 0, UNP_GRPH_Y_START_C, GRPH_Y_START_C); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_Y_START_C, value); value = 0; set_reg_field_value(value, local_size.surface_size.x + local_size.surface_size.width, UNP_GRPH_X_END_L, GRPH_X_END_L); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_X_END_L, value); value = 0; set_reg_field_value(value, local_size.chroma_size.x + local_size.chroma_size.width, UNP_GRPH_X_END_C, GRPH_X_END_C); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_X_END_C, value); value = 0; set_reg_field_value(value, local_size.surface_size.y + local_size.surface_size.height, UNP_GRPH_Y_END_L, GRPH_Y_END_L); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_Y_END_L, value); value = 0; set_reg_field_value(value, local_size.chroma_size.y + local_size.chroma_size.height, UNP_GRPH_Y_END_C, GRPH_Y_END_C); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_Y_END_C, value); value = 0; switch (rotation) { case ROTATION_ANGLE_90: set_reg_field_value(value, 3, UNP_HW_ROTATION, ROTATION_ANGLE); break; case ROTATION_ANGLE_180: set_reg_field_value(value, 2, UNP_HW_ROTATION, ROTATION_ANGLE); break; case ROTATION_ANGLE_270: set_reg_field_value(value, 1, UNP_HW_ROTATION, ROTATION_ANGLE); break; default: set_reg_field_value(value, 0, UNP_HW_ROTATION, ROTATION_ANGLE); break; } dm_write_reg( mem_input110->base.ctx, mmUNP_HW_ROTATION, value); } static void program_pixel_format( struct dce_mem_input *mem_input110, enum surface_pixel_format format) { if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { uint32_t value; uint8_t grph_depth; uint8_t grph_format; value = dm_read_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL); switch (format) { case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: grph_depth = 0; grph_format = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB565: grph_depth = 1; grph_format = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: grph_depth = 2; grph_format 
= 0; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: grph_depth = 2; grph_format = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: grph_depth = 3; grph_format = 0; break; default: grph_depth = 2; grph_format = 0; break; } set_reg_field_value( value, grph_depth, UNP_GRPH_CONTROL, GRPH_DEPTH); set_reg_field_value( value, grph_format, UNP_GRPH_CONTROL, GRPH_FORMAT); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL, value); value = dm_read_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL_EXP); /* VIDEO FORMAT 0 */ set_reg_field_value( value, 0, UNP_GRPH_CONTROL_EXP, VIDEO_FORMAT); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL_EXP, value); } else { /* Video 422 and 420 needs UNP_GRPH_CONTROL_EXP programmed */ uint32_t value; uint8_t video_format; value = dm_read_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL_EXP); switch (format) { case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: video_format = 2; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: video_format = 3; break; default: video_format = 0; break; } set_reg_field_value( value, video_format, UNP_GRPH_CONTROL_EXP, VIDEO_FORMAT); dm_write_reg( mem_input110->base.ctx, mmUNP_GRPH_CONTROL_EXP, value); } } static bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input) { struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input); uint32_t value; value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_UPDATE); if (get_reg_field_value(value, UNP_GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING)) return true; mem_input->current_address = mem_input->request_address; return false; } static bool dce_mem_input_v_program_surface_flip_and_addr( struct mem_input *mem_input, const struct dc_plane_address *address, bool flip_immediate) { struct dce_mem_input *mem_input110 
= TO_DCE_MEM_INPUT(mem_input); set_flip_control(mem_input110, flip_immediate); program_addr(mem_input110, address); mem_input->request_address = *address; return true; } /* Scatter Gather param tables */ static const unsigned int dvmm_Hw_Setting_2DTiling[4][9] = { { 8, 64, 64, 8, 8, 1, 4, 0, 0}, { 16, 64, 32, 8, 16, 1, 8, 0, 0}, { 32, 32, 32, 16, 16, 1, 8, 0, 0}, { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */ }; static const unsigned int dvmm_Hw_Setting_1DTiling[4][9] = { { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */ { 16, 256, 8, 2, 0, 1, 0, 0, 0}, { 32, 128, 8, 4, 0, 1, 0, 0, 0}, { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */ }; static const unsigned int dvmm_Hw_Setting_Linear[4][9] = { { 8, 4096, 1, 8, 0, 1, 0, 0, 0}, { 16, 2048, 1, 8, 0, 1, 0, 0, 0}, { 32, 1024, 1, 8, 0, 1, 0, 0, 0}, { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */ }; /* Helper to get table entry from surface info */ static const unsigned int *get_dvmm_hw_setting( union dc_tiling_info *tiling_info, enum surface_pixel_format format, bool chroma) { enum bits_per_pixel { bpp_8 = 0, bpp_16, bpp_32, bpp_64 } bpp; if (format >= SURFACE_PIXEL_FORMAT_INVALID) bpp = bpp_32; else if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) bpp = chroma ? 
bpp_16 : bpp_8; else bpp = bpp_8; switch (tiling_info->gfx8.array_mode) { case DC_ARRAY_1D_TILED_THIN1: case DC_ARRAY_1D_TILED_THICK: case DC_ARRAY_PRT_TILED_THIN1: return dvmm_Hw_Setting_1DTiling[bpp]; case DC_ARRAY_2D_TILED_THIN1: case DC_ARRAY_2D_TILED_THICK: case DC_ARRAY_2D_TILED_X_THICK: case DC_ARRAY_PRT_2D_TILED_THIN1: case DC_ARRAY_PRT_2D_TILED_THICK: return dvmm_Hw_Setting_2DTiling[bpp]; case DC_ARRAY_LINEAR_GENERAL: case DC_ARRAY_LINEAR_ALLIGNED: return dvmm_Hw_Setting_Linear[bpp]; default: return dvmm_Hw_Setting_2DTiling[bpp]; } } static void dce_mem_input_v_program_pte_vm( struct mem_input *mem_input, enum surface_pixel_format format, union dc_tiling_info *tiling_info, enum dc_rotation_angle rotation) { struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input); const unsigned int *pte = get_dvmm_hw_setting(tiling_info, format, false); const unsigned int *pte_chroma = get_dvmm_hw_setting(tiling_info, format, true); unsigned int page_width = 0; unsigned int page_height = 0; unsigned int page_width_chroma = 0; unsigned int page_height_chroma = 0; unsigned int temp_page_width = pte[1]; unsigned int temp_page_height = pte[2]; unsigned int min_pte_before_flip = 0; unsigned int min_pte_before_flip_chroma = 0; uint32_t value = 0; while ((temp_page_width >>= 1) != 0) page_width++; while ((temp_page_height >>= 1) != 0) page_height++; temp_page_width = pte_chroma[1]; temp_page_height = pte_chroma[2]; while ((temp_page_width >>= 1) != 0) page_width_chroma++; while ((temp_page_height >>= 1) != 0) page_height_chroma++; switch (rotation) { case ROTATION_ANGLE_90: case ROTATION_ANGLE_270: min_pte_before_flip = pte[4]; min_pte_before_flip_chroma = pte_chroma[4]; break; default: min_pte_before_flip = pte[3]; min_pte_before_flip_chroma = pte_chroma[3]; break; } value = dm_read_reg(mem_input110->base.ctx, mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT); /* TODO: un-hardcode requestlimit */ set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, 
/*
 * Tail of dce_mem_input_v_program_pte_vm() — the function head lies before
 * this chunk, so the first statements below continue an argument list opened
 * above.  Programs per-pipe outstanding-request limits and DVMM PTE
 * (page-table-entry) geometry/arbitration for luma and chroma planes.
 * Tokens unchanged; only comments and formatting added.
 */
		UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L);
	/* Cap chroma outstanding requests at the HW maximum (0xff). */
	set_reg_field_value(
		value,
		0xff,
		UNP_PIPE_OUTSTANDING_REQUEST_LIMIT,
		UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C);
	dm_write_reg(
		mem_input110->base.ctx,
		mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT,
		value);

	/* Luma (Y) plane DVMM PTE geometry. */
	value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL);
	set_reg_field_value(
		value,
		page_width,
		UNP_DVMM_PTE_CONTROL,
		DVMM_PAGE_WIDTH);
	set_reg_field_value(
		value,
		page_height,
		UNP_DVMM_PTE_CONTROL,
		DVMM_PAGE_HEIGHT);
	set_reg_field_value(
		value,
		min_pte_before_flip,
		UNP_DVMM_PTE_CONTROL,
		DVMM_MIN_PTE_BEFORE_FLIP);
	dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL, value);

	/* Luma PTE request arbitration. */
	value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL);
	set_reg_field_value(
		value,
		pte[5],
		UNP_DVMM_PTE_ARB_CONTROL,
		DVMM_PTE_REQ_PER_CHUNK);
	set_reg_field_value(
		value,
		0xff,
		UNP_DVMM_PTE_ARB_CONTROL,
		DVMM_MAX_PTE_REQ_OUTSTANDING);
	dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL, value);

	/* Chroma (Cb/Cr) plane DVMM PTE geometry. */
	value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C);
	set_reg_field_value(
		value,
		page_width_chroma,
		UNP_DVMM_PTE_CONTROL_C,
		DVMM_PAGE_WIDTH_C);
	set_reg_field_value(
		value,
		page_height_chroma,
		UNP_DVMM_PTE_CONTROL_C,
		DVMM_PAGE_HEIGHT_C);
	set_reg_field_value(
		value,
		min_pte_before_flip_chroma,
		UNP_DVMM_PTE_CONTROL_C,
		DVMM_MIN_PTE_BEFORE_FLIP_C);
	dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C, value);

	/* Chroma PTE request arbitration. */
	value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C);
	set_reg_field_value(
		value,
		pte_chroma[5],
		UNP_DVMM_PTE_ARB_CONTROL_C,
		DVMM_PTE_REQ_PER_CHUNK_C);
	set_reg_field_value(
		value,
		0xff,
		UNP_DVMM_PTE_ARB_CONTROL_C,
		DVMM_MAX_PTE_REQ_OUTSTANDING_C);
	dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
}

/*
 * Program the surface configuration: enable the mem input, then apply tiling,
 * size/rotation and pixel format in that order.
 * NOTE(review): "horizotal_mirror" [sic] is the as-found parameter name;
 * neither it nor "dcc" is used by this implementation.
 */
static void dce_mem_input_v_program_surface_config(
	struct mem_input *mem_input,
	enum surface_pixel_format format,
	union dc_tiling_info *tiling_info,
	struct plane_size *plane_size,
	enum dc_rotation_angle rotation,
	struct dc_plane_dcc_param *dcc,
	bool horizotal_mirror)
{
	struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);

	enable(mem_input110);
	program_tiling(mem_input110, tiling_info, format);
	program_size_and_rotation(mem_input110, rotation, plane_size);
	program_pixel_format(mem_input110, format);
}

/*
 * Program urgency watermark sets A and B for one pipe.
 * marks_low.{a,b}_mark go into the LOW watermark field; the destination line
 * time (ns) is programmed as the HIGH watermark for both sets.  The mask
 * register selects which set (1 = A, 2 = B) the subsequent write targets.
 */
static void program_urgency_watermark(
	const struct dc_context *ctx,
	const uint32_t urgency_addr,
	const uint32_t wm_addr,
	struct dce_watermarks marks_low,
	uint32_t total_dest_line_time_ns)
{
	/* register value */
	uint32_t urgency_cntl = 0;
	uint32_t wm_mask_cntl = 0;

	/* Write mask to enable reading/writing of watermark set A */
	wm_mask_cntl = dm_read_reg(ctx, wm_addr);
	set_reg_field_value(wm_mask_cntl,
			1,
			DPGV0_WATERMARK_MASK_CONTROL,
			URGENCY_WATERMARK_MASK);
	dm_write_reg(ctx, wm_addr, wm_mask_cntl);

	urgency_cntl = dm_read_reg(ctx, urgency_addr);
	set_reg_field_value(
		urgency_cntl,
		marks_low.a_mark,
		DPGV0_PIPE_URGENCY_CONTROL,
		URGENCY_LOW_WATERMARK);
	set_reg_field_value(
		urgency_cntl,
		total_dest_line_time_ns,
		DPGV0_PIPE_URGENCY_CONTROL,
		URGENCY_HIGH_WATERMARK);
	dm_write_reg(ctx, urgency_addr, urgency_cntl);

	/* Write mask to enable reading/writing of watermark set B */
	wm_mask_cntl = dm_read_reg(ctx, wm_addr);
	set_reg_field_value(wm_mask_cntl,
			2,
			DPGV0_WATERMARK_MASK_CONTROL,
			URGENCY_WATERMARK_MASK);
	dm_write_reg(ctx, wm_addr, wm_mask_cntl);

	urgency_cntl = dm_read_reg(ctx, urgency_addr);
	set_reg_field_value(urgency_cntl,
			marks_low.b_mark,
			DPGV0_PIPE_URGENCY_CONTROL,
			URGENCY_LOW_WATERMARK);
	set_reg_field_value(urgency_cntl,
			total_dest_line_time_ns,
			DPGV0_PIPE_URGENCY_CONTROL,
			URGENCY_HIGH_WATERMARK);
	dm_write_reg(ctx, urgency_addr, urgency_cntl);
}

/* Urgency watermarks for the luma pipe (DPGV0 register pair). */
static void program_urgency_watermark_l(
	const struct dc_context *ctx,
	struct dce_watermarks marks_low,
	uint32_t total_dest_line_time_ns)
{
	program_urgency_watermark(
		ctx,
		mmDPGV0_PIPE_URGENCY_CONTROL,
		mmDPGV0_WATERMARK_MASK_CONTROL,
		marks_low,
		total_dest_line_time_ns);
}

/* Urgency watermarks for the chroma pipe (DPGV1 register pair). */
static void program_urgency_watermark_c(
	const struct dc_context *ctx,
	struct dce_watermarks marks_low,
	uint32_t total_dest_line_time_ns)
{
	program_urgency_watermark(
		ctx,
		mmDPGV1_PIPE_URGENCY_CONTROL,
		mmDPGV1_WATERMARK_MASK_CONTROL,
		marks_low,
		total_dest_line_time_ns);
}

/*
 * Program stutter (self-refresh exit) watermark sets A and B.
 * Stutter is force-disabled when the global debug flag
 * ctx->dc->debug.disable_stutter is set; FBC state is ignored
 * (STUTTER_IGNORE_FBC = 1).
 */
static void program_stutter_watermark(
	const struct dc_context *ctx,
	const uint32_t stutter_addr,
	const uint32_t wm_addr,
	struct dce_watermarks marks)
{
	/* register value */
	uint32_t stutter_cntl = 0;
	uint32_t wm_mask_cntl = 0;

	/* Write mask to enable reading/writing of watermark set A */
	wm_mask_cntl = dm_read_reg(ctx, wm_addr);
	set_reg_field_value(wm_mask_cntl,
		1,
		DPGV0_WATERMARK_MASK_CONTROL,
		STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
	dm_write_reg(ctx, wm_addr, wm_mask_cntl);

	stutter_cntl = dm_read_reg(ctx, stutter_addr);

	if (ctx->dc->debug.disable_stutter) {
		set_reg_field_value(stutter_cntl,
			0,
			DPGV0_PIPE_STUTTER_CONTROL,
			STUTTER_ENABLE);
	} else {
		set_reg_field_value(stutter_cntl,
			1,
			DPGV0_PIPE_STUTTER_CONTROL,
			STUTTER_ENABLE);
	}

	set_reg_field_value(stutter_cntl,
		1,
		DPGV0_PIPE_STUTTER_CONTROL,
		STUTTER_IGNORE_FBC);

	/* Write watermark set A */
	set_reg_field_value(stutter_cntl,
		marks.a_mark,
		DPGV0_PIPE_STUTTER_CONTROL,
		STUTTER_EXIT_SELF_REFRESH_WATERMARK);
	dm_write_reg(ctx, stutter_addr, stutter_cntl);

	/* Write mask to enable reading/writing of watermark set B */
	wm_mask_cntl = dm_read_reg(ctx, wm_addr);
	set_reg_field_value(wm_mask_cntl,
		2,
		DPGV0_WATERMARK_MASK_CONTROL,
		STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
	dm_write_reg(ctx, wm_addr, wm_mask_cntl);

	stutter_cntl = dm_read_reg(ctx, stutter_addr);

	/* Write watermark set B */
	set_reg_field_value(stutter_cntl,
		marks.b_mark,
		DPGV0_PIPE_STUTTER_CONTROL,
		STUTTER_EXIT_SELF_REFRESH_WATERMARK);
	dm_write_reg(ctx, stutter_addr, stutter_cntl);
}

/* Stutter watermarks for the luma pipe. */
static void program_stutter_watermark_l(
	const struct dc_context *ctx,
	struct dce_watermarks marks)
{
	program_stutter_watermark(ctx,
			mmDPGV0_PIPE_STUTTER_CONTROL,
			mmDPGV0_WATERMARK_MASK_CONTROL,
			marks);
}

/* Stutter watermarks for the chroma pipe. */
static void program_stutter_watermark_c(
	const struct dc_context *ctx,
	struct dce_watermarks marks)
{
	program_stutter_watermark(ctx,
			mmDPGV1_PIPE_STUTTER_CONTROL,
			mmDPGV1_WATERMARK_MASK_CONTROL,
			marks);
}

/*
 * Program north-bridge p-state change watermark sets A and B.
 * The three enable bits (ENABLE / URGENT_DURING_REQUEST /
 * NOT_SELF_REFRESH_DURING_REQUEST) are re-written before each set's
 * watermark value, mirroring the original programming sequence.
 */
static void program_nbp_watermark(
	const struct dc_context *ctx,
	const uint32_t wm_mask_ctrl_addr,
	const uint32_t nbp_pstate_ctrl_addr,
	struct dce_watermarks marks)
{
	uint32_t value;

	/* Write mask to enable reading/writing of watermark set A */
	value = dm_read_reg(ctx, wm_mask_ctrl_addr);
	set_reg_field_value(
		value,
		1,
		DPGV0_WATERMARK_MASK_CONTROL,
		NB_PSTATE_CHANGE_WATERMARK_MASK);
	dm_write_reg(ctx, wm_mask_ctrl_addr, value);

	value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
	set_reg_field_value(
		value,
		1,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_ENABLE);
	set_reg_field_value(
		value,
		1,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
	set_reg_field_value(
		value,
		1,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
	dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);

	/* Write watermark set A */
	value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
	set_reg_field_value(
		value,
		marks.a_mark,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_WATERMARK);
	dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);

	/* Write mask to enable reading/writing of watermark set B */
	value = dm_read_reg(ctx, wm_mask_ctrl_addr);
	set_reg_field_value(
		value,
		2,
		DPGV0_WATERMARK_MASK_CONTROL,
		NB_PSTATE_CHANGE_WATERMARK_MASK);
	dm_write_reg(ctx, wm_mask_ctrl_addr, value);

	value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
	set_reg_field_value(
		value,
		1,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_ENABLE);
	set_reg_field_value(
		value,
		1,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
	set_reg_field_value(
		value,
		1,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
	dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);

	/* Write watermark set B */
	value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
	set_reg_field_value(
		value,
		marks.b_mark,
		DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
		NB_PSTATE_CHANGE_WATERMARK);
	dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
}

/* NB p-state watermarks for the luma pipe. */
static void program_nbp_watermark_l(
	const struct dc_context *ctx,
	struct dce_watermarks marks)
{
	program_nbp_watermark(ctx,
			mmDPGV0_WATERMARK_MASK_CONTROL,
			mmDPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
			marks);
}

/* NB p-state watermarks for the chroma pipe. */
static void program_nbp_watermark_c(
	const struct dc_context *ctx,
	struct dce_watermarks marks)
{
	program_nbp_watermark(ctx,
			mmDPGV1_WATERMARK_MASK_CONTROL,
			mmDPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL,
			marks);
}

/*
 * Program all luma-pipe display watermarks (urgency, NB p-state, stutter).
 * NOTE(review): the "stutter_enter" argument is not used by this
 * implementation — only "stutter" is programmed.
 */
static void dce_mem_input_v_program_display_marks(
	struct mem_input *mem_input,
	struct dce_watermarks nbp,
	struct dce_watermarks stutter,
	struct dce_watermarks stutter_enter,
	struct dce_watermarks urgent,
	uint32_t total_dest_line_time_ns)
{
	program_urgency_watermark_l(
		mem_input->ctx,
		urgent,
		total_dest_line_time_ns);

	program_nbp_watermark_l(
		mem_input->ctx,
		nbp);

	program_stutter_watermark_l(
		mem_input->ctx,
		stutter);
}

/* Program all chroma-pipe display watermarks (urgency, NB p-state, stutter). */
static void dce_mem_input_program_chroma_display_marks(
	struct mem_input *mem_input,
	struct dce_watermarks nbp,
	struct dce_watermarks stutter,
	struct dce_watermarks urgent,
	uint32_t total_dest_line_time_ns)
{
	program_urgency_watermark_c(
		mem_input->ctx,
		urgent,
		total_dest_line_time_ns);

	program_nbp_watermark_c(
		mem_input->ctx,
		nbp);

	program_stutter_watermark_c(
		mem_input->ctx,
		stutter);
}

/*
 * Program pipe arbitration for the current stream.
 * pix_dur = 1000000000 / pix_clk_khz is the pixel duration; presumably in
 * nanoseconds-per-kilo-pixel units expected by PIXEL_DURATION — TODO confirm
 * against the DPGV register spec.  0x4000800 is an as-found magic value for
 * ARBITRATION_CONTROL2.  h_total, v_total and total_stream_num are unused
 * here.  Skipped entirely when pix_clk_khz == 0 (avoids divide-by-zero).
 */
static void dce110_allocate_mem_input_v(
	struct mem_input *mi,
	uint32_t h_total,/* for current stream */
	uint32_t v_total,/* for current stream */
	uint32_t pix_clk_khz,/* for current stream */
	uint32_t total_stream_num)
{
	uint32_t addr;
	uint32_t value;
	uint32_t pix_dur;

	if (pix_clk_khz != 0) {
		addr = mmDPGV0_PIPE_ARBITRATION_CONTROL1;
		value = dm_read_reg(mi->ctx, addr);
		pix_dur = 1000000000ULL / pix_clk_khz;
		set_reg_field_value(
			value,
			pix_dur,
			DPGV0_PIPE_ARBITRATION_CONTROL1,
			PIXEL_DURATION);
		dm_write_reg(mi->ctx, addr, value);

		addr = mmDPGV1_PIPE_ARBITRATION_CONTROL1;
		value = dm_read_reg(mi->ctx, addr);
		pix_dur = 1000000000ULL / pix_clk_khz;
		set_reg_field_value(
			value,
			pix_dur,
			DPGV1_PIPE_ARBITRATION_CONTROL1,
			PIXEL_DURATION);
		dm_write_reg(mi->ctx, addr, value);

		addr = mmDPGV0_PIPE_ARBITRATION_CONTROL2;
		value = 0x4000800;
		dm_write_reg(mi->ctx, addr, value);

		addr = mmDPGV1_PIPE_ARBITRATION_CONTROL2;
		value = 0x4000800;
		dm_write_reg(mi->ctx, addr, value);
	}
}

/* No de-allocation needed for this HW generation; intentionally empty. */
static void dce110_free_mem_input_v(
	struct mem_input *mi,
	uint32_t total_stream_num)
{
}

/* vtable wiring the DCE110 "v" (underlay) mem-input implementation. */
static const struct mem_input_funcs dce110_mem_input_v_funcs = {
	.mem_input_program_display_marks =
			dce_mem_input_v_program_display_marks,
	.mem_input_program_chroma_display_marks =
			dce_mem_input_program_chroma_display_marks,
	.allocate_mem_input = dce110_allocate_mem_input_v,
	.free_mem_input = dce110_free_mem_input_v,
	.mem_input_program_surface_flip_and_addr =
			dce_mem_input_v_program_surface_flip_and_addr,
	.mem_input_program_pte_vm =
			dce_mem_input_v_program_pte_vm,
	.mem_input_program_surface_config =
			dce_mem_input_v_program_surface_config,
	.mem_input_is_flip_pending =
			dce_mem_input_v_is_surface_pending
};

/*****************************************/
/* Constructor, Destructor               */
/*****************************************/

/* Minimal constructor: install the vtable and keep the DC context. */
void dce110_mem_input_v_construct(
	struct dce_mem_input *dce_mi,
	struct dc_context *ctx)
{
	dce_mi->base.funcs = &dce110_mem_input_v_funcs;
	dce_mi->base.ctx = ctx;
}
linux-master
drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
#include "gmc/gmc_8_2_sh_mask.h"
#include "gmc/gmc_8_2_d.h"

#include "include/logger_interface.h"

#include "dce110_compressor.h"

#define DC_LOGGER \
		cp110->base.ctx->logger

/* Address helpers: add the per-CRTC offset (cp110->offsets) to a base
 * DCP / DMIF register address. */
#define DCP_REG(reg)\
	(reg + cp110->offsets.dcp_offset)
#define DMIF_REG(reg)\
	(reg + cp110->offsets.dmif_offset)

/* Register offsets of CRTC instances 0..2 relative to instance 0. */
static const struct dce110_compressor_reg_offsets reg_offsets[] = {
{
	.dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
	.dmif_offset =
		(mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
			- mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
},
{
	.dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
	.dmif_offset =
		(mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
			- mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
},
{
	.dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
	.dmif_offset =
		(mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
			- mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
}
};

/* Round a pixel count up to a whole number of 256-pixel chunks. */
static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
{
	return 256 * ((pixels + 255) / 256);
}

/*
 * Reset the line buffer of the given CRTC on its next vblank.
 * Only acts when the CRTC scan position is actually moving (two consecutive
 * CRTC_STATUS_POSITION reads differ); then arms the LB sync-reset, waits up
 * to 100 ms (10000 x 10 us polls) for the frame counter to advance, and
 * re-arms the reset selects for the following vblank.
 */
static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst)
{
	uint32_t value;
	uint32_t frame_count;
	uint32_t status_pos;
	uint32_t retry = 0;
	struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);

	cp110->offsets = reg_offsets[crtc_inst];

	status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION));

	/* Only if CRTC is enabled and counter is moving we wait for one frame.
	 */
	if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) {
		/* Resetting LB on VBlank */
		value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
		set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
		set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
		dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);

		frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT));

		for (retry = 10000; retry > 0; retry--) {
			if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)))
				break;
			udelay(10);
		}
		if (!retry)
			dm_error("Frame count did not increase for 100ms.\n");

		/* Resetting LB on VBlank */
		value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
		set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
		set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
		dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
	}
}

/*
 * Poll FBC_STATUS until FBC_ENABLE_STATUS matches "enabled", up to
 * 1000 x 100 us (100 ms); warns if the HW never reached the requested state.
 */
static void wait_for_fbc_state_changed(
	struct dce110_compressor *cp110,
	bool enabled)
{
	uint32_t counter = 0;
	uint32_t addr = mmFBC_STATUS;
	uint32_t value;

	while (counter < 1000) {
		value = dm_read_reg(cp110->base.ctx, addr);
		if (get_reg_field_value(
			value,
			FBC_STATUS,
			FBC_ENABLE_STATUS) == enabled)
			break;
		udelay(100);
		counter++;
	}

	if (counter == 1000) {
		DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
			__func__);
	} else {
		DC_LOG_SYNC("FBC status changed to %d", enabled);
	}
}

/*
 * Power up the FBC block: enable the engine (compression itself stays off),
 * select RLE/DPCM4/indexed compression modes, set 1:1 minimum compression,
 * and initialize the indexed-color LUT endpoints.
 */
void dce110_compressor_power_up_fbc(struct compressor *compressor)
{
	uint32_t value;
	uint32_t addr;

	addr = mmFBC_CNTL;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
	set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
	set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
	if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
		/* HW needs to do power measurement comparison.
		 */
		set_reg_field_value(
			value,
			0,
			FBC_CNTL,
			FBC_COMP_CLK_GATE_EN);
	}
	dm_write_reg(compressor->ctx, addr, value);

	addr = mmFBC_COMP_MODE;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
	set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
	set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
	dm_write_reg(compressor->ctx, addr, value);

	addr = mmFBC_COMP_CNTL;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
	dm_write_reg(compressor->ctx, addr, value);
	/*FBC_MIN_COMPRESSION 0 ==> 2:1 */
	/*                    1 ==> 4:1 */
	/*                    2 ==> 8:1 */
	/*                  0xF ==> 1:1 */
	set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
	dm_write_reg(compressor->ctx, addr, value);
	compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;

	value = 0;
	dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);

	value = 0xFFFFFF;
	dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
}

/*
 * Enable FBC on the CRTC given by params->inst.
 * The GRPH_COMP_EN bit is toggled off/on deliberately (HW bug workaround);
 * misc invalidation/error handling is configured in between.
 */
void dce110_compressor_enable_fbc(
	struct compressor *compressor,
	struct compr_addr_and_pitch_params *params)
{
	struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);

	if (compressor->options.bits.FBC_SUPPORT &&
		(!dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL))) {

		uint32_t addr;
		uint32_t value, misc_value;

		addr = mmFBC_CNTL;
		value = dm_read_reg(compressor->ctx, addr);
		set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
		/* params->inst is valid HW CRTC instance start from 0 */
		set_reg_field_value(
			value,
			params->inst,
			FBC_CNTL, FBC_SRC_SEL);
		dm_write_reg(compressor->ctx, addr, value);

		/* Keep track of enum controller_id FBC is attached to */
		compressor->is_enabled = true;
		/* attached_inst is SW CRTC instance start from 1
		 * 0 = CONTROLLER_ID_UNDEFINED means not attached crtc
		 */
		compressor->attached_inst = params->inst + CONTROLLER_ID_D0;

		/* Toggle it as there is bug in HW */
		set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
		dm_write_reg(compressor->ctx, addr, value);

		/* FBC usage with scatter & gather for dce110 */
		misc_value = dm_read_reg(compressor->ctx, mmFBC_MISC);

		set_reg_field_value(misc_value, 1,
				FBC_MISC, FBC_INVALIDATE_ON_ERROR);
		set_reg_field_value(misc_value, 1,
				FBC_MISC, FBC_DECOMPRESS_ERROR_CLEAR);
		set_reg_field_value(misc_value, 0x14,
				FBC_MISC, FBC_SLOW_REQ_INTERVAL);

		dm_write_reg(compressor->ctx, mmFBC_MISC, misc_value);

		/* Enable FBC */
		set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
		dm_write_reg(compressor->ctx, addr, value);

		wait_for_fbc_state_changed(cp110, true);
	}
}

/*
 * Disable FBC if it is enabled in HW, then reset the line buffer the
 * compressor was attached to (DCE100/110 requirement).
 */
void dce110_compressor_disable_fbc(struct compressor *compressor)
{
	struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
	uint32_t crtc_inst = 0;

	if (compressor->options.bits.FBC_SUPPORT) {
		if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) {
			uint32_t reg_data;
			/* Turn off compression */
			reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
			set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
			dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);

			/* Reset enum controller_id to undefined */
			compressor->attached_inst = 0;
			compressor->is_enabled = false;

			wait_for_fbc_state_changed(cp110, false);
		}

		/* Sync line buffer which fbc was attached to dce100/110 only */
		if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
			reset_lb_on_vblank(compressor,
					crtc_inst - CONTROLLER_ID_D0);
	}
}

/*
 * Report whether FBC is enabled in hardware.
 * True if FBC_ENABLE_STATUS is set, or if FBC is stopped on an hflip event
 * while GRPH_COMP_EN is still requested.  *inst (optional) receives the SW
 * CRTC instance the compressor is attached to.
 */
bool dce110_compressor_is_fbc_enabled_in_hw(
	struct compressor *compressor,
	uint32_t *inst)
{
	/* Check the hardware register */
	uint32_t value;

	value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
	if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
		if (inst != NULL)
			*inst = compressor->attached_inst;
		return true;
	}

	value = dm_read_reg(compressor->ctx, mmFBC_MISC);
	if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
		value = dm_read_reg(compressor->ctx, mmFBC_CNTL);

		if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
			if (inst != NULL)
				*inst = compressor->attached_inst;
			return true;
		}
	}

	return false;
}

/*
 * Program the compressed-surface base address (HIGH word first, per HW
 * requirement) and the compressed pitch for the CRTC in params->inst.
 * Pitch is the chunk-aligned source width divided by 8 for the 1:1 ratio;
 * any other ratio is unexpected on DCE11 and only logged.
 */
void dce110_compressor_program_compressed_surface_address_and_pitch(
	struct compressor *compressor,
	struct compr_addr_and_pitch_params *params)
{
	struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
	uint32_t value = 0;
	uint32_t fbc_pitch = 0;
	uint32_t compressed_surf_address_low_part =
		compressor->compr_surface_address.addr.low_part;

	cp110->offsets = reg_offsets[params->inst];

	/* Clear content first. */
	dm_write_reg(
		compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
		0);
	dm_write_reg(compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);

	/* Write address, HIGH has to be first. */
	dm_write_reg(compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
		compressor->compr_surface_address.addr.high_part);
	dm_write_reg(compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
		compressed_surf_address_low_part);

	fbc_pitch = align_to_chunks_number_per_line(params->source_view_width);

	if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
		fbc_pitch = fbc_pitch / 8;
	else
		DC_LOG_WARNING("%s: Unexpected DCE11 compression ratio",
			__func__);

	/* Clear content first. */
	dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);

	/* Write FBC Pitch. */
	set_reg_field_value(
		value,
		fbc_pitch,
		GRPH_COMPRESS_PITCH,
		GRPH_COMPRESS_PITCH);
	dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);

}

/*
 * Configure which events invalidate the compressed buffer.
 * Region-hit events are disabled (incompatible with S/G on DCE11); the CSM
 * clear events are taken from fbc_trigger.
 */
void dce110_compressor_set_fbc_invalidation_triggers(
	struct compressor *compressor,
	uint32_t fbc_trigger)
{
	/* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
	 * for DCE 11 regions cannot be used - does not work with S/G
	 */
	uint32_t addr = mmFBC_CLIENT_REGION_MASK;
	uint32_t value = dm_read_reg(compressor->ctx, addr);

	set_reg_field_value(
		value,
		0,
		FBC_CLIENT_REGION_MASK,
		FBC_MEMORY_REGION_MASK);
	dm_write_reg(compressor->ctx, addr, value);

	/* Setup events when to clear all CSM entries (effectively marking
	 * current compressed data invalid)
	 * For DCE 11 CSM metadata 11111 means - "Not Compressed"
	 * Used as the initial value of the metadata sent to the compressor
	 * after invalidation, to indicate that the compressor should attempt
	 * to compress all chunks on the current pass.  Also used when the chunk
	 * is not successfully written to memory.
	 * When this CSM value is detected, FBC reads from the uncompressed
	 * buffer.  Set events according to passed in value, these events are
	 * valid for DCE11:
	 *     - bit  0 - display register updated
	 *     - bit 28 - memory write from any client except from MCIF
	 *     - bit 29 - CG static screen signal is inactive
	 * In addition, DCE11.1 also needs to set new DCE11.1 specific events
	 * that are used to trigger invalidation on certain register changes,
	 * for example enabling of Alpha Compression may trigger invalidation of
	 * FBC once bit is set.  These events are as follows:
	 *     - Bit 2 - FBC_GRPH_COMP_EN register updated
	 *     - Bit 3 - FBC_SRC_SEL register updated
	 *     - Bit 4 - FBC_MIN_COMPRESSION register updated
	 *     - Bit 5 - FBC_ALPHA_COMP_EN register updated
	 *     - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
	 *     - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
	 */
	addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		fbc_trigger,
		FBC_IDLE_FORCE_CLEAR_MASK,
		FBC_IDLE_FORCE_CLEAR_MASK);
	dm_write_reg(compressor->ctx, addr, value);
}

/* Allocate and construct a DCE110 compressor; NULL on allocation failure. */
struct compressor *dce110_compressor_create(struct dc_context *ctx)
{
	struct dce110_compressor *cp110 =
		kzalloc(sizeof(struct dce110_compressor), GFP_KERNEL);

	if (!cp110)
		return NULL;

	dce110_compressor_construct(cp110, ctx);
	return &cp110->base;
}

/* Free the compressor and clear the caller's pointer. */
void dce110_compressor_destroy(struct compressor **compressor)
{
	kfree(TO_DCE110_COMPRESSOR(*compressor));
	*compressor = NULL;
}

/* Report the maximum surface size FBC can cover. */
void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
{
	*max_x = FBC_MAX_X;
	*max_y = FBC_MAX_Y;
	/* if (m_smallLocalFrameBufferMemory == 1)
	 * {
	 *	*max_x = FBC_MAX_X_SG;
	 *	*max_y = FBC_MAX_Y_SG;
	 * }
	 */
}

static const struct compressor_funcs dce110_compressor_funcs = {
	.power_up_fbc = dce110_compressor_power_up_fbc,
	.enable_fbc = dce110_compressor_enable_fbc,
	.disable_fbc = dce110_compressor_disable_fbc,
	.set_fbc_invalidation_triggers = dce110_compressor_set_fbc_invalidation_triggers,
	.surface_address_and_pitch = dce110_compressor_program_compressed_surface_address_and_pitch,
	.is_fbc_enabled_in_hw = dce110_compressor_is_fbc_enabled_in_hw
};

/*
 * Initialize a DCE110 compressor object with its default capabilities.
 * NOTE(review): lpt_channels_num is set to 1 ("always use one dram channel
 * for lpt") and later overwritten with 0 further down — as-found behavior;
 * confirm which value is intended.
 */
void dce110_compressor_construct(struct dce110_compressor *compressor,
	struct dc_context *ctx)
{

	compressor->base.options.raw = 0;
	compressor->base.options.bits.FBC_SUPPORT = true;

	/* for dce 11 always use one dram channel for lpt */
	compressor->base.lpt_channels_num = 1;
	compressor->base.options.bits.DUMMY_BACKEND = false;

	/*
	 * check if this system has more than 1 dram channel; if only 1 then lpt
	 * should not be supported
	 */
	compressor->base.options.bits.CLK_GATING_DISABLED = false;

	compressor->base.ctx = ctx;
	compressor->base.embedded_panel_h_size = 0;
	compressor->base.embedded_panel_v_size = 0;
	compressor->base.memory_bus_width = ctx->asic_id.vram_width;
	compressor->base.allocated_size = 0;
	compressor->base.preferred_requested_size = 0;
	compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
	compressor->base.banks_num = 0;
	compressor->base.raw_size = 0;
	compressor->base.channel_interleave_size = 0;
	compressor->base.dram_channels_num = 0;
	compressor->base.lpt_channels_num = 0;
	compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED;
	compressor->base.is_enabled = false;
	compressor->base.funcs = &dce110_compressor_funcs;

}
linux-master
drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"

#include "dc_types.h"
#include "dc_bios_types.h"
#include "dc.h"

#include "include/grph_object_id.h"
#include "include/logger_interface.h"
#include "dce110_timing_generator.h"

#include "timing_generator.h"

#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10

/* Maximum programmable H/V total: the register field mask plus one.
 * Fix: the V-total mask token was garbled ("...MASKhw"), which cannot
 * compile; the correct symbol from dce_11_0_sh_mask.h mirrors the H-total
 * one below. */
#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)

/* Per-instance register address helpers. */
#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
#define DCP_REG(reg) (reg + tg110->offsets.dcp)

/* Following register offsets are the same in these files:
 *	dce/dce_11_0_d.h
 *	dce/vi_polaris10_p/vi_polaris10_d.h
 *
 * So we can create dce110 timing generator to use it.
*/ /* * apply_front_porch_workaround * * This is a workaround for a bug that has existed since R5xx and has not been * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. */ static void dce110_timing_generator_apply_front_porch_workaround( struct timing_generator *tg, struct dc_crtc_timing *timing) { if (timing->flags.INTERLACE == 1) { if (timing->v_front_porch < 2) timing->v_front_porch = 2; } else { if (timing->v_front_porch < 1) timing->v_front_porch = 1; } } /* ***************************************************************************** * Function: is_in_vertical_blank * * @brief * check the current status of CRTC to check if we are in Vertical Blank * regioneased" state * * @return * true if currently in blank region, false otherwise * ***************************************************************************** */ static bool dce110_timing_generator_is_in_vertical_blank( struct timing_generator *tg) { uint32_t addr = 0; uint32_t value = 0; uint32_t field = 0; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); addr = CRTC_REG(mmCRTC_STATUS); value = dm_read_reg(tg->ctx, addr); field = get_reg_field_value(value, CRTC_STATUS, CRTC_V_BLANK); return field == 1; } void dce110_timing_generator_set_early_control( struct timing_generator *tg, uint32_t early_cntl) { uint32_t regval; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t address = CRTC_REG(mmCRTC_CONTROL); regval = dm_read_reg(tg->ctx, address); set_reg_field_value(regval, early_cntl, CRTC_CONTROL, CRTC_HBLANK_EARLY_CONTROL); dm_write_reg(tg->ctx, address, regval); } /* * Enable CRTC * Enable CRTC - call ASIC Control Object to enable Timing generator. 
*/ bool dce110_timing_generator_enable_crtc(struct timing_generator *tg) { enum bp_result result; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value = 0; /* * 3 is used to make sure V_UPDATE occurs at the beginning of the first * line of vertical front porch */ set_reg_field_value( value, 0, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE); dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE), value); /* TODO: may want this on to catch underflow */ value = 0; dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK), value); result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true); return result == BP_RESULT_OK; } void dce110_timing_generator_program_blank_color( struct timing_generator *tg, const struct tg_color *black_color) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR); uint32_t value = dm_read_reg(tg->ctx, addr); set_reg_field_value( value, black_color->color_b_cb, CRTC_BLACK_COLOR, CRTC_BLACK_COLOR_B_CB); set_reg_field_value( value, black_color->color_g_y, CRTC_BLACK_COLOR, CRTC_BLACK_COLOR_G_Y); set_reg_field_value( value, black_color->color_r_cr, CRTC_BLACK_COLOR, CRTC_BLACK_COLOR_R_CR); dm_write_reg(tg->ctx, addr, value); } /* ***************************************************************************** * Function: disable_stereo * * @brief * Disables active stereo on controller * Frame Packing need to be disabled in vBlank or when CRTC not running ***************************************************************************** */ #if 0 @TODOSTEREO static void disable_stereo(struct timing_generator *tg) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t addr = CRTC_REG(mmCRTC_3D_STRUCTURE_CONTROL); uint32_t value = 0; uint32_t test = 0; uint32_t field = 0; uint32_t struc_en = 0; uint32_t struc_stereo_sel_ovr = 0; value = dm_read_reg(tg->ctx, addr); struc_en = get_reg_field_value( value, CRTC_3D_STRUCTURE_CONTROL, CRTC_3D_STRUCTURE_EN); 
struc_stereo_sel_ovr = get_reg_field_value( value, CRTC_3D_STRUCTURE_CONTROL, CRTC_3D_STRUCTURE_STEREO_SEL_OVR); /* * When disabling Frame Packing in 2 step mode, we need to program both * registers at the same frame * Programming it in the beginning of VActive makes sure we are ok */ if (struc_en != 0 && struc_stereo_sel_ovr == 0) { tg->funcs->wait_for_vblank(tg); tg->funcs->wait_for_vactive(tg); } value = 0; dm_write_reg(tg->ctx, addr, value); addr = tg->regs[IDX_CRTC_STEREO_CONTROL]; dm_write_reg(tg->ctx, addr, value); } #endif /* * disable_crtc - call ASIC Control Object to disable Timing generator. */ bool dce110_timing_generator_disable_crtc(struct timing_generator *tg) { enum bp_result result; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, false); /* Need to make sure stereo is disabled according to the DCE5.0 spec */ /* * @TODOSTEREO call this when adding stereo support * tg->funcs->disable_stereo(tg); */ return result == BP_RESULT_OK; } /* * program_horz_count_by_2 * Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise */ static void program_horz_count_by_2( struct timing_generator *tg, const struct dc_crtc_timing *timing) { uint32_t regval; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); regval = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_COUNT_CONTROL)); set_reg_field_value(regval, 0, CRTC_COUNT_CONTROL, CRTC_HORZ_COUNT_BY2_EN); if (timing->flags.HORZ_COUNT_BY_TWO) set_reg_field_value(regval, 1, CRTC_COUNT_CONTROL, CRTC_HORZ_COUNT_BY2_EN); dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_COUNT_CONTROL), regval); } /* * program_timing_generator * Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition. * Call ASIC Control Object to program Timings. 
*/
bool dce110_timing_generator_program_timing_generator(
	struct timing_generator *tg,
	const struct dc_crtc_timing *dc_crtc_timing)
{
	enum bp_result result;
	struct bp_hw_crtc_timing_parameters bp_params;
	struct dc_crtc_timing patched_crtc_timing;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	/* sync start = end of addressable area + border + front porch */
	uint32_t vsync_offset = dc_crtc_timing->v_border_bottom +
			dc_crtc_timing->v_front_porch;
	uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset;

	uint32_t hsync_offset = dc_crtc_timing->h_border_right +
			dc_crtc_timing->h_front_porch;
	uint32_t h_sync_start = dc_crtc_timing->h_addressable + hsync_offset;

	memset(&bp_params, 0, sizeof(struct bp_hw_crtc_timing_parameters));

	/* Due to an asic bug we need to apply the Front Porch workaround prior
	 * to programming the timing.
	 */
	patched_crtc_timing = *dc_crtc_timing;

	dce110_timing_generator_apply_front_porch_workaround(tg, &patched_crtc_timing);

	/* Translate the (patched) dc timing into VBIOS command-table params. */
	bp_params.controller_id = tg110->controller_id;

	bp_params.h_total = patched_crtc_timing.h_total;
	bp_params.h_addressable = patched_crtc_timing.h_addressable;
	bp_params.v_total = patched_crtc_timing.v_total;
	bp_params.v_addressable = patched_crtc_timing.v_addressable;

	bp_params.h_sync_start = h_sync_start;
	bp_params.h_sync_width = patched_crtc_timing.h_sync_width;
	bp_params.v_sync_start = v_sync_start;
	bp_params.v_sync_width = patched_crtc_timing.v_sync_width;

	/* Set overscan */
	bp_params.h_overscan_left = patched_crtc_timing.h_border_left;
	bp_params.h_overscan_right = patched_crtc_timing.h_border_right;
	bp_params.v_overscan_top = patched_crtc_timing.v_border_top;
	bp_params.v_overscan_bottom = patched_crtc_timing.v_border_bottom;

	/* Set flags */
	if (patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY == 1)
		bp_params.flags.HSYNC_POSITIVE_POLARITY = 1;

	if (patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY == 1)
		bp_params.flags.VSYNC_POSITIVE_POLARITY = 1;

	if (patched_crtc_timing.flags.INTERLACE == 1)
		bp_params.flags.INTERLACE = 1;

	if (patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1)
		bp_params.flags.HORZ_COUNT_BY_TWO = 1;

	/* Let the VBIOS program the CRTC timing registers. */
	result = tg->bp->funcs->program_crtc_timing(tg->bp, &bp_params);

	program_horz_count_by_2(tg, &patched_crtc_timing);

	tg110->base.funcs->enable_advanced_request(tg, true, &patched_crtc_timing);

	/* Enable stereo - only when we need to pack 3D frame. Other types
	 * of stereo handled in explicit call
	 */
	return result == BP_RESULT_OK;
}

/*
 *****************************************************************************
 *  Function: set_drr
 *
 *  @brief
 *     Program dynamic refresh rate registers m_DxCRTC_V_TOTAL_*.
 *     With valid min/max params DRR is armed (V_TOTAL_MIN/MAX select
 *     enabled); with NULL or zero params all DRR controls are cleared.
 *
 *  @param [in] pHwCrtcTiming: point to H
 *  wCrtcTiming struct
 *****************************************************************************
 */
void dce110_timing_generator_set_drr(
	struct timing_generator *tg,
	const struct drr_params *params)
{
	/* register values */
	uint32_t v_total_min = 0;
	uint32_t v_total_max = 0;
	uint32_t v_total_cntl = 0;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	uint32_t addr = 0;

	/* Read-modify-write all three DRR registers. */
	addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
	v_total_min = dm_read_reg(tg->ctx, addr);

	addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
	v_total_max = dm_read_reg(tg->ctx, addr);

	addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
	v_total_cntl = dm_read_reg(tg->ctx, addr);

	if (params != NULL &&
		params->vertical_total_max > 0 &&
		params->vertical_total_min > 0) {

		/* Register fields are 0-based line counts, hence the -1. */
		set_reg_field_value(v_total_max,
				params->vertical_total_max - 1,
				CRTC_V_TOTAL_MAX,
				CRTC_V_TOTAL_MAX);

		set_reg_field_value(v_total_min,
				params->vertical_total_min - 1,
				CRTC_V_TOTAL_MIN,
				CRTC_V_TOTAL_MIN);

		/* Enable min/max select, disable all force-lock modes. */
		set_reg_field_value(v_total_cntl,
				1,
				CRTC_V_TOTAL_CONTROL,
				CRTC_V_TOTAL_MIN_SEL);

		set_reg_field_value(v_total_cntl,
				1,
				CRTC_V_TOTAL_CONTROL,
				CRTC_V_TOTAL_MAX_SEL);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_FORCE_LOCK_ON_EVENT);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_FORCE_LOCK_TO_MASTER_VSYNC);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_SET_V_TOTAL_MIN_MASK_EN);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_SET_V_TOTAL_MIN_MASK);
	} else {
		/* DRR disabled: clear every control and zero min/max. */
		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_SET_V_TOTAL_MIN_MASK);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_V_TOTAL_MIN_SEL);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_V_TOTAL_MAX_SEL);

		set_reg_field_value(v_total_min,
				0,
				CRTC_V_TOTAL_MIN,
				CRTC_V_TOTAL_MIN);

		set_reg_field_value(v_total_max,
				0,
				CRTC_V_TOTAL_MAX,
				CRTC_V_TOTAL_MAX);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_FORCE_LOCK_ON_EVENT);

		set_reg_field_value(v_total_cntl,
				0,
				CRTC_V_TOTAL_CONTROL,
				CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
	}

	/* Commit in MIN, MAX, CONTROL order. */
	addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
	dm_write_reg(tg->ctx, addr, v_total_min);

	addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
	dm_write_reg(tg->ctx, addr, v_total_max);

	addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
	dm_write_reg(tg->ctx, addr, v_total_cntl);
}

/*
 * dce110_timing_generator_set_static_screen_control
 *
 * Program the static-screen event mask and the number of consecutive
 * static frames required before the event fires.
 */
void dce110_timing_generator_set_static_screen_control(
	struct timing_generator *tg,
	uint32_t event_triggers,
	uint32_t num_frames)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t static_screen_cntl = 0;
	uint32_t addr = 0;

	// By register spec, it only takes 8 bit value
	if (num_frames > 0xFF)
		num_frames = 0xFF;

	addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
	static_screen_cntl = dm_read_reg(tg->ctx, addr);

	set_reg_field_value(static_screen_cntl,
				event_triggers,
				CRTC_STATIC_SCREEN_CONTROL,
				CRTC_STATIC_SCREEN_EVENT_MASK);

	set_reg_field_value(static_screen_cntl,
				num_frames,
				CRTC_STATIC_SCREEN_CONTROL,
				CRTC_STATIC_SCREEN_FRAME_COUNT);

	dm_write_reg(tg->ctx, addr, static_screen_cntl);
}

/*
 * get_vblank_counter
 *
 * @brief
 * Get counter for vertical blanks. use register CRTC_STATUS_FRAME_COUNT which
 * holds the counter of frames.
 *
 * @param
 * struct timing_generator *tg - [in] timing generator which controls the
 * desired CRTC
 *
 * @return
 * Counter of frames, which should equal to number of vblanks.
*/
uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t addr = CRTC_REG(mmCRTC_STATUS_FRAME_COUNT);
	uint32_t value = dm_read_reg(tg->ctx, addr);
	uint32_t field = get_reg_field_value(
			value, CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);

	return field;
}

/*
 *****************************************************************************
 *  Function: dce110_timing_generator_get_position
 *
 *  @brief
 *     Returns CRTC vertical/horizontal counters
 *
 *  @param [out] position
 *****************************************************************************
 */
void dce110_timing_generator_get_position(struct timing_generator *tg,
	struct crtc_position *position)
{
	uint32_t value;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	/* Horizontal and vertical counters live in one status register. */
	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_STATUS_POSITION));

	position->horizontal_count = get_reg_field_value(
			value,
			CRTC_STATUS_POSITION,
			CRTC_HORZ_COUNT);

	position->vertical_count = get_reg_field_value(
			value,
			CRTC_STATUS_POSITION,
			CRTC_VERT_COUNT);

	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_NOM_VERT_POSITION));

	position->nominal_vcount = get_reg_field_value(
			value,
			CRTC_NOM_VERT_POSITION,
			CRTC_VERT_COUNT_NOM);
}

/*
 *****************************************************************************
 *  Function: get_crtc_scanoutpos
 *
 *  @brief
 *     Returns CRTC vertical/horizontal counters
 *
 *  @param [out] vpos, hpos
 *****************************************************************************
 */
void dce110_timing_generator_get_crtc_scanoutpos(
	struct timing_generator *tg,
	uint32_t *v_blank_start,
	uint32_t *v_blank_end,
	uint32_t *h_position,
	uint32_t *v_position)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	struct crtc_position position;

	uint32_t value = dm_read_reg(tg->ctx,
			CRTC_REG(mmCRTC_V_BLANK_START_END));

	*v_blank_start = get_reg_field_value(value,
					CRTC_V_BLANK_START_END,
					CRTC_V_BLANK_START);
	*v_blank_end = get_reg_field_value(value,
					CRTC_V_BLANK_START_END,
					CRTC_V_BLANK_END);

	dce110_timing_generator_get_position(
			tg, &position);

	*h_position = position.horizontal_count;
	*v_position = position.vertical_count;
}

/* TODO: is it safe to assume that mask/shift of Primary and Underlay
 * are the same?
 * For example: today CRTC_H_TOTAL == CRTCV_H_TOTAL but is it always
 * guaranteed?
 *
 * Program CRTC timing registers directly (the non-VBIOS path used by
 * dce110_tg_program_timing when use_vbios is false).
 */
void dce110_timing_generator_program_blanking(
	struct timing_generator *tg,
	const struct dc_crtc_timing *timing)
{
	uint32_t vsync_offset = timing->v_border_bottom +
			timing->v_front_porch;
	uint32_t v_sync_start = timing->v_addressable + vsync_offset;

	uint32_t hsync_offset = timing->h_border_right +
			timing->h_front_porch;
	uint32_t h_sync_start = timing->h_addressable + hsync_offset;

	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	struct dc_context *ctx = tg->ctx;
	uint32_t value = 0;
	uint32_t addr = 0;
	uint32_t tmp = 0;

	/* Totals are programmed as (total - 1): registers are 0-based. */
	addr = CRTC_REG(mmCRTC_H_TOTAL);
	value = dm_read_reg(ctx, addr);
	set_reg_field_value(
		value,
		timing->h_total - 1,
		CRTC_H_TOTAL,
		CRTC_H_TOTAL);
	dm_write_reg(ctx, addr, value);

	addr = CRTC_REG(mmCRTC_V_TOTAL);
	value = dm_read_reg(ctx, addr);
	set_reg_field_value(
		value,
		timing->v_total - 1,
		CRTC_V_TOTAL,
		CRTC_V_TOTAL);
	dm_write_reg(ctx, addr, value);

	/* In case of V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and
	 * V_TOTAL_MIN are equal to V_TOTAL.
	 */
	addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
	value = dm_read_reg(ctx, addr);
	set_reg_field_value(
		value,
		timing->v_total - 1,
		CRTC_V_TOTAL_MAX,
		CRTC_V_TOTAL_MAX);
	dm_write_reg(ctx, addr, value);

	addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
	value = dm_read_reg(ctx, addr);
	set_reg_field_value(
		value,
		timing->v_total - 1,
		CRTC_V_TOTAL_MIN,
		CRTC_V_TOTAL_MIN);
	dm_write_reg(ctx, addr, value);

	addr = CRTC_REG(mmCRTC_H_BLANK_START_END);
	value = dm_read_reg(ctx, addr);

	/* blank end = lines/pixels from sync start to end of frame */
	tmp = timing->h_total -
		(h_sync_start + timing->h_border_left);

	set_reg_field_value(
		value,
		tmp,
		CRTC_H_BLANK_START_END,
		CRTC_H_BLANK_END);

	/* blank start = blank end + addressable + both borders */
	tmp = tmp + timing->h_addressable +
		timing->h_border_left +
		timing->h_border_right;

	set_reg_field_value(
		value,
		tmp,
		CRTC_H_BLANK_START_END,
		CRTC_H_BLANK_START);

	dm_write_reg(ctx, addr, value);

	addr = CRTC_REG(mmCRTC_V_BLANK_START_END);
	value = dm_read_reg(ctx, addr);

	tmp = timing->v_total - (v_sync_start + timing->v_border_top);

	set_reg_field_value(
		value,
		tmp,
		CRTC_V_BLANK_START_END,
		CRTC_V_BLANK_END);

	tmp = tmp + timing->v_addressable + timing->v_border_top +
		timing->v_border_bottom;

	set_reg_field_value(
		value,
		tmp,
		CRTC_V_BLANK_START_END,
		CRTC_V_BLANK_START);

	dm_write_reg(ctx, addr, value);
}

/*
 * dce110_timing_generator_set_test_pattern
 *
 * Program the CRTC hardware test-pattern generator (color squares, bars,
 * ramp) or restore normal video output for VIDEOMODE.
 */
void dce110_timing_generator_set_test_pattern(
	struct timing_generator *tg,
	/* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
	 * because this is not DP-specific (which is probably somewhere in DP
	 * encoder) */
	enum controller_dp_test_pattern test_pattern,
	enum dc_color_depth color_depth)
{
	struct dc_context *ctx = tg->ctx;
	uint32_t value;
	uint32_t addr;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	enum test_pattern_color_format bit_depth;
	enum test_pattern_dyn_range dyn_range;
	enum test_pattern_mode mode;
	/* color ramp generator mixes 16-bits color */
	uint32_t src_bpc = 16;
	/* requested bpc */
	uint32_t dst_bpc;
	uint32_t index;
	/* RGB values of the color bars.
	 * Produce two RGB colors: RGB0 - white (all Fs)
	 * and RGB1 - black (all 0s)
	 * (three RGB components for two colors)
	 */
	uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000};
	/* dest color (converted to the specified color format) */
	uint16_t dst_color[6];
	uint32_t inc_base;

	/* translate to bit depth */
	switch (color_depth) {
	case COLOR_DEPTH_666:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
		break;
	case COLOR_DEPTH_888:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
		break;
	case COLOR_DEPTH_101010:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
		break;
	case COLOR_DEPTH_121212:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
		break;
	default:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
		break;
	}

	switch (test_pattern) {
	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
	{
		dyn_range = (test_pattern ==
				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
				TEST_PATTERN_DYN_RANGE_CEA :
				TEST_PATTERN_DYN_RANGE_VESA);
		mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
		value = 0;
		addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);

		/* 6x6 grid of color squares */
		set_reg_field_value(
			value,
			6,
			CRTC_TEST_PATTERN_PARAMETERS,
			CRTC_TEST_PATTERN_VRES);
		set_reg_field_value(
			value,
			6,
			CRTC_TEST_PATTERN_PARAMETERS,
			CRTC_TEST_PATTERN_HRES);

		dm_write_reg(ctx, addr, value);

		addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
		value = 0;

		set_reg_field_value(
			value,
			1,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_EN);

		set_reg_field_value(
			value,
			mode,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_MODE);

		set_reg_field_value(
			value,
			dyn_range,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_DYNAMIC_RANGE);
		set_reg_field_value(
			value,
			bit_depth,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_COLOR_FORMAT);
		dm_write_reg(ctx, addr, value);
	}
	break;

	case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
	case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
	{
		mode = (test_pattern ==
			CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
			TEST_PATTERN_MODE_VERTICALBARS :
			TEST_PATTERN_MODE_HORIZONTALBARS);

		switch (bit_depth) {
		case TEST_PATTERN_COLOR_FORMAT_BPC_6:
			dst_bpc = 6;
			break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_8:
			dst_bpc = 8;
			break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_10:
			dst_bpc = 10;
			break;
		default:
			dst_bpc = 8;
			break;
		}

		/* adjust color to the required colorFormat */
		for (index = 0; index < 6; index++) {
			/* dst = 2^dstBpc * src / 2^srcBpc = src >>
			 * (srcBpc - dstBpc);
			 */
			dst_color[index] =
				src_color[index] >> (src_bpc - dst_bpc);
			/* CRTC_TEST_PATTERN_DATA has 16 bits,
			 * lowest 6 are hardwired to ZERO
			 * color bits should be left aligned aligned to MSB
			 * XXXXXXXXXX000000 for 10 bit,
			 * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
			 */
			dst_color[index] <<= (16 - dst_bpc);
		}

		value = 0;
		addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
		dm_write_reg(ctx, addr, value);

		/* We have to write the mask before data, similar to pipeline.
		 * For example, for 8 bpc, if we want RGB0 to be magenta,
		 * and RGB1 to be cyan,
		 * we need to make 7 writes:
		 * MASK   DATA
		 * 000001 00000000 00000000                     set mask to R0
		 * 000010 11111111 00000000     R0 255, 0xFF00, set mask to G0
		 * 000100 00000000 00000000     G0 0, 0x0000, set mask to B0
		 * 001000 11111111 00000000     B0 255, 0xFF00, set mask to R1
		 * 010000 00000000 00000000     R1 0, 0x0000, set mask to G1
		 * 100000 11111111 00000000     G1 255, 0xFF00, set mask to B1
		 * 100000 11111111 00000000     B1 255, 0xFF00
		 *
		 * we will make a loop of 6 in which we prepare the mask,
		 * then write, then prepare the color for next write.
		 * first iteration will write mask only,
		 * but each next iteration color prepared in
		 * previous iteration will be written within new mask,
		 * the last component will written separately,
		 * mask is not changing between 6th and 7th write
		 * and color will be prepared by last iteration
		 */

		/* write color, color values mask in CRTC_TEST_PATTERN_MASK
		 * is B1, G1, R1, B0, G0, R0
		 */
		value = 0;
		addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
		for (index = 0; index < 6; index++) {
			/* prepare color mask, first write PATTERN_DATA
			 * will have all zeros
			 */
			set_reg_field_value(
				value,
				(1 << index),
				CRTC_TEST_PATTERN_COLOR,
				CRTC_TEST_PATTERN_MASK);
			/* write color component */
			dm_write_reg(ctx, addr, value);
			/* prepare next color component,
			 * will be written in the next iteration
			 */
			set_reg_field_value(
				value,
				dst_color[index],
				CRTC_TEST_PATTERN_COLOR,
				CRTC_TEST_PATTERN_DATA);
		}
		/* write last color component,
		 * it's been already prepared in the loop
		 */
		dm_write_reg(ctx, addr, value);

		/* enable test pattern */
		addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
		value = 0;

		set_reg_field_value(
			value,
			1,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_EN);

		set_reg_field_value(
			value,
			mode,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_MODE);

		set_reg_field_value(
			value,
			0,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_DYNAMIC_RANGE);

		set_reg_field_value(
			value,
			bit_depth,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_COLOR_FORMAT);

		dm_write_reg(ctx, addr, value);
	}
	break;

	case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
	{
		mode = (bit_depth ==
			TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
			TEST_PATTERN_MODE_DUALRAMP_RGB :
			TEST_PATTERN_MODE_SINGLERAMP_RGB);

		switch (bit_depth) {
		case TEST_PATTERN_COLOR_FORMAT_BPC_6:
			dst_bpc = 6;
			break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_8:
			dst_bpc = 8;
			break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_10:
			dst_bpc = 10;
			break;
		default:
			dst_bpc = 8;
			break;
		}

		/* increment for the first ramp for one color gradation
		 * 1 gradation for 6-bit color is 2^10
		 * gradations in 16-bit color
		 */
		inc_base = (src_bpc - dst_bpc);

		value = 0;
		addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);

		switch (bit_depth) {
		case TEST_PATTERN_COLOR_FORMAT_BPC_6:
		{
			set_reg_field_value(
				value,
				inc_base,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_INC0);
			set_reg_field_value(
				value,
				0,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_INC1);
			set_reg_field_value(
				value,
				6,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_HRES);
			set_reg_field_value(
				value,
				6,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_VRES);
			set_reg_field_value(
				value,
				0,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_RAMP0_OFFSET);
		}
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_8:
		{
			set_reg_field_value(
				value,
				inc_base,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_INC0);
			set_reg_field_value(
				value,
				0,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_INC1);
			set_reg_field_value(
				value,
				8,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_HRES);
			set_reg_field_value(
				value,
				6,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_VRES);
			set_reg_field_value(
				value,
				0,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_RAMP0_OFFSET);
		}
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_10:
		{
			set_reg_field_value(
				value,
				inc_base,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_INC0);
			set_reg_field_value(
				value,
				inc_base + 2,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_INC1);
			set_reg_field_value(
				value,
				8,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_HRES);
			set_reg_field_value(
				value,
				5,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_VRES);
			set_reg_field_value(
				value,
				384 << 6,
				CRTC_TEST_PATTERN_PARAMETERS,
				CRTC_TEST_PATTERN_RAMP0_OFFSET);
		}
		break;
		default:
			break;
		}

		dm_write_reg(ctx, addr, value);

		value = 0;
		addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
		dm_write_reg(ctx, addr, value);

		/* enable test pattern */
		addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
		value = 0;

		set_reg_field_value(
			value,
			1,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_EN);

		set_reg_field_value(
			value,
			mode,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_MODE);

		set_reg_field_value(
			value,
			0,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_DYNAMIC_RANGE);

		/* add color depth translation here */
		set_reg_field_value(
			value,
			bit_depth,
			CRTC_TEST_PATTERN_CONTROL,
			CRTC_TEST_PATTERN_COLOR_FORMAT);

		dm_write_reg(ctx, addr, value);
	}
	break;
	case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
	{
		/* Disable the generator: zero all three registers. */
		value = 0;
		dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL), value);
		dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_COLOR), value);
		dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS), value);
	}
	break;
	default:
		break;
	}
}

/*
 * dce110_timing_generator_validate_timing
 * The timing generators support a maximum display size of is 8192 x 8192 pixels,
 * including both active display and blanking periods. Check H Total and V Total.
*/
bool dce110_timing_generator_validate_timing(
	struct timing_generator *tg,
	const struct dc_crtc_timing *timing,
	enum signal_type signal)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t horz_blank;
	uint32_t horz_back_porch;
	uint32_t horz_sync_offset;
	uint32_t horz_sync_start;

	ASSERT(timing != NULL);
	if (!timing)
		return false;

	horz_sync_offset = timing->h_border_right + timing->h_front_porch;
	horz_sync_start = timing->h_addressable + horz_sync_offset;

	/* Currently we don't support 3D, so block all 3D timings */
	if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
		return false;

	/* Temporarily blocking interlacing mode until it's supported */
	if (timing->flags.INTERLACE == 1)
		return false;

	/* Check maximum number of pixels supported by Timing Generator
	 * (Currently will never fail, in order to fail needs display which
	 * needs more than 8192 horizontal and
	 * more than 8192 vertical total pixels)
	 */
	if (timing->h_total > tg110->max_h_total)
		return false;
	if (timing->v_total > tg110->max_v_total)
		return false;

	/* Horizontal blank must respect the per-ASIC minimum. */
	horz_blank = timing->h_total - timing->h_addressable -
		timing->h_border_right - timing->h_border_left;
	if (horz_blank < tg110->min_h_blank)
		return false;

	if (timing->h_front_porch < tg110->min_h_front_porch)
		return false;

	/* Back porch = blank minus (front porch + sync width). */
	horz_back_porch = horz_blank - (horz_sync_start -
		timing->h_addressable -
		timing->h_border_right -
		timing->h_sync_width);
	if (horz_back_porch < tg110->min_h_back_porch)
		return false;

	return true;
}

/*
 * Wait till we are at the beginning of VBlank.
*/
void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
{
	/* We want to catch beginning of VBlank here, so if the first try are
	 * in VBlank, we might be very close to Active, in this case wait for
	 * another frame
	 */
	while (dce110_timing_generator_is_in_vertical_blank(tg)) {
		if (!dce110_timing_generator_is_counter_moving(tg)) {
			/* error - no point to wait if counter is not moving */
			break;
		}
	}

	while (!dce110_timing_generator_is_in_vertical_blank(tg)) {
		if (!dce110_timing_generator_is_counter_moving(tg)) {
			/* error - no point to wait if counter is not moving */
			break;
		}
	}
}

/*
 * Wait till we are in VActive (anywhere in VActive)
 */
void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
{
	while (dce110_timing_generator_is_in_vertical_blank(tg)) {
		if (!dce110_timing_generator_is_counter_moving(tg)) {
			/* error - no point to wait if counter is not moving */
			break;
		}
	}
}

/*
 *****************************************************************************
 *  Function: dce110_timing_generator_setup_global_swap_lock
 *
 *  @brief
 *     Setups Global Swap Lock group for current pipe
 *     Pipe can join or leave GSL group, become a TimingServer or TimingClient
 *
 *  @param [in] gsl_params: setup data
 *****************************************************************************
 */
void dce110_timing_generator_setup_global_swap_lock(
	struct timing_generator *tg,
	const struct dcp_gsl_params *gsl_params)
{
	uint32_t value;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
	uint32_t check_point = FLIP_READY_BACK_LOOKUP;

	value = dm_read_reg(tg->ctx, address);

	/* This pipe will belong to GSL Group zero.
	 */
	set_reg_field_value(value,
			1,
			DCP_GSL_CONTROL,
			DCP_GSL0_EN);

	/* This pipe is the GSL master iff it matches gsl_master. */
	set_reg_field_value(value,
			gsl_params->gsl_master == tg->inst,
			DCP_GSL_CONTROL,
			DCP_GSL_MASTER_EN);

	set_reg_field_value(value,
			HFLIP_READY_DELAY,
			DCP_GSL_CONTROL,
			DCP_GSL_HSYNC_FLIP_FORCE_DELAY);

	/* Keep signal low (pending high) during 6 lines.
	 * Also defines minimum interval before re-checking signal.
	 */
	set_reg_field_value(value,
			HFLIP_CHECK_DELAY,
			DCP_GSL_CONTROL,
			DCP_GSL_HSYNC_FLIP_CHECK_DELAY);

	/* NOTE(review): written through CRTC_REG() although the register was
	 * read via DCP_REG() above - confirm the instance offsets agree.
	 */
	dm_write_reg(tg->ctx, CRTC_REG(mmDCP_GSL_CONTROL), value);

	value = 0;

	set_reg_field_value(value,
			gsl_params->gsl_master,
			DCIO_GSL0_CNTL,
			DCIO_GSL0_VSYNC_SEL);

	set_reg_field_value(value,
			0,
			DCIO_GSL0_CNTL,
			DCIO_GSL0_TIMING_SYNC_SEL);

	set_reg_field_value(value,
			0,
			DCIO_GSL0_CNTL,
			DCIO_GSL0_GLOBAL_UNLOCK_SEL);

	dm_write_reg(tg->ctx, CRTC_REG(mmDCIO_GSL0_CNTL), value);

	{
		uint32_t value_crtc_vtotal;

		value_crtc_vtotal = dm_read_reg(tg->ctx,
				CRTC_REG(mmCRTC_V_TOTAL));

		set_reg_field_value(value,
				0,/* DCP_GSL_PURPOSE_SURFACE_FLIP */
				DCP_GSL_CONTROL,
				DCP_GSL_SYNC_SOURCE);

		/* Checkpoint relative to end of frame */
		check_point = get_reg_field_value(value_crtc_vtotal,
				CRTC_V_TOTAL,
				CRTC_V_TOTAL);

		dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_GSL_WINDOW), 0);
	}

	set_reg_field_value(value,
			1,
			DCP_GSL_CONTROL,
			DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);

	dm_write_reg(tg->ctx, address, value);

	/********************************************************************/
	address = CRTC_REG(mmCRTC_GSL_CONTROL);

	value = dm_read_reg(tg->ctx, address);

	set_reg_field_value(value,
			check_point - FLIP_READY_BACK_LOOKUP,
			CRTC_GSL_CONTROL,
			CRTC_GSL_CHECK_LINE_NUM);

	set_reg_field_value(value,
			VFLIP_READY_DELAY,
			CRTC_GSL_CONTROL,
			CRTC_GSL_FORCE_DELAY);

	dm_write_reg(tg->ctx, address, value);
}

void dce110_timing_generator_tear_down_global_swap_lock(
	struct timing_generator *tg)
{
	/* Clear all the register writes done by
	 * dce110_timing_generator_setup_global_swap_lock
	 */
	uint32_t value;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);

	value = 0;

	/* This pipe will belong to GSL Group zero.
	 */
	/* Settig HW default values from reg specs */
	set_reg_field_value(value,
			0,
			DCP_GSL_CONTROL,
			DCP_GSL0_EN);

	set_reg_field_value(value,
			0,
			DCP_GSL_CONTROL,
			DCP_GSL_MASTER_EN);

	set_reg_field_value(value,
			0x2,
			DCP_GSL_CONTROL,
			DCP_GSL_HSYNC_FLIP_FORCE_DELAY);

	set_reg_field_value(value,
			0x6,
			DCP_GSL_CONTROL,
			DCP_GSL_HSYNC_FLIP_CHECK_DELAY);

	/* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
	{
		/* NOTE(review): read result is discarded here - presumably only
		 * kept to mirror the setup path; confirm it is not needed.
		 */
		dm_read_reg(tg->ctx,
				CRTC_REG(mmCRTC_V_TOTAL));

		set_reg_field_value(value,
				0,
				DCP_GSL_CONTROL,
				DCP_GSL_SYNC_SOURCE);
	}

	set_reg_field_value(value,
			0,
			DCP_GSL_CONTROL,
			DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);

	dm_write_reg(tg->ctx, address, value);

	/********************************************************************/
	address = CRTC_REG(mmCRTC_GSL_CONTROL);

	value = 0;
	set_reg_field_value(value,
			0,
			CRTC_GSL_CONTROL,
			CRTC_GSL_CHECK_LINE_NUM);

	set_reg_field_value(value,
			0x2,
			CRTC_GSL_CONTROL,
			CRTC_GSL_FORCE_DELAY);

	dm_write_reg(tg->ctx, address, value);
}

/*
 *****************************************************************************
 *  Function: is_counter_moving
 *
 *  @brief
 *     check if the timing generator is currently going
 *
 *  @return
 *     true if currently going, false if currently paused or stopped.
 *
 *****************************************************************************
 */
bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg)
{
	struct crtc_position position1, position2;

	/* Sample the position twice; identical samples mean the counter
	 * is stopped. */
	tg->funcs->get_position(tg, &position1);
	tg->funcs->get_position(tg, &position2);

	if (position1.horizontal_count == position2.horizontal_count &&
		position1.vertical_count == position2.vertical_count)
		return false;
	else
		return true;
}

/*
 * dce110_timing_generator_enable_advanced_request
 *
 * Enable/disable the legacy requestor and program the advanced start
 * line / prefetch based on how short vsync+front-porch is.
 */
void dce110_timing_generator_enable_advanced_request(
	struct timing_generator *tg,
	bool enable,
	const struct dc_crtc_timing *timing)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
	uint32_t value = dm_read_reg(tg->ctx, addr);

	if (enable) {
		set_reg_field_value(
			value,
			0,
			CRTC_START_LINE_CONTROL,
			CRTC_LEGACY_REQUESTOR_EN);
	} else {
		set_reg_field_value(
			value,
			1,
			CRTC_START_LINE_CONTROL,
			CRTC_LEGACY_REQUESTOR_EN);
	}

	/* Too few lines before active: start earlier and skip prefetch. */
	if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
		set_reg_field_value(
			value,
			3,
			CRTC_START_LINE_CONTROL,
			CRTC_ADVANCED_START_LINE_POSITION);
		set_reg_field_value(
			value,
			0,
			CRTC_START_LINE_CONTROL,
			CRTC_PREFETCH_EN);
	} else {
		set_reg_field_value(
			value,
			4,
			CRTC_START_LINE_CONTROL,
			CRTC_ADVANCED_START_LINE_POSITION);
		set_reg_field_value(
			value,
			1,
			CRTC_START_LINE_CONTROL,
			CRTC_PREFETCH_EN);
	}

	set_reg_field_value(
		value,
		1,
		CRTC_START_LINE_CONTROL,
		CRTC_PROGRESSIVE_START_LINE_EARLY);

	set_reg_field_value(
		value,
		1,
		CRTC_START_LINE_CONTROL,
		CRTC_INTERLACE_START_LINE_EARLY);

	dm_write_reg(tg->ctx, addr, value);
}

/*TODO: Figure out if we need this function. */
void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
		bool lock)
{
	struct dc_context *ctx = tg->ctx;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t addr = CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK);
	uint32_t value = dm_read_reg(ctx, addr);

	set_reg_field_value(
		value,
		lock ? 1 : 0,
		CRTC_MASTER_UPDATE_LOCK,
		MASTER_UPDATE_LOCK);

	dm_write_reg(ctx, addr, value);
}

/*
 * dce110_timing_generator_enable_reset_trigger
 *
 * Arm TriggerB from GSL group 0 (edge chosen from current VSYNC_A
 * polarity) and force H/V counters to TOTAL on trigger.
 */
void dce110_timing_generator_enable_reset_trigger(
	struct timing_generator *tg,
	int source_tg_inst)
{
	uint32_t value;
	uint32_t rising_edge = 0;
	uint32_t falling_edge = 0;
	enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	/* Setup trigger edge */
	{
		uint32_t pol_value = dm_read_reg(tg->ctx,
				CRTC_REG(mmCRTC_V_SYNC_A_CNTL));

		/* Register spec has reversed definition:
		 * 0 for positive, 1 for negative
		 */
		if (get_reg_field_value(pol_value,
				CRTC_V_SYNC_A_CNTL,
				CRTC_V_SYNC_A_POL) == 0) {
			rising_edge = 1;
		} else {
			falling_edge = 1;
		}
	}

	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));

	trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;

	set_reg_field_value(value,
			trig_src_select,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_SOURCE_SELECT);

	set_reg_field_value(value,
			TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_POLARITY_SELECT);

	set_reg_field_value(value,
			rising_edge,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_RISING_EDGE_DETECT_CNTL);

	set_reg_field_value(value,
			falling_edge,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL);

	set_reg_field_value(value,
			0, /* send every signal */
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_FREQUENCY_SELECT);

	set_reg_field_value(value,
			0, /* no delay */
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_DELAY);

	set_reg_field_value(value,
			1, /* clear trigger status */
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_CLEAR);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);

	/**************************************************************/

	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));

	set_reg_field_value(value,
			2, /* force H count to H_TOTAL and V count to V_TOTAL */
			CRTC_FORCE_COUNT_NOW_CNTL,
			CRTC_FORCE_COUNT_NOW_MODE);

	set_reg_field_value(value,
			1, /* TriggerB - we never use TriggerA */
			CRTC_FORCE_COUNT_NOW_CNTL,
			CRTC_FORCE_COUNT_NOW_TRIG_SEL);

	set_reg_field_value(value,
			1, /* clear trigger status */
			CRTC_FORCE_COUNT_NOW_CNTL,
			CRTC_FORCE_COUNT_NOW_CLEAR);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
}

/*
 * dce110_timing_generator_enable_crtc_reset
 *
 * Arm TriggerB from another TG instance to reset this CRTC, on the
 * event edge and delay mode requested in crtc_tp.
 */
void dce110_timing_generator_enable_crtc_reset(
	struct timing_generator *tg,
	int source_tg_inst,
	struct crtc_trigger_info *crtc_tp)
{
	uint32_t value = 0;
	uint32_t rising_edge = 0;
	uint32_t falling_edge = 0;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	/* Setup trigger edge */
	switch (crtc_tp->event) {
	case CRTC_EVENT_VSYNC_RISING:
		rising_edge = 1;
		break;

	case CRTC_EVENT_VSYNC_FALLING:
		falling_edge = 1;
		break;
	}

	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));

	set_reg_field_value(value,
			source_tg_inst,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_SOURCE_SELECT);

	set_reg_field_value(value,
			TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_POLARITY_SELECT);

	set_reg_field_value(value,
			rising_edge,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_RISING_EDGE_DETECT_CNTL);

	set_reg_field_value(value,
			falling_edge,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL);

	set_reg_field_value(value,
			1, /* clear trigger status */
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_CLEAR);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);

	/**************************************************************/

	switch (crtc_tp->delay) {
	case TRIGGER_DELAY_NEXT_LINE:
		value = dm_read_reg(tg->ctx,
				CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));

		set_reg_field_value(value,
				0, /* force H count to H_TOTAL and V count to V_TOTAL */
				CRTC_FORCE_COUNT_NOW_CNTL,
				CRTC_FORCE_COUNT_NOW_MODE);

		set_reg_field_value(value,
				0, /* TriggerB - we never use TriggerA */
				CRTC_FORCE_COUNT_NOW_CNTL,
				CRTC_FORCE_COUNT_NOW_TRIG_SEL);

		set_reg_field_value(value,
				1, /* clear trigger status */
				CRTC_FORCE_COUNT_NOW_CNTL,
				CRTC_FORCE_COUNT_NOW_CLEAR);

		dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);

		value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));

		set_reg_field_value(value,
				1,
				CRTC_VERT_SYNC_CONTROL,
				CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR);

		set_reg_field_value(value,
				2,
				CRTC_VERT_SYNC_CONTROL,
				CRTC_AUTO_FORCE_VSYNC_MODE);

		/* NOTE(review): the updated CRTC_VERT_SYNC_CONTROL value is
		 * never written back in this branch (unlike NEXT_PIXEL below)
		 * - confirm this is intentional.
		 */
		break;

	case TRIGGER_DELAY_NEXT_PIXEL:
		value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));

		set_reg_field_value(value,
				1,
				CRTC_VERT_SYNC_CONTROL,
				CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR);

		set_reg_field_value(value,
				0,
				CRTC_VERT_SYNC_CONTROL,
				CRTC_AUTO_FORCE_VSYNC_MODE);

		dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL), value);

		value = dm_read_reg(tg->ctx,
				CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));

		set_reg_field_value(value,
				2, /* force H count to H_TOTAL and V count to V_TOTAL */
				CRTC_FORCE_COUNT_NOW_CNTL,
				CRTC_FORCE_COUNT_NOW_MODE);

		set_reg_field_value(value,
				1, /* TriggerB - we never use TriggerA */
				CRTC_FORCE_COUNT_NOW_CNTL,
				CRTC_FORCE_COUNT_NOW_TRIG_SEL);

		set_reg_field_value(value,
				1, /* clear trigger status */
				CRTC_FORCE_COUNT_NOW_CNTL,
				CRTC_FORCE_COUNT_NOW_CLEAR);

		dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
		break;
	}

	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE));

	set_reg_field_value(value,
			2,
			CRTC_MASTER_UPDATE_MODE,
			MASTER_UPDATE_MODE);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE), value);
}

/*
 * dce110_timing_generator_disable_reset_trigger
 *
 * Undo enable_reset_trigger/enable_crtc_reset: disable force-count-now,
 * auto-force-vsync, and park TriggerB on logic zero.
 */
void dce110_timing_generator_disable_reset_trigger(
	struct timing_generator *tg)
{
	uint32_t value;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));

	set_reg_field_value(value,
			0, /* force counter now mode is disabled */
			CRTC_FORCE_COUNT_NOW_CNTL,
			CRTC_FORCE_COUNT_NOW_MODE);

	set_reg_field_value(value,
			1, /* clear trigger status */
			CRTC_FORCE_COUNT_NOW_CNTL,
			CRTC_FORCE_COUNT_NOW_CLEAR);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);

	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));

	set_reg_field_value(value,
			1,
			CRTC_VERT_SYNC_CONTROL,
			CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR);

	set_reg_field_value(value,
			0,
			CRTC_VERT_SYNC_CONTROL,
			CRTC_AUTO_FORCE_VSYNC_MODE);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL), value);

	/********************************************************************/
	value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));

	set_reg_field_value(value,
			TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_SOURCE_SELECT);

	set_reg_field_value(value,
			TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_POLARITY_SELECT);

	set_reg_field_value(value,
			1, /* clear trigger status */
			CRTC_TRIGB_CNTL,
			CRTC_TRIGB_CLEAR);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
}

/*
 *****************************************************************************
 *  @brief
 *     Checks whether CRTC triggered reset occurred
 *
 *  @return
 *     true if triggered reset occurred, false otherwise
 *****************************************************************************
 */
bool dce110_timing_generator_did_triggered_reset_occur(
	struct timing_generator *tg)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t value = dm_read_reg(tg->ctx,
			CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
	uint32_t value1 = dm_read_reg(tg->ctx,
			CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));
	bool force = get_reg_field_value(value,
			CRTC_FORCE_COUNT_NOW_CNTL,
			CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
	bool vert_sync = get_reg_field_value(value1,
			CRTC_VERT_SYNC_CONTROL,
			CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED) != 0;

	return (force || vert_sync);
}

/*
 * dce110_timing_generator_disable_vga
 * Turn OFF VGA Mode and Timing - DxVGA_CONTROL
 * VGA Mode and VGA Timing is used by VBIOS on CRT Monitors;
 */
void dce110_timing_generator_disable_vga(
	struct timing_generator *tg)
{
	uint32_t addr = 0;
	uint32_t value = 0;

	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	switch (tg110->controller_id) {
	case CONTROLLER_ID_D0:
		addr = mmD1VGA_CONTROL;
		break;
	case CONTROLLER_ID_D1:
		addr = mmD2VGA_CONTROL;
		break;
	case CONTROLLER_ID_D2:
		addr = mmD3VGA_CONTROL;
		break;
	case CONTROLLER_ID_D3:
		addr = mmD4VGA_CONTROL;
		break;
	case CONTROLLER_ID_D4:
		addr = mmD5VGA_CONTROL;
		break;
	case CONTROLLER_ID_D5:
		addr = mmD6VGA_CONTROL;
		break;
	default:
		break;
	}

	value = dm_read_reg(tg->ctx, addr);

	/* D1VGA_CONTROL field layout is reused for all DxVGA instances. */
	set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
	set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
	set_reg_field_value(
			value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
	set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);

	dm_write_reg(tg->ctx, addr, value);
}

/*
 * set_overscan_color_black
 *
 * @param :black_color is one of the color space
 *    :this routine will set overscan black color according to the color space.
 * @return none
 */
void dce110_timing_generator_set_overscan_color_black(
	struct timing_generator *tg,
	const struct tg_color *color)
{
	struct dc_context *ctx = tg->ctx;
	uint32_t addr;
	uint32_t value = 0;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	set_reg_field_value(
			value,
			color->color_b_cb,
			CRTC_OVERSCAN_COLOR,
			CRTC_OVERSCAN_COLOR_BLUE);

	set_reg_field_value(
			value,
			color->color_r_cr,
			CRTC_OVERSCAN_COLOR,
			CRTC_OVERSCAN_COLOR_RED);

	set_reg_field_value(
			value,
			color->color_g_y,
			CRTC_OVERSCAN_COLOR,
			CRTC_OVERSCAN_COLOR_GREEN);

	addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
	dm_write_reg(ctx, addr, value);
	addr = CRTC_REG(mmCRTC_BLACK_COLOR);
	dm_write_reg(ctx, addr, value);

	/* This is desirable to have a constant DAC output voltage during the
	 * blank time that is higher than the 0 volt reference level that the
	 * DAC outputs when the NBLANK signal
	 * is asserted low, such as for output to an analog TV.
	 */
	addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
	dm_write_reg(ctx, addr, value);

	/* TO DO we have to program EXT registers and we need to know LB DATA
	 * format because it is used when more 10 , i.e. 12 bits per color
	 *
	 * m_mmDxCRTC_OVERSCAN_COLOR_EXT
	 * m_mmDxCRTC_BLACK_COLOR_EXT
	 * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
	 */
}

/* Program black color into both BLACK_COLOR and BLANK_DATA_COLOR. */
void dce110_tg_program_blank_color(struct timing_generator *tg,
	const struct tg_color *black_color)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
	uint32_t value = dm_read_reg(tg->ctx, addr);

	set_reg_field_value(
		value,
		black_color->color_b_cb,
		CRTC_BLACK_COLOR,
		CRTC_BLACK_COLOR_B_CB);
	set_reg_field_value(
		value,
		black_color->color_g_y,
		CRTC_BLACK_COLOR,
		CRTC_BLACK_COLOR_G_Y);
	set_reg_field_value(
		value,
		black_color->color_r_cr,
		CRTC_BLACK_COLOR,
		CRTC_BLACK_COLOR_R_CR);

	dm_write_reg(tg->ctx, addr, value);

	addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
	dm_write_reg(tg->ctx, addr, value);
}

/* Program the overscan border color. */
void dce110_tg_set_overscan_color(struct timing_generator *tg,
	const struct tg_color *overscan_color)
{
	struct dc_context *ctx = tg->ctx;
	uint32_t value = 0;
	uint32_t addr;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);

	set_reg_field_value(
		value,
		overscan_color->color_b_cb,
		CRTC_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_BLUE);

	set_reg_field_value(
		value,
		overscan_color->color_g_y,
		CRTC_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_GREEN);

	set_reg_field_value(
		value,
		overscan_color->color_r_cr,
		CRTC_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_RED);

	addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
	dm_write_reg(ctx, addr, value);
}

/* Dispatch timing programming to the VBIOS or direct-register path. */
void dce110_tg_program_timing(struct timing_generator *tg,
	const struct dc_crtc_timing *timing,
	int vready_offset,
	int vstartup_start,
	int vupdate_offset,
	int vupdate_width,
	const enum signal_type signal,
	bool use_vbios)
{
	if (use_vbios)
		dce110_timing_generator_program_timing_generator(tg, timing);
	else
		dce110_timing_generator_program_blanking(tg, timing);
}

/* Blanked means blank-data is enabled AND the CRTC reports blank state. */
bool dce110_tg_is_blanked(struct timing_generator *tg)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL));

	if (get_reg_field_value(
			value,
			CRTC_BLANK_CONTROL,
			CRTC_BLANK_DATA_EN) == 1 &&
		get_reg_field_value(
			value,
			CRTC_BLANK_CONTROL,
			CRTC_CURRENT_BLANK_STATE) == 1)
		return true;

	return false;
}

/* Enable/disable CRTC data blanking (double-buffered). */
void dce110_tg_set_blank(struct timing_generator *tg,
		bool enable_blanking)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t value = 0;

	set_reg_field_value(
		value,
		1,
		CRTC_DOUBLE_BUFFER_CONTROL,
		CRTC_BLANK_DATA_DOUBLE_BUFFER_EN);

	dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_DOUBLE_BUFFER_CONTROL), value);

	value = 0;

	if (enable_blanking) {
		set_reg_field_value(
			value,
			1,
			CRTC_BLANK_CONTROL,
			CRTC_BLANK_DATA_EN);

		dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), value);
	} else
		dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), 0);
}

bool dce110_tg_validate_timing(struct timing_generator *tg,
	const struct dc_crtc_timing *timing)
{
	return dce110_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
}

void dce110_tg_wait_for_state(struct timing_generator *tg,
	enum crtc_state state)
{
	switch (state) {
	case CRTC_STATE_VBLANK:
		dce110_timing_generator_wait_for_vblank(tg);
		break;

	case CRTC_STATE_VACTIVE:
		dce110_timing_generator_wait_for_vactive(tg);
		break;

	default:
		break;
	}
}

void dce110_tg_set_colors(struct timing_generator *tg,
	const struct tg_color *blank_color,
	const struct tg_color *overscan_color)
{
	if (blank_color != NULL)
		dce110_tg_program_blank_color(tg, blank_color);

	if (overscan_color != NULL)
		dce110_tg_set_overscan_color(tg, overscan_color);
}

/* Gets first line of blank region of the display timing for CRTC
 * and programms is as a trigger to fire vertical interrupt
 */
bool dce110_arm_vert_intr(struct timing_generator *tg, uint8_t width)
{
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t v_blank_start = 0;
	uint32_t v_blank_end = 0;
	uint32_t val = 0;
	uint32_t h_position, v_position;

	tg->funcs->get_scanoutpos(
			tg,
			&v_blank_start,
			&v_blank_end,
			&h_position,
			&v_position);

	if (v_blank_start == 0 || v_blank_end == 0)
		return false;

	set_reg_field_value(
		val,
		v_blank_start,
CRTC_VERTICAL_INTERRUPT0_POSITION, CRTC_VERTICAL_INTERRUPT0_LINE_START); /* Set interval width for interrupt to fire to 1 scanline */ set_reg_field_value( val, v_blank_start + width, CRTC_VERTICAL_INTERRUPT0_POSITION, CRTC_VERTICAL_INTERRUPT0_LINE_END); dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERTICAL_INTERRUPT0_POSITION), val); return true; } static bool dce110_is_tg_enabled(struct timing_generator *tg) { uint32_t addr = 0; uint32_t value = 0; uint32_t field = 0; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); addr = CRTC_REG(mmCRTC_CONTROL); value = dm_read_reg(tg->ctx, addr); field = get_reg_field_value(value, CRTC_CONTROL, CRTC_CURRENT_MASTER_EN_STATE); return field == 1; } bool dce110_configure_crc(struct timing_generator *tg, const struct crc_params *params) { uint32_t cntl_addr = 0; uint32_t addr = 0; uint32_t value; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); /* Cannot configure crc on a CRTC that is disabled */ if (!dce110_is_tg_enabled(tg)) return false; cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL); /* First, disable CRC before we configure it. */ dm_write_reg(tg->ctx, cntl_addr, 0); if (!params->enable) return true; /* Program frame boundaries */ /* Window A x axis start and end. */ value = 0; addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL); set_reg_field_value(value, params->windowa_x_start, CRTC_CRC0_WINDOWA_X_CONTROL, CRTC_CRC0_WINDOWA_X_START); set_reg_field_value(value, params->windowa_x_end, CRTC_CRC0_WINDOWA_X_CONTROL, CRTC_CRC0_WINDOWA_X_END); dm_write_reg(tg->ctx, addr, value); /* Window A y axis start and end. */ value = 0; addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL); set_reg_field_value(value, params->windowa_y_start, CRTC_CRC0_WINDOWA_Y_CONTROL, CRTC_CRC0_WINDOWA_Y_START); set_reg_field_value(value, params->windowa_y_end, CRTC_CRC0_WINDOWA_Y_CONTROL, CRTC_CRC0_WINDOWA_Y_END); dm_write_reg(tg->ctx, addr, value); /* Window B x axis start and end. 
*/ value = 0; addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL); set_reg_field_value(value, params->windowb_x_start, CRTC_CRC0_WINDOWB_X_CONTROL, CRTC_CRC0_WINDOWB_X_START); set_reg_field_value(value, params->windowb_x_end, CRTC_CRC0_WINDOWB_X_CONTROL, CRTC_CRC0_WINDOWB_X_END); dm_write_reg(tg->ctx, addr, value); /* Window B y axis start and end. */ value = 0; addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL); set_reg_field_value(value, params->windowb_y_start, CRTC_CRC0_WINDOWB_Y_CONTROL, CRTC_CRC0_WINDOWB_Y_START); set_reg_field_value(value, params->windowb_y_end, CRTC_CRC0_WINDOWB_Y_CONTROL, CRTC_CRC0_WINDOWB_Y_END); dm_write_reg(tg->ctx, addr, value); /* Set crc mode and selection, and enable. Only using CRC0*/ value = 0; set_reg_field_value(value, params->continuous_mode ? 1 : 0, CRTC_CRC_CNTL, CRTC_CRC_CONT_EN); set_reg_field_value(value, params->selection, CRTC_CRC_CNTL, CRTC_CRC0_SELECT); set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN); dm_write_reg(tg->ctx, cntl_addr, value); return true; } bool dce110_get_crc(struct timing_generator *tg, uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) { uint32_t addr = 0; uint32_t value = 0; uint32_t field = 0; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); addr = CRTC_REG(mmCRTC_CRC_CNTL); value = dm_read_reg(tg->ctx, addr); field = get_reg_field_value(value, CRTC_CRC_CNTL, CRTC_CRC_EN); /* Early return if CRC is not enabled for this CRTC */ if (!field) return false; addr = CRTC_REG(mmCRTC_CRC0_DATA_RG); value = dm_read_reg(tg->ctx, addr); *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR); *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y); addr = CRTC_REG(mmCRTC_CRC0_DATA_B); value = dm_read_reg(tg->ctx, addr); *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB); return true; } static const struct timing_generator_funcs dce110_tg_funcs = { .validate_timing = dce110_tg_validate_timing, .program_timing = dce110_tg_program_timing, .enable_crtc = 
dce110_timing_generator_enable_crtc, .disable_crtc = dce110_timing_generator_disable_crtc, .is_counter_moving = dce110_timing_generator_is_counter_moving, .get_position = dce110_timing_generator_get_position, .get_frame_count = dce110_timing_generator_get_vblank_counter, .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos, .set_early_control = dce110_timing_generator_set_early_control, .wait_for_state = dce110_tg_wait_for_state, .set_blank = dce110_tg_set_blank, .is_blanked = dce110_tg_is_blanked, .set_colors = dce110_tg_set_colors, .set_overscan_blank_color = dce110_timing_generator_set_overscan_color_black, .set_blank_color = dce110_timing_generator_program_blank_color, .disable_vga = dce110_timing_generator_disable_vga, .did_triggered_reset_occur = dce110_timing_generator_did_triggered_reset_occur, .setup_global_swap_lock = dce110_timing_generator_setup_global_swap_lock, .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger, .enable_crtc_reset = dce110_timing_generator_enable_crtc_reset, .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger, .tear_down_global_swap_lock = dce110_timing_generator_tear_down_global_swap_lock, .enable_advanced_request = dce110_timing_generator_enable_advanced_request, .set_drr = dce110_timing_generator_set_drr, .get_last_used_drr_vtotal = NULL, .set_static_screen_control = dce110_timing_generator_set_static_screen_control, .set_test_pattern = dce110_timing_generator_set_test_pattern, .arm_vert_intr = dce110_arm_vert_intr, .is_tg_enabled = dce110_is_tg_enabled, .configure_crc = dce110_configure_crc, .get_crc = dce110_get_crc, }; void dce110_timing_generator_construct( struct dce110_timing_generator *tg110, struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { tg110->controller_id = CONTROLLER_ID_D0 + instance; tg110->base.inst = instance; tg110->offsets = *offsets; tg110->base.funcs = &dce110_tg_funcs; tg110->base.ctx = ctx; tg110->base.bp = 
ctx->dc_bios; tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1; tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1; tg110->min_h_blank = 56; tg110->min_h_front_porch = 4; tg110->min_h_back_porch = 4; }
/* (dataset metadata) repo: linux-master */
/* (dataset metadata) path: drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c */
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "dm_services.h"

/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"

#include "dc_types.h"
#include "dc_bios_types.h"
#include "dc.h"

#include "include/grph_object_id.h"
#include "include/logger_interface.h"
#include "dce110_timing_generator.h"
#include "dce110_timing_generator_v.h"

#include "timing_generator.h"

#define DC_LOGGER \
	tg->ctx->logger

/**
 ********************************************************************************
 *
 * DCE11 Timing Generator Implementation (underlay "CRTCV" pipe)
 *
 **********************************************************************************/

/*
 * Enable CRTCV
 */
static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
{
/*
 * Set MASTER_UPDATE_MODE to 0
 * This is needed for DRR, and also suggested to be default value by Syed.
 */
	uint32_t value;

	value = 0;
	set_reg_field_value(value, 0,
			CRTCV_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE);
	dm_write_reg(tg->ctx,
			mmCRTCV_MASTER_UPDATE_MODE, value);

	/* TODO: may want this on for looking for underflow */
	/* NOTE(review): this repeats the 0 write to MASTER_UPDATE_MODE done
	 * just above and is redundant as written. */
	value = 0;
	dm_write_reg(tg->ctx, mmCRTCV_MASTER_UPDATE_MODE, value);

	value = 0;
	set_reg_field_value(value, 1,
			CRTCV_MASTER_EN, CRTC_MASTER_EN);
	dm_write_reg(tg->ctx, mmCRTCV_MASTER_EN, value);

	return true;
}

/* Disable the underlay CRTC master enable. */
static bool dce110_timing_generator_v_disable_crtc(struct timing_generator *tg)
{
	uint32_t value;

	value = dm_read_reg(tg->ctx, mmCRTCV_CONTROL);

	set_reg_field_value(value, 0,
			CRTCV_CONTROL, CRTC_DISABLE_POINT_CNTL);
	set_reg_field_value(value, 0,
			CRTCV_CONTROL, CRTC_MASTER_EN);

	dm_write_reg(tg->ctx, mmCRTCV_CONTROL, value);

	/*
	 * TODO: call this when adding stereo support
	 * tg->funcs->disable_stereo(tg);
	 */
	return true;
}

/* Force blank data on the underlay CRTC. */
static void dce110_timing_generator_v_blank_crtc(struct timing_generator *tg)
{
	uint32_t addr = mmCRTCV_BLANK_CONTROL;
	uint32_t value = dm_read_reg(tg->ctx, addr);

	set_reg_field_value(
		value,
		1,
		CRTCV_BLANK_CONTROL,
		CRTC_BLANK_DATA_EN);

	set_reg_field_value(
		value,
		0,
		CRTCV_BLANK_CONTROL,
		CRTC_BLANK_DE_MODE);

	dm_write_reg(tg->ctx, addr, value);
}

/* Release blank data on the underlay CRTC. */
static void dce110_timing_generator_v_unblank_crtc(struct timing_generator *tg)
{
	uint32_t addr = mmCRTCV_BLANK_CONTROL;
	uint32_t value = dm_read_reg(tg->ctx, addr);

	set_reg_field_value(
		value,
		0,
		CRTCV_BLANK_CONTROL,
		CRTC_BLANK_DATA_EN);

	set_reg_field_value(
		value,
		0,
		CRTCV_BLANK_CONTROL,
		CRTC_BLANK_DE_MODE);

	dm_write_reg(tg->ctx, addr, value);
}

/* True while the underlay CRTC scanout is inside vertical blank. */
static bool dce110_timing_generator_v_is_in_vertical_blank(
		struct timing_generator *tg)
{
	uint32_t addr = 0;
	uint32_t value = 0;
	uint32_t field = 0;

	addr = mmCRTCV_STATUS;
	value = dm_read_reg(tg->ctx, addr);
	field = get_reg_field_value(value, CRTCV_STATUS, CRTC_V_BLANK);
	return field == 1;
}

/* Sample the scanout position twice; counter is moving when they differ. */
static bool dce110_timing_generator_v_is_counter_moving(struct timing_generator *tg)
{
	uint32_t value;
	uint32_t h1 = 0;
	uint32_t h2 = 0;
	uint32_t v1 = 0;
	uint32_t v2 = 0;

	value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);

	h1 = get_reg_field_value(
			value,
			CRTCV_STATUS_POSITION,
			CRTC_HORZ_COUNT);

	v1 = get_reg_field_value(
			value,
			CRTCV_STATUS_POSITION,
			CRTC_VERT_COUNT);

	value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);

	h2 = get_reg_field_value(
			value,
			CRTCV_STATUS_POSITION,
			CRTC_HORZ_COUNT);

	v2 = get_reg_field_value(
			value,
			CRTCV_STATUS_POSITION,
			CRTC_VERT_COUNT);

	if (h1 == h2 && v1 == v2)
		return false;
	else
		return true;
}

static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *tg)
{
	/* We want to catch beginning of VBlank here, so if the first try are
	 * in VBlank, we might be very close to Active, in this case wait for
	 * another frame
	 */
	while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
		if (!dce110_timing_generator_v_is_counter_moving(tg)) {
			/* error - no point to wait if counter is not moving */
			break;
		}
	}

	while (!dce110_timing_generator_v_is_in_vertical_blank(tg)) {
		if (!dce110_timing_generator_v_is_counter_moving(tg)) {
			/* error - no point to wait if counter is not moving */
			break;
		}
	}
}

/*
 * Wait till we are in VActive (anywhere in VActive)
 */
static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
{
	while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
		if (!dce110_timing_generator_v_is_counter_moving(tg)) {
			/* error - no point to wait if counter is not moving */
			break;
		}
	}
}

/* Dispatch to the appropriate busy-wait for the requested CRTC state. */
static void dce110_timing_generator_v_wait_for_state(struct timing_generator *tg,
	enum crtc_state state)
{
	switch (state) {
	case CRTC_STATE_VBLANK:
		dce110_timing_generator_v_wait_for_vblank(tg);
		break;

	case CRTC_STATE_VACTIVE:
		dce110_timing_generator_v_wait_for_vactive(tg);
		break;

	default:
		break;
	}
}

/* Program underlay CRTC totals, blank boundaries, sync widths/polarities
 * and interlace enable directly from the dc_crtc_timing. */
static void dce110_timing_generator_v_program_blanking(
	struct timing_generator *tg,
	const struct dc_crtc_timing *timing)
{
	uint32_t vsync_offset = timing->v_border_bottom +
			timing->v_front_porch;
	uint32_t v_sync_start = timing->v_addressable + vsync_offset;

	uint32_t hsync_offset = timing->h_border_right +
			timing->h_front_porch;
	uint32_t h_sync_start = timing->h_addressable + hsync_offset;

	struct dc_context *ctx = tg->ctx;
	uint32_t value = 0;
	uint32_t addr = 0;
	uint32_t tmp = 0;

	addr = mmCRTCV_H_TOTAL;
	value = dm_read_reg(ctx, addr);
	set_reg_field_value(
		value,
		timing->h_total - 1,
		CRTCV_H_TOTAL,
		CRTC_H_TOTAL);
	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_V_TOTAL;
	value = dm_read_reg(ctx, addr);
	set_reg_field_value(
		value,
		timing->v_total - 1,
		CRTCV_V_TOTAL,
		CRTC_V_TOTAL);
	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_H_BLANK_START_END;
	value = dm_read_reg(ctx, addr);

	/* Blank end = distance from end of frame to start of active. */
	tmp = timing->h_total -
		(h_sync_start + timing->h_border_left);

	set_reg_field_value(
		value,
		tmp,
		CRTCV_H_BLANK_START_END,
		CRTC_H_BLANK_END);

	tmp = tmp + timing->h_addressable +
		timing->h_border_left + timing->h_border_right;

	set_reg_field_value(
		value,
		tmp,
		CRTCV_H_BLANK_START_END,
		CRTC_H_BLANK_START);

	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_V_BLANK_START_END;
	value = dm_read_reg(ctx, addr);

	tmp = timing->v_total - (v_sync_start + timing->v_border_top);

	set_reg_field_value(
		value,
		tmp,
		CRTCV_V_BLANK_START_END,
		CRTC_V_BLANK_END);

	tmp = tmp + timing->v_addressable + timing->v_border_top +
		timing->v_border_bottom;

	set_reg_field_value(
		value,
		tmp,
		CRTCV_V_BLANK_START_END,
		CRTC_V_BLANK_START);

	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_H_SYNC_A;
	value = 0;
	set_reg_field_value(
		value,
		timing->h_sync_width,
		CRTCV_H_SYNC_A,
		CRTC_H_SYNC_A_END);
	dm_write_reg(ctx, addr, value);

	/* Polarity bit is inverted: 0 = positive, 1 = negative. */
	addr = mmCRTCV_H_SYNC_A_CNTL;
	value = dm_read_reg(ctx, addr);
	if (timing->flags.HSYNC_POSITIVE_POLARITY) {
		set_reg_field_value(
			value,
			0,
			CRTCV_H_SYNC_A_CNTL,
			CRTC_H_SYNC_A_POL);
	} else {
		set_reg_field_value(
			value,
			1,
			CRTCV_H_SYNC_A_CNTL,
			CRTC_H_SYNC_A_POL);
	}
	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_V_SYNC_A;
	value = 0;
	set_reg_field_value(
		value,
		timing->v_sync_width,
		CRTCV_V_SYNC_A,
		CRTC_V_SYNC_A_END);
	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_V_SYNC_A_CNTL;
	value = dm_read_reg(ctx, addr);
	if (timing->flags.VSYNC_POSITIVE_POLARITY) {
		set_reg_field_value(
			value,
			0,
			CRTCV_V_SYNC_A_CNTL,
			CRTC_V_SYNC_A_POL);
	} else {
		set_reg_field_value(
			value,
			1,
			CRTCV_V_SYNC_A_CNTL,
			CRTC_V_SYNC_A_POL);
	}
	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_INTERLACE_CONTROL;
	value = dm_read_reg(ctx, addr);
	set_reg_field_value(
		value,
		timing->flags.INTERLACE,
		CRTCV_INTERLACE_CONTROL,
		CRTC_INTERLACE_ENABLE);
	dm_write_reg(ctx, addr, value);
}

/* Configure the advanced-requestor start line; falls back to the
 * legacy requestor when disabled. */
static void dce110_timing_generator_v_enable_advanced_request(
	struct timing_generator *tg,
	bool enable,
	const struct dc_crtc_timing *timing)
{
	uint32_t addr = mmCRTCV_START_LINE_CONTROL;
	uint32_t value = dm_read_reg(tg->ctx, addr);

	if (enable) {
		/* Very short vsync+front-porch needs an earlier start line. */
		if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
			set_reg_field_value(
				value,
				3,
				CRTCV_START_LINE_CONTROL,
				CRTC_ADVANCED_START_LINE_POSITION);
		} else {
			set_reg_field_value(
				value,
				4,
				CRTCV_START_LINE_CONTROL,
				CRTC_ADVANCED_START_LINE_POSITION);
		}
		set_reg_field_value(
			value,
			0,
			CRTCV_START_LINE_CONTROL,
			CRTC_LEGACY_REQUESTOR_EN);
	} else {
		set_reg_field_value(
			value,
			2,
			CRTCV_START_LINE_CONTROL,
			CRTC_ADVANCED_START_LINE_POSITION);
		set_reg_field_value(
			value,
			1,
			CRTCV_START_LINE_CONTROL,
			CRTC_LEGACY_REQUESTOR_EN);
	}

	dm_write_reg(tg->ctx, addr, value);
}

/* Blank/unblank dispatch for the underlay CRTC. */
static void dce110_timing_generator_v_set_blank(struct timing_generator *tg,
		bool enable_blanking)
{
	if (enable_blanking)
		dce110_timing_generator_v_blank_crtc(tg);
	else
		dce110_timing_generator_v_unblank_crtc(tg);
}

/* Program timing through VBIOS or direct register writes; the
 * vready/vstartup/vupdate parameters are unused on this ASIC. */
static void dce110_timing_generator_v_program_timing(struct timing_generator *tg,
	const struct dc_crtc_timing *timing,
	int vready_offset,
	int vstartup_start,
	int vupdate_offset,
	int vupdate_width,
	const enum signal_type signal,
	bool use_vbios)
{
	if (use_vbios)
		dce110_timing_generator_program_timing_generator(tg, timing);
	else
		dce110_timing_generator_v_program_blanking(tg, timing);
}

/* Program the underlay black color register. */
static void dce110_timing_generator_v_program_blank_color(
	struct timing_generator *tg,
	const struct tg_color *black_color)
{
	uint32_t addr = mmCRTCV_BLACK_COLOR;
	uint32_t value = dm_read_reg(tg->ctx, addr);

	set_reg_field_value(
		value,
		black_color->color_b_cb,
		CRTCV_BLACK_COLOR,
		CRTC_BLACK_COLOR_B_CB);
	set_reg_field_value(
		value,
		black_color->color_g_y,
		CRTCV_BLACK_COLOR,
		CRTC_BLACK_COLOR_G_Y);
	set_reg_field_value(
		value,
		black_color->color_r_cr,
		CRTCV_BLACK_COLOR,
		CRTC_BLACK_COLOR_R_CR);

	dm_write_reg(tg->ctx, addr, value);
}

/* Set underlay overscan color; the same packed value is mirrored into
 * black color and blank data color. Fields are packed via the primary
 * CRTC_OVERSCAN_COLOR layout. */
static void dce110_timing_generator_v_set_overscan_color_black(
	struct timing_generator *tg,
	const struct tg_color *color)
{
	struct dc_context *ctx = tg->ctx;
	uint32_t addr;
	uint32_t value = 0;

	set_reg_field_value(
		value,
		color->color_b_cb,
		CRTC_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_BLUE);

	set_reg_field_value(
		value,
		color->color_r_cr,
		CRTC_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_RED);

	set_reg_field_value(
		value,
		color->color_g_y,
		CRTC_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_GREEN);

	addr = mmCRTCV_OVERSCAN_COLOR;
	dm_write_reg(ctx, addr, value);

	addr = mmCRTCV_BLACK_COLOR;
	dm_write_reg(ctx, addr, value);

	/* This is desirable to have a constant DAC output voltage during the
	 * blank time that is higher than the 0 volt reference level that the
	 * DAC outputs when the NBLANK signal
	 * is asserted low, such as for output to an analog TV. */
	addr = mmCRTCV_BLANK_DATA_COLOR;
	dm_write_reg(ctx, addr, value);

	/* TO DO we have to program EXT registers and we need to know LB DATA
	 * format because it is used when more 10 , i.e. 12 bits per color
	 *
	 * m_mmDxCRTC_OVERSCAN_COLOR_EXT
	 * m_mmDxCRTC_BLACK_COLOR_EXT
	 * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
	 */
}

/* Program underlay black color and mirror it into blank data color. */
static void dce110_tg_v_program_blank_color(struct timing_generator *tg,
	const struct tg_color *black_color)
{
	uint32_t addr = mmCRTCV_BLACK_COLOR;
	uint32_t value = dm_read_reg(tg->ctx, addr);

	set_reg_field_value(
		value,
		black_color->color_b_cb,
		CRTCV_BLACK_COLOR,
		CRTC_BLACK_COLOR_B_CB);
	set_reg_field_value(
		value,
		black_color->color_g_y,
		CRTCV_BLACK_COLOR,
		CRTC_BLACK_COLOR_G_Y);
	set_reg_field_value(
		value,
		black_color->color_r_cr,
		CRTCV_BLACK_COLOR,
		CRTC_BLACK_COLOR_R_CR);

	dm_write_reg(tg->ctx, addr, value);

	addr = mmCRTCV_BLANK_DATA_COLOR;
	dm_write_reg(tg->ctx, addr, value);
}

/* Program only the underlay overscan color register. */
static void dce110_timing_generator_v_set_overscan_color(struct timing_generator *tg,
	const struct tg_color *overscan_color)
{
	struct dc_context *ctx = tg->ctx;
	uint32_t value = 0;
	uint32_t addr;

	set_reg_field_value(
		value,
		overscan_color->color_b_cb,
		CRTCV_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_BLUE);

	set_reg_field_value(
		value,
		overscan_color->color_g_y,
		CRTCV_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_GREEN);

	set_reg_field_value(
		value,
		overscan_color->color_r_cr,
		CRTCV_OVERSCAN_COLOR,
		CRTC_OVERSCAN_COLOR_RED);

	addr = mmCRTCV_OVERSCAN_COLOR;
	dm_write_reg(ctx, addr, value);
}

/* Program blank and/or overscan colors; NULL arguments are skipped. */
static void dce110_timing_generator_v_set_colors(struct timing_generator *tg,
	const struct tg_color *blank_color,
	const struct tg_color *overscan_color)
{
	if (blank_color != NULL)
		dce110_tg_v_program_blank_color(tg, blank_color);
	if (overscan_color != NULL)
		dce110_timing_generator_v_set_overscan_color(tg, overscan_color);
}

static void dce110_timing_generator_v_set_early_control(
		struct timing_generator *tg,
		uint32_t early_cntl)
{
	uint32_t regval;
	/* NOTE(review): the address used here is the primary pipe's
	 * mmCRTC_CONTROL while the field macros are CRTCV_CONTROL;
	 * verify this is intentional and should not be mmCRTCV_CONTROL
	 * (the register v_disable_crtc programs for the underlay). */
	uint32_t address = mmCRTC_CONTROL;

	regval = dm_read_reg(tg->ctx, address);
	set_reg_field_value(regval, early_cntl,
			CRTCV_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
	dm_write_reg(tg->ctx, address, regval);
}

/* Read the free-running frame counter of the underlay CRTC. */
static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_generator *tg)
{
	uint32_t addr = mmCRTCV_STATUS_FRAME_COUNT;
	uint32_t value = dm_read_reg(tg->ctx, addr);
	uint32_t field = get_reg_field_value(
			value, CRTCV_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);

	return field;
}

/* Timing-sync hooks below are stubs: the feature does not exist on the
 * underlay pipe, so each just logs and returns. */
static bool dce110_timing_generator_v_did_triggered_reset_occur(
	struct timing_generator *tg)
{
	DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
	return false;
}

static void dce110_timing_generator_v_setup_global_swap_lock(
	struct timing_generator *tg,
	const struct dcp_gsl_params *gsl_params)
{
	DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
	return;
}

static void dce110_timing_generator_v_enable_reset_trigger(
	struct timing_generator *tg,
	int source_tg_inst)
{
	DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
	return;
}

static void dce110_timing_generator_v_disable_reset_trigger(
	struct timing_generator *tg)
{
	DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
	return;
}

static void dce110_timing_generator_v_tear_down_global_swap_lock(
	struct timing_generator *tg)
{
	DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
	return;
}

/* No VGA hardware on the underlay pipe - intentional no-op. */
static void dce110_timing_generator_v_disable_vga(
	struct timing_generator *tg)
{
	return;
}

/**
 ********************************************************************************************
 *
 * DCE11 Timing Generator Constructor / Destructor
 *
 *********************************************************************************************/
static const struct timing_generator_funcs dce110_tg_v_funcs = {
		.validate_timing = dce110_tg_validate_timing,
		.program_timing = dce110_timing_generator_v_program_timing,
		.enable_crtc = dce110_timing_generator_v_enable_crtc,
		.disable_crtc = dce110_timing_generator_v_disable_crtc,
		.is_counter_moving = dce110_timing_generator_v_is_counter_moving,
		.get_position = NULL, /* Not to be implemented for underlay*/
		.get_frame_count = dce110_timing_generator_v_get_vblank_counter,
		.set_early_control = dce110_timing_generator_v_set_early_control,
		.wait_for_state = dce110_timing_generator_v_wait_for_state,
		.set_blank = dce110_timing_generator_v_set_blank,
		.set_colors = dce110_timing_generator_v_set_colors,
		.set_overscan_blank_color =
				dce110_timing_generator_v_set_overscan_color_black,
		.set_blank_color = dce110_timing_generator_v_program_blank_color,
		.disable_vga = dce110_timing_generator_v_disable_vga,
		.did_triggered_reset_occur =
				dce110_timing_generator_v_did_triggered_reset_occur,
		.setup_global_swap_lock =
				dce110_timing_generator_v_setup_global_swap_lock,
		.enable_reset_trigger =
				dce110_timing_generator_v_enable_reset_trigger,
		.disable_reset_trigger =
				dce110_timing_generator_v_disable_reset_trigger,
		.tear_down_global_swap_lock =
				dce110_timing_generator_v_tear_down_global_swap_lock,
		.enable_advanced_request =
				dce110_timing_generator_v_enable_advanced_request
};

/* Initialize the underlay timing generator: fixed underlay controller id,
 * vtable, and hardware limits derived from the register field masks. */
void dce110_timing_generator_v_construct(
	struct dce110_timing_generator *tg110,
	struct dc_context *ctx)
{
	tg110->controller_id = CONTROLLER_ID_UNDERLAY0;

	tg110->base.funcs = &dce110_tg_v_funcs;

	tg110->base.ctx = ctx;
	tg110->base.bp = ctx->dc_bios;

	tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
	tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;

	tg110->min_h_blank = 56;
	tg110->min_h_front_porch = 4;
	tg110->min_h_back_porch = 4;
}
/* (dataset metadata) repo: linux-master */
/* (dataset metadata) path: drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c */
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "dc_bios_types.h" #include "core_types.h" #include "core_status.h" #include "resource.h" #include "dm_helpers.h" #include "dce110_timing_generator.h" #include "dce/dce_hwseq.h" #include "gpio_service_interface.h" #include "dce110_compressor.h" #include "bios/bios_parser_helper.h" #include "timing_generator.h" #include "mem_input.h" #include "opp.h" #include "ipp.h" #include "transform.h" #include "stream_encoder.h" #include "link_encoder.h" #include "link_enc_cfg.h" #include "link_hwss.h" #include "link.h" #include "dccg.h" #include "clock_source.h" #include "clk_mgr.h" #include "abm.h" #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" #include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" #include "custom_float.h" #include "atomfirmware.h" #include "dcn10/dcn10_hw_sequencer.h" #include "dce110_hw_sequencer.h" #define GAMMA_HW_POINTS_NUM 256 /* * All values are in milliseconds; * For eDP, after power-up/power/down, * 300/500 msec max. 
delay from LCDVCC to black video generation */ #define PANEL_POWER_UP_TIMEOUT 300 #define PANEL_POWER_DOWN_TIMEOUT 500 #define HPD_CHECK_INTERVAL 10 #define OLED_POST_T7_DELAY 100 #define OLED_PRE_T11_DELAY 150 #define CTX \ hws->ctx #define DC_LOGGER_INIT() #define REG(reg)\ hws->regs->reg #undef FN #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name struct dce110_hw_seq_reg_offsets { uint32_t crtc; }; static const struct dce110_hw_seq_reg_offsets reg_offsets[] = { { .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTCV_GSL_CONTROL - mmCRTC_GSL_CONTROL), } }; #define HW_REG_BLND(reg, id)\ (reg + reg_offsets[id].blnd) #define HW_REG_CRTC(reg, id)\ (reg + reg_offsets[id].crtc) #define MAX_WATERMARK 0xFFFF #define SAFE_NBP_MARK 0x7FFF /******************************************************************************* * Private definitions ******************************************************************************/ /***************************PIPE_CONTROL***********************************/ static void dce110_init_pte(struct dc_context *ctx) { uint32_t addr; uint32_t value = 0; uint32_t chunk_int = 0; uint32_t chunk_mul = 0; addr = mmUNP_DVMM_PTE_CONTROL; value = dm_read_reg(ctx, addr); set_reg_field_value( value, 0, DVMM_PTE_CONTROL, DVMM_USE_SINGLE_PTE); set_reg_field_value( value, 1, DVMM_PTE_CONTROL, DVMM_PTE_BUFFER_MODE0); set_reg_field_value( value, 1, DVMM_PTE_CONTROL, DVMM_PTE_BUFFER_MODE1); dm_write_reg(ctx, addr, value); addr = mmDVMM_PTE_REQ; value = dm_read_reg(ctx, addr); chunk_int = get_reg_field_value( value, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_INT); chunk_mul = get_reg_field_value( value, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER); if (chunk_int != 0x4 || chunk_mul != 0x4) { set_reg_field_value( value, 255, DVMM_PTE_REQ, MAX_PTEREQ_TO_ISSUE); set_reg_field_value( 
value, 4, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_INT); set_reg_field_value( value, 4, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER); dm_write_reg(ctx, addr, value); } } /**************************************************************************/ static void enable_display_pipe_clock_gating( struct dc_context *ctx, bool clock_gating) { /*TODO*/ } static bool dce110_enable_display_power_gating( struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, enum pipe_gating_control power_gating) { enum bp_result bp_result = BP_RESULT_OK; enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) cntl = ASIC_PIPE_ENABLE; else cntl = ASIC_PIPE_DISABLE; if (controller_id == underlay_idx) controller_id = CONTROLLER_ID_UNDERLAY0 - 1; if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) { bp_result = dcb->funcs->enable_disp_power_gating( dcb, controller_id + 1, cntl); /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2 * by default when command table is called * * Bios parser accepts controller_id = 6 as indicative of * underlay pipe in dce110. But we do not support more * than 3. 
		 */
		if (controller_id < CONTROLLER_ID_MAX - 1)
			dm_write_reg(ctx,
				HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
				0);
	}

	/* PTE init is needed for every action except plain enable. */
	if (power_gating != PIPE_GATING_CONTROL_ENABLE)
		dce110_init_pte(ctx);

	if (bp_result == BP_RESULT_OK)
		return true;
	else
		return false;
}

/*
 * Fill IPP prescale parameters for the given surface pixel format.
 * The scale constants map each format's max code value to the fixed
 * internal range (fewer bits per component => larger scale factor).
 */
static void build_prescale_params(struct ipp_prescale_params *prescale_params,
		const struct dc_plane_state *plane_state)
{
	prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;

	switch (plane_state->format) {
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		prescale_params->scale = 0x2082;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
		prescale_params->scale = 0x2020;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		prescale_params->scale = 0x2008;
		break;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		prescale_params->scale = 0x2000;
		break;
	default:
		/* Unhandled surface format — prescale left unprogrammed. */
		ASSERT(false);
		break;
	}
}

/*
 * Program the input pixel processor (IPP) for a plane: prescale, optional
 * input LUT, and degamma mode derived from the plane's input transfer
 * function. Returns false when there is no IPP or the transfer function
 * is a type/curve not supported by DCE11 hardware.
 */
static bool
dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
			       const struct dc_plane_state *plane_state)
{
	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
	const struct dc_transfer_func *tf = NULL;
	struct ipp_prescale_params prescale_params = { 0 };
	bool result = true;

	if (ipp == NULL)
		return false;

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	build_prescale_params(&prescale_params, plane_state);
	ipp->funcs->ipp_program_prescale(ipp, &prescale_params);

	/* Only program the input LUT when it is non-identity and the format
	 * is one the LUT path supports.
	 */
	if (plane_state->gamma_correction &&
			!plane_state->gamma_correction->is_identity &&
			dce_use_lut(plane_state->format))
		ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);

	if (tf == NULL) {
		/* Default case if no input transfer function specified */
		ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_sRGB);
	} else if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
		default:
			/* PQ degamma has no HW support on DCE11. */
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS) {
		ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_BYPASS);
	} else {
		/*TF_TYPE_DISTRIBUTED_POINTS - Not supported in DCE 11*/
		result = false;
	}

	return result;
}

/*
 * Convert PWL curve data and its two boundary points into the custom
 * floating-point register encodings used by the regamma hardware.
 * Point 0 uses signed 12-bit mantissa; point 1 uses unsigned 10-bit
 * mantissa; the per-segment RGB/delta values use signed 12-bit again.
 * Returns false (after breaking to debugger) on any conversion failure.
 */
static bool convert_to_custom_float(struct pwl_result_data *rgb_resulted,
				    struct curve_points *arr_points,
				    uint32_t hw_points_num)
{
	struct custom_float_format fmt;

	struct pwl_result_data *rgb = rgb_resulted;

	uint32_t i = 0;

	fmt.exponenta_bits = 6;
	fmt.mantissa_bits = 12;
	fmt.sign = true;

	if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
					    &arr_points[0].custom_float_x)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
					    &arr_points[0].custom_float_offset)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
					    &arr_points[0].custom_float_slope)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	fmt.mantissa_bits = 10;
	fmt.sign = false;

	if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
					    &arr_points[1].custom_float_x)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
					    &arr_points[1].custom_float_y)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
					    &arr_points[1].custom_float_slope)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	fmt.mantissa_bits = 12;
	fmt.sign = true;

	while (i != hw_points_num) {
		if (!convert_to_custom_float_format(rgb->red, &fmt,
						    &rgb->red_reg)) {
			BREAK_TO_DEBUGGER();
			return false;
		}

		if (!convert_to_custom_float_format(rgb->green, &fmt,
						    &rgb->green_reg)) {
			BREAK_TO_DEBUGGER();
			return false;
		}

		if (!convert_to_custom_float_format(rgb->blue, &fmt,
						    &rgb->blue_reg)) {
			BREAK_TO_DEBUGGER();
			return false;
		}

		if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
						    &rgb->delta_red_reg)) {
			BREAK_TO_DEBUGGER();
			return false;
		}

		if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
						    &rgb->delta_green_reg)) {
			BREAK_TO_DEBUGGER();
			return false;
		}

		if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
						    &rgb->delta_blue_reg)) {
			BREAK_TO_DEBUGGER();
			return false;
		}

		++rgb;
		++i;
	}

	return true;
}

#define MAX_LOW_POINT      25
#define NUMBER_REGIONS     16
#define NUMBER_SW_SEGMENTS 16

/*
 * Resample the software transfer-function points (output_tf->tf_pts) into
 * the piecewise-linear (PWL) format the DCE11 regamma hardware consumes:
 * pick a segment distribution per 2^k region (denser for PQ), gather the
 * sampled RGB points, compute the two boundary curve points and their
 * slopes, force monotonicity, derive per-point deltas, and convert all
 * values to the custom float register encoding.
 * Returns false for NULL/bypass transfer functions.
 */
static bool
dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
				      struct pwl_params *regamma_params)
{
	struct curve_points *arr_points;
	struct pwl_result_data *rgb_resulted;
	struct pwl_result_data *rgb;
	struct pwl_result_data *rgb_plus_1;
	struct fixed31_32 y_r;
	struct fixed31_32 y_g;
	struct fixed31_32 y_b;
	struct fixed31_32 y1_min;
	struct fixed31_32 y3_max;

	int32_t region_start, region_end;
	/* NOTE(review): seg_distr is unsigned yet assigned/compared with -1;
	 * this relies on implicit conversion (0xFFFFFFFF) and works, but an
	 * int32_t array would state the intent — confirm before changing.
	 */
	uint32_t i, j, k, seg_distr[NUMBER_REGIONS], increment, start_index, hw_points;

	if (output_tf == NULL || regamma_params == NULL ||
	    output_tf->type == TF_TYPE_BYPASS)
		return false;

	arr_points = regamma_params->arr_points;
	rgb_resulted = regamma_params->rgb_resulted;
	hw_points = 0;

	memset(regamma_params, 0, sizeof(struct pwl_params));

	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
		/* 16 segments
		 * segments are from 2^-11 to 2^5
		 */
		region_start = -11;
		region_end = region_start + NUMBER_REGIONS;

		for (i = 0; i < NUMBER_REGIONS; i++)
			seg_distr[i] = 4;

	} else {
		/* 10 segments
		 * segment is from 2^-10 to 2^1
		 * We include an extra segment for range [2^0, 2^1). This is to
		 * ensure that colors with normalized values of 1 don't miss the
		 * LUT.
		 */
		region_start = -10;
		region_end = 1;

		seg_distr[0] = 4;
		seg_distr[1] = 4;
		seg_distr[2] = 4;
		seg_distr[3] = 4;
		seg_distr[4] = 4;
		seg_distr[5] = 4;
		seg_distr[6] = 4;
		seg_distr[7] = 4;
		seg_distr[8] = 4;
		seg_distr[9] = 4;
		seg_distr[10] = 0;
		seg_distr[11] = -1;
		seg_distr[12] = -1;
		seg_distr[13] = -1;
		seg_distr[14] = -1;
		seg_distr[15] = -1;
	}

	/* Total HW points = sum of 2^seg_distr[k] over used regions. */
	for (k = 0; k < 16; k++) {
		if (seg_distr[k] != -1)
			hw_points += (1 << seg_distr[k]);
	}

	j = 0;
	for (k = 0; k < (region_end - region_start); k++) {
		increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
		start_index = (region_start + k + MAX_LOW_POINT) *
				NUMBER_SW_SEGMENTS;
		for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
				i += increment) {
			if (j == hw_points - 1)
				break;
			rgb_resulted[j].red = output_tf->tf_pts.red[i];
			rgb_resulted[j].green = output_tf->tf_pts.green[i];
			rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
			j++;
		}
	}

	/* last point */
	start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
	rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];

	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
				       dc_fixpt_from_int(region_start));
	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
				       dc_fixpt_from_int(region_end));

	y_r = rgb_resulted[0].red;
	y_g = rgb_resulted[0].green;
	y_b = rgb_resulted[0].blue;

	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));

	arr_points[0].y = y1_min;
	arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);

	y_r = rgb_resulted[hw_points - 1].red;
	y_g = rgb_resulted[hw_points - 1].green;
	y_b = rgb_resulted[hw_points - 1].blue;

	/* see comment above, m_arrPoints[1].y should be the Y value for the
	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
	 */
	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));

	arr_points[1].y = y3_max;

	arr_points[1].slope = dc_fixpt_zero;

	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
		/* for PQ, we want to have a straight line from last HW X point,
		 * and the slope to be such that we hit 1.0 at 10000 nits.
		 */
		const struct fixed31_32 end_value = dc_fixpt_from_int(125);

		arr_points[1].slope = dc_fixpt_div(
				dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
				dc_fixpt_sub(end_value, arr_points[1].x));
	}

	regamma_params->hw_points_num = hw_points;

	/* Build per-region offsets: each used region starts where the
	 * previous one ended (offset + 2^segments_num).
	 */
	k = 0;
	for (i = 1; i < 16; i++) {
		if (seg_distr[k] != -1) {
			regamma_params->arr_curve_points[k].segments_num =
					seg_distr[k];
			regamma_params->arr_curve_points[i].offset =
					regamma_params->arr_curve_points[k].
					offset + (1 << seg_distr[k]);
		}
		k++;
	}

	if (seg_distr[k] != -1)
		regamma_params->arr_curve_points[k].segments_num =
				seg_distr[k];

	rgb = rgb_resulted;
	rgb_plus_1 = rgb_resulted + 1;

	i = 1;

	/* Clamp the curve to be non-decreasing, then compute the deltas the
	 * hardware uses for linear interpolation between points.
	 */
	while (i != hw_points + 1) {
		if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
			rgb_plus_1->red = rgb->red;
		if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
			rgb_plus_1->green = rgb->green;
		if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
			rgb_plus_1->blue = rgb->blue;

		rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
		rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);

		++rgb_plus_1;
		++rgb;
		++i;
	}

	convert_to_custom_float(rgb_resulted, arr_points, hw_points);

	return true;
}

/*
 * Program the output (regamma) transfer function on the transform block:
 * use the fixed sRGB HW curve when the stream asks for predefined sRGB,
 * otherwise translate the curve into PWL form and program it; fall back
 * to bypass when translation fails. The regamma LUT is powered on only
 * for the duration of programming.
 */
static bool
dce110_set_output_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	struct transform *xfm = pipe_ctx->plane_res.xfm;

	xfm->funcs->opp_power_on_regamma_lut(xfm, true);
	xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	if (stream->out_transfer_func &&
	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) {
		xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
	} else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func,
							 &xfm->regamma_params)) {
		xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
		xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
	} else {
		/* Translation failed — leave regamma in bypass. */
		xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_BYPASS);
	}

	xfm->funcs->opp_power_on_regamma_lut(xfm, false);

	return true;
}

/*
 * Push the prepared info packets (HDMI infoframes or DP SDPs) for this
 * pipe's stream into the stream encoder. No-op for non-root pipes and
 * for signals that are neither HDMI TMDS nor DP.
 */
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
	bool is_hdmi_tmds;
	bool is_dp;

	ASSERT(pipe_ctx->stream);

	if (pipe_ctx->stream_res.stream_enc == NULL)
		return;  /* this is not root pipe */

	is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
	is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);

	if (!is_hdmi_tmds && !is_dp)
		return;

	if (is_hdmi_tmds)
		pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
			pipe_ctx->stream_res.stream_enc,
			&pipe_ctx->stream_res.encoder_info_frame);
	else {
		/* Optionally program the SDP transmission line first, when
		 * the encoder supports it.
		 */
		if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
			pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
				pipe_ctx->stream_res.stream_enc,
				&pipe_ctx->stream_res.encoder_info_frame);

		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
			pipe_ctx->stream_res.stream_enc,
			&pipe_ctx->stream_res.encoder_info_frame);
	}
}

/*
 * Enable the stream path: set up the stream encoder, send info packets,
 * and program the timing generator's early-control value (derived from
 * the active width modulo lane count) to avoid corruption on DP monitors.
 */
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
	enum dc_lane_count lane_count =
		pipe_ctx->stream->link->cur_link_settings.lane_count;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct dc *dc = link->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	uint32_t active_total_with_borders;
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	link_hwss->setup_stream_encoder(pipe_ctx);

	dc->hwss.update_info_frame(pipe_ctx);

	/* enable early control to avoid corruption on DP monitor*/
	active_total_with_borders =
			timing->h_addressable
				+ timing->h_border_left
				+ timing->h_border_right;

	if (lane_count != 0)
		early_control = active_total_with_borders % lane_count;

	if (early_control == 0)
		early_control = lane_count;

	tg->funcs->set_early_control(tg, early_control);
}

/* Thin wrapper around the VBIOS transmitter-control command table call. */
static enum bp_result link_transmitter_control(
		struct dc_bios *bios,
	struct bp_transmitter_control *cntl)
{
	enum bp_result result;

	result = bios->funcs->transmitter_control(bios, cntl);

	return result;
}

/*
 * @brief
 * eDP only.
 * Poll the HPD GPIO until it matches the requested power state, or until
 * the panel power-up timeout expires. Power-down waits are skipped
 * entirely (handled by the SW timer in power_up).
 */
void dce110_edp_wait_for_hpd_ready(
	struct dc_link *link,
	bool power_up)
{
	struct dc_context *ctx = link->ctx;
	struct graphics_object_id connector = link->link_enc->connector;
	struct gpio *hpd;
	bool edp_hpd_high = false;
	uint32_t time_elapsed = 0;
	uint32_t timeout = power_up ?
		PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;

	if (dal_graphics_object_id_get_connector_id(connector)
			!= CONNECTOR_ID_EDP) {
		BREAK_TO_DEBUGGER();
		return;
	}

	if (!power_up)
		/*
		 * From KV, we will not HPD low after turning off VCC -
		 * instead, we will check the SW timer in power_up().
		 */
		return;

	/*
	 * When we power on/off the eDP panel,
	 * we need to wait until SENSE bit is high/low.
	 */

	/* obtain HPD */
	/* TODO what to do with this? */
	hpd = ctx->dc->link_srv->get_hpd_gpio(ctx->dc_bios, connector,
					      ctx->gpio_service);

	if (!hpd) {
		BREAK_TO_DEBUGGER();
		return;
	}

	/* NOTE(review): link was already dereferenced above (link->ctx,
	 * link->link_enc), so this NULL check is dead code — confirm and
	 * simplify upstream.
	 */
	if (link != NULL) {
		if (link->panel_config.pps.extra_t3_ms > 0) {
			int extra_t3_in_ms = link->panel_config.pps.extra_t3_ms;

			msleep(extra_t3_in_ms);
		}
	}

	dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);

	/* wait until timeout or panel detected */

	do {
		uint32_t detected = 0;

		dal_gpio_get_value(hpd, &detected);

		/* HPD level matches the requested power state — done. */
		if (!(detected ^ power_up)) {
			edp_hpd_high = true;
			break;
		}

		msleep(HPD_CHECK_INTERVAL);

		time_elapsed += HPD_CHECK_INTERVAL;
	} while (time_elapsed < timeout);

	dal_gpio_close(hpd);

	dal_gpio_destroy_irq(&hpd);

	/* ensure that the panel is detected */
	if (!edp_hpd_high)
		DC_LOG_DC("%s: wait timed out!\n", __func__);
}

/*
 * eDP only: drive the panel VCC on/off through the VBIOS (or DMUB LVTMA
 * command table), honoring the minimum T12 off-time before re-powering.
 */
void dce110_edp_power_control(
		struct dc_link *link,
		bool power_up)
{
	struct dc_context *ctx = link->ctx;
	struct bp_transmitter_control cntl = { 0 };
	enum bp_result bp_result;
	uint8_t panel_instance;

	if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
			!= CONNECTOR_ID_EDP) {
		BREAK_TO_DEBUGGER();
		return;
	}
	if (!link->panel_cntl)
		return;

	/* Only act when the requested state differs from the panel's
	 * current power state; otherwise the action is skipped (logged
	 * in the else branch at the bottom).
	 */
	if (power_up !=
		link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {

		unsigned long long current_ts = dm_get_timestamp(ctx);
		unsigned long long time_since_edp_poweroff_ms =
				div64_u64(dm_get_elapse_time_in_ns(
						ctx,
						current_ts,
						ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
		unsigned long long time_since_edp_poweron_ms =
				div64_u64(dm_get_elapse_time_in_ns(
						ctx,
						current_ts,
						ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link)), 1000000);
		DC_LOG_HW_RESUME_S3(
				"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
				__func__,
				power_up,
				current_ts,
				ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
				ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link),
				time_since_edp_poweroff_ms,
				time_since_edp_poweron_ms);

		/* Send VBIOS command to prompt eDP panel power */
		if (power_up) {
			/* edp requires a min of 500ms from LCDVDD off to on */
			unsigned long long remaining_min_edp_poweroff_time_ms = 500;

			/* add time defined by a patch, if any (usually patch extra_t12_ms is 0) */
			if (link->local_sink != NULL)
				remaining_min_edp_poweroff_time_ms +=
					link->panel_config.pps.extra_t12_ms;

			/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time.
			 */
			if (ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
				if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
					remaining_min_edp_poweroff_time_ms =
						remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
				else
					remaining_min_edp_poweroff_time_ms = 0;
			}

			/* Block until the mandatory T12 off-time has fully
			 * elapsed before powering the panel back on.
			 */
			if (remaining_min_edp_poweroff_time_ms) {
				DC_LOG_HW_RESUME_S3(
						"%s: remaining_min_edp_poweroff_time_ms=%llu: begin wait.\n",
						__func__, remaining_min_edp_poweroff_time_ms);
				msleep(remaining_min_edp_poweroff_time_ms);
				DC_LOG_HW_RESUME_S3(
						"%s: remaining_min_edp_poweroff_time_ms=%llu: end wait.\n",
						__func__, remaining_min_edp_poweroff_time_ms);
				dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
						__func__, remaining_min_edp_poweroff_time_ms);
			} else {
				DC_LOG_HW_RESUME_S3(
						"%s: remaining_min_edp_poweroff_time_ms=%llu: no wait required.\n",
						__func__, remaining_min_edp_poweroff_time_ms);
			}
		}

		DC_LOG_HW_RESUME_S3(
				"%s: BEGIN: Panel Power action: %s\n",
				__func__, (power_up ? "On":"Off"));

		cntl.action = power_up ?
			TRANSMITTER_CONTROL_POWER_ON :
			TRANSMITTER_CONTROL_POWER_OFF;
		cntl.transmitter = link->link_enc->transmitter;
		cntl.connector_obj_id = link->link_enc->connector;
		cntl.coherent = false;
		cntl.lanes_number = LANE_COUNT_FOUR;
		cntl.hpd_sel = link->link_enc->hpd_source;
		panel_instance = link->panel_cntl->inst;

		/* Prefer the DMUB LVTMA command table when available; the
		 * legacy transmitter-control call below still runs and its
		 * bp_result overwrites this one (upstream behavior).
		 */
		if (ctx->dc->ctx->dmub_srv &&
				ctx->dc->debug.dmub_command_table) {

			if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {
				bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
						LVTMA_CONTROL_POWER_ON,
						panel_instance, link->link_powered_externally);
			} else {
				bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
						LVTMA_CONTROL_POWER_OFF,
						panel_instance, link->link_powered_externally);
			}
		}

		bp_result = link_transmitter_control(ctx->dc_bios, &cntl);

		DC_LOG_HW_RESUME_S3(
				"%s: END: Panel Power action: %s bp_result=%u\n",
				__func__, (power_up ? "On":"Off"),
				bp_result);

		ctx->dc->link_srv->dp_trace_set_edp_power_timestamp(link, power_up);

		DC_LOG_HW_RESUME_S3(
				"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
				__func__,
				ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
				ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link));

		if (bp_result != BP_RESULT_OK)
			DC_LOG_ERROR(
					"%s: Panel Power bp_result: %d\n",
					__func__, bp_result);
	} else {
		DC_LOG_HW_RESUME_S3(
				"%s: Skipping Panel Power action: %s\n",
				__func__, (power_up ? "On":"Off"));
	}
}

/*
 * eDP only: if the panel is off, sleep out whatever remains of the T12
 * off-period (default 500 ms plus any per-panel extra) since the last
 * recorded power-off, so a subsequent power-on is spec-compliant.
 */
void dce110_edp_wait_for_T12(
		struct dc_link *link)
{
	struct dc_context *ctx = link->ctx;

	if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
			!= CONNECTOR_ID_EDP) {
		BREAK_TO_DEBUGGER();
		return;
	}

	if (!link->panel_cntl)
		return;

	if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
			ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
		unsigned int t12_duration = 500; // Default T12 as per spec
		unsigned long long current_ts = dm_get_timestamp(ctx);
		unsigned long long time_since_edp_poweroff_ms =
				div64_u64(dm_get_elapse_time_in_ns(
						ctx,
						current_ts,
						ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);

		t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12

		if (time_since_edp_poweroff_ms < t12_duration)
			msleep(t12_duration - time_since_edp_poweroff_ms);
	}
}

/*todo: cloned in stream enc, fix*/
/*
 * @brief
 * eDP only.
 * Control the backlight of the eDP panel: issue the VBIOS (or DMUB LVTMA)
 * backlight on/off command, observing T7 before enable and T9 after
 * disable, plus OLED-specific post-T7 / pre-T11 delays, and optionally
 * drive the backlight over DPCD AUX for panels that support it.
 */
void dce110_edp_backlight_control(
		struct dc_link *link,
		bool enable)
{
	struct dc_context *ctx = link->ctx;
	struct bp_transmitter_control cntl = { 0 };
	uint8_t panel_instance;
	unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
	unsigned int post_T7_delay = OLED_POST_T7_DELAY;

	if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
		!= CONNECTOR_ID_EDP) {
		BREAK_TO_DEBUGGER();
		return;
	}

	/* For non-AUX-controlled backlights, skip the VBIOS call when the
	 * backlight is already in the requested state.
	 */
	if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
		bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);

		if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {
			DC_LOG_HW_RESUME_S3(
				"%s: panel already powered up/off. Do nothing.\n",
				__func__);
			return;
		}
	}

	/* Send VBIOS command to control eDP panel backlight */

	DC_LOG_HW_RESUME_S3(
			"%s: backlight action: %s\n",
			__func__, (enable ? "On":"Off"));

	cntl.action = enable ?
		TRANSMITTER_CONTROL_BACKLIGHT_ON :
		TRANSMITTER_CONTROL_BACKLIGHT_OFF;

	/*cntl.engine_id = ctx->engine;*/
	cntl.transmitter = link->link_enc->transmitter;
	cntl.connector_obj_id = link->link_enc->connector;
	/*todo: unhardcode*/
	cntl.lanes_number = LANE_COUNT_FOUR;
	cntl.hpd_sel = link->link_enc->hpd_source;
	cntl.signal = SIGNAL_TYPE_EDP;

	/* For eDP, the following delays might need to be considered
	 * after link training completed:
	 * idle period - min. accounts for required BS-Idle pattern,
	 * max. allows for source frame synchronization);
	 * 50 msec max. delay from valid video data from source
	 * to video on dislpay or backlight enable.
	 *
	 * Disable the delay for now.
	 * Enable it in the future if necessary.
	 */
	/* dc_service_sleep_in_milliseconds(50); */
		/*edp 1.2*/
	panel_instance = link->panel_cntl->inst;

	if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
		if (!link->dc->config.edp_no_power_sequencing)
		/*
		 * Sometimes, DP receiver chip power-controlled externally by an
		 * Embedded Controller could be treated and used as eDP,
		 * if it drives mobile display. In this case,
		 * we shouldn't be doing power-sequencing, hence we can skip
		 * waiting for T7-ready.
		 */
			ctx->dc->link_srv->edp_receiver_ready_T7(link);
		else
			DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
	}

	/* Setting link_powered_externally will bypass delays in the backlight
	 * as they are not required if the link is being powered by a different
	 * source.
	 */
	if (ctx->dc->ctx->dmub_srv &&
			ctx->dc->debug.dmub_command_table) {
		if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
			ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
					LVTMA_CONTROL_LCD_BLON,
					panel_instance, link->link_powered_externally);
		else
			ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
					LVTMA_CONTROL_LCD_BLOFF,
					panel_instance, link->link_powered_externally);
	}

	link_transmitter_control(ctx->dc_bios, &cntl);

	if (enable && link->dpcd_sink_ext_caps.bits.oled) {
		post_T7_delay += link->panel_config.pps.extra_post_t7_ms;
		msleep(post_T7_delay);
	}

	/* Panels with AUX-controlled backlight also need the DPCD enable. */
	if (link->dpcd_sink_ext_caps.bits.oled ||
		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
		ctx->dc->link_srv->edp_backlight_enable_aux(link, enable);

	/*edp 1.2*/
	if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) {
		if (!link->dc->config.edp_no_power_sequencing)
		/*
		 * Sometimes, DP receiver chip power-controlled externally by an
		 * Embedded Controller could be treated and used as eDP,
		 * if it drives mobile display. In this case,
		 * we shouldn't be doing power-sequencing, hence we can skip
		 * waiting for T9-ready.
		 */
			ctx->dc->link_srv->edp_add_delay_for_T9(link);
		else
			DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
	}

	if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
		pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
		msleep(pre_T11_delay);
	}
}

/*
 * Enable the Azalia audio endpoint for this pipe and start audio packet
 * transmission. Counts active audio streams and applies the PME
 * workaround (to wake the audio controller from D3) when supported.
 */
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
	/* notify audio driver for audio modes of monitor */
	struct dc *dc;
	struct clk_mgr *clk_mgr;
	unsigned int i, num_audio = 1;
	const struct link_hwss *link_hwss;

	if (!pipe_ctx->stream)
		return;

	dc = pipe_ctx->stream->ctx->dc;
	clk_mgr = dc->clk_mgr;
	link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);

	if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
		return;

	if (pipe_ctx->stream_res.audio) {
		for (i = 0; i < MAX_PIPES; i++) {
			/*current_state not updated yet*/
			if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
				num_audio++;
		}

		pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);

		if (num_audio >= 1 && clk_mgr->funcs->enable_pme_wa)
			/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
			clk_mgr->funcs->enable_pme_wa(clk_mgr);

		link_hwss->enable_audio_packet(pipe_ctx);

		if (pipe_ctx->stream_res.audio)
			pipe_ctx->stream_res.audio->enabled = true;
	}
}

/*
 * Stop audio packet transmission and disable the pipe's Azalia endpoint;
 * re-applies the PME workaround when supported by the clock manager.
 */
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
{
	struct dc *dc;
	struct clk_mgr *clk_mgr;
	const struct link_hwss *link_hwss;

	if (!pipe_ctx || !pipe_ctx->stream)
		return;

	dc = pipe_ctx->stream->ctx->dc;
	clk_mgr = dc->clk_mgr;
	link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);

	if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
		return;

	link_hwss->disable_audio_packet(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		pipe_ctx->stream_res.audio->enabled = false;

		if (clk_mgr->funcs->enable_pme_wa)
			/*this is the first audio.
			 apply the PME w/a in order to wake AZ from D3*/
			clk_mgr->funcs->enable_pme_wa(clk_mgr);

		/* TODO: notify audio driver for if audio modes list changed
		 * add audio mode list change flag */
		/* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
		 * stream->stream_engine_id);
		 */
	}
}

/*
 * Tear down the stream path: stop info packets, disable audio, reset the
 * stream encoder, and release the stream clocks (DTBCLK DTO / SYMCLK32 SE
 * for 128b/132b links, SYMCLK SE otherwise). Also drops HPO HW control
 * for 128b/132b (see inline TODO about per-stream vs global scope).
 */
void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dccg *dccg = dc->res_pool->dccg;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dtbclk_dto_params dto_params = {0};
	int dp_hpo_inst;
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
			pipe_ctx->stream_res.stream_enc);
		pipe_ctx->stream_res.stream_enc->funcs->hdmi_reset_stream_attribute(
			pipe_ctx->stream_res.stream_enc);
	}

	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
			pipe_ctx->stream_res.hpo_dp_stream_enc);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
			pipe_ctx->stream_res.stream_enc);

	dc->hwss.disable_audio_stream(pipe_ctx);

	link_hwss->reset_stream_encoder(pipe_ctx);

	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		dto_params.otg_inst = tg->inst;
		dto_params.timing = &pipe_ctx->stream->timing;
		dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
		if (dccg) {
			dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
			dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
		}
	} else if (dccg && dccg->funcs->disable_symclk_se) {
		dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
					       link_enc->transmitter - TRANSMITTER_UNIPHY_A);
	}

	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		/* TODO: This looks like a bug to me as we are disabling HPO IO when
		 * we are just disabling a single HPO stream. Shouldn't we disable HPO
		 * HW control only when HPOs for all streams are disabled?
		 */
		if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
			pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
				pipe_ctx->stream->ctx->dc->hwseq, false);
	}
}

/*
 * Unblank the DP stream encoder (using timing + link rate only) and turn
 * the eDP backlight back on for local eDP sinks.
 */
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = { { 0 } };
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;
	params.link_settings.link_rate = link_settings->link_rate;

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		hws->funcs.edp_backlight_control(link, true);
	}
}

/*
 * Blank the stream: turn off eDP backlight/ABM first (unless the stream
 * opts out of eDP power-down), then blank the DP or HPO DP encoder.
 */
void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		if (!stream->skip_edp_power_down)
			hws->funcs.edp_backlight_control(link, false);
		link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
	}

	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
				pipe_ctx->stream_res.hpo_dp_stream_enc);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);

		if
		   (!dc_is_embedded_signal(pipe_ctx->stream->signal)) {
			/*
			 * After output is idle pattern some sinks need time to recognize the stream
			 * has changed or they enter protection state and hang.
			 */
			msleep(60);
		} else if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
			if (!link->dc->config.edp_no_power_sequencing) {
				/*
				 * Sometimes, DP receiver chip power-controlled externally by an
				 * Embedded Controller could be treated and used as eDP,
				 * if it drives mobile display. In this case,
				 * we shouldn't be doing power-sequencing, hence we can skip
				 * waiting for T9-ready.
				 */
				link->dc->link_srv->edp_receiver_ready_T9(link);
			}
		}
	}

}

/* Set or clear HDMI AVMUTE on the pipe's stream encoder, if present. */
void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
	if (pipe_ctx != NULL && pipe_ctx->stream_res.stream_enc != NULL)
		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
}

/* Map a CRTC controller id to the matching audio DTO clock source. */
static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
{
	switch (crtc_id) {
	case CONTROLLER_ID_D0:
		return DTO_SOURCE_ID0;
	case CONTROLLER_ID_D1:
		return DTO_SOURCE_ID1;
	case CONTROLLER_ID_D2:
		return DTO_SOURCE_ID2;
	case CONTROLLER_ID_D3:
		return DTO_SOURCE_ID3;
	case CONTROLLER_ID_D4:
		return DTO_SOURCE_ID4;
	case CONTROLLER_ID_D5:
		return DTO_SOURCE_ID5;
	default:
		return DTO_SOURCE_UNKNOWN;
	}
}

/*
 * Assemble the audio_output descriptor for a pipe from its stream timing,
 * pixel clock parameters and PLL settings: CRTC info (active size,
 * refresh, pixel clocks — halved for HDMI YCbCr 4:2:0), DP reference
 * clock when applicable, and the DTO source for this timing generator.
 */
static void build_audio_output(
	struct dc_state *state,
	const struct pipe_ctx *pipe_ctx,
	struct audio_output *audio_output)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;
	audio_output->engine_id = pipe_ctx->stream_res.stream_enc->id;

	audio_output->signal = pipe_ctx->stream->signal;

	/* audio_crtc_info  */

	audio_output->crtc_info.h_total =
		stream->timing.h_total;

	/*
	 * Audio packets are sent during actual CRTC blank physical signal, we
	 * need to specify actual active signal portion
	 */
	audio_output->crtc_info.h_active =
			stream->timing.h_addressable
			+ stream->timing.h_border_left
			+ stream->timing.h_border_right;

	audio_output->crtc_info.v_active =
			stream->timing.v_addressable
			+ stream->timing.v_border_top
			+ stream->timing.v_border_bottom;

	audio_output->crtc_info.pixel_repetition = 1;

	audio_output->crtc_info.interlaced =
			stream->timing.flags.INTERLACE;

	audio_output->crtc_info.refresh_rate =
		(stream->timing.pix_clk_100hz*100)/
		(stream->timing.h_total*stream->timing.v_total);

	audio_output->crtc_info.color_depth =
		stream->timing.display_color_depth;

	audio_output->crtc_info.requested_pixel_clock_100Hz =
			pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;

	audio_output->crtc_info.calculated_pixel_clock_100Hz =
			pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;

/*for HDMI, audio ACR is with deep color ratio factor*/
	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&
		audio_output->crtc_info.requested_pixel_clock_100Hz ==
				(stream->timing.pix_clk_100hz)) {
		if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
			audio_output->crtc_info.requested_pixel_clock_100Hz =
					audio_output->crtc_info.requested_pixel_clock_100Hz/2;
			audio_output->crtc_info.calculated_pixel_clock_100Hz =
					pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz/2;

		}
	}

	if (state->clk_mgr &&
		(pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
			pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
		audio_output->pll_info.dp_dto_source_clock_in_khz =
				state->clk_mgr->funcs->get_dp_ref_clk_frequency(
						state->clk_mgr);
	}

	audio_output->pll_info.feed_back_divider =
			pipe_ctx->pll_settings.feedback_divider;

	audio_output->pll_info.dto_source =
		translate_to_dto_source(
			pipe_ctx->stream_res.tg->inst + 1);

	/* TODO hard code to enable for now. Need get from stream */
	audio_output->pll_info.ss_enabled = true;

	audio_output->pll_info.ss_percentage =
			pipe_ctx->pll_settings.ss_percentage;
}

/*
 * Program the transform block's scaler for this pipe: pixel storage
 * depth, overscan blank color (with 4:2:0 channel fix-up), and the
 * scaler ratios/taps from scl_data. No-op when the transform has no
 * pixel-storage-depth hook (FPGA).
 */
static void program_scaler(const struct dc *dc,
		const struct pipe_ctx *pipe_ctx)
{
	struct tg_color color = {0};

	/* TOFPGA */
	if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
		return;

	if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
		get_surface_visual_confirm_color(pipe_ctx, &color);
	else
		color_space_to_black_color(dc,
				pipe_ctx->stream->output_color_space,
				&color);

	pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
		pipe_ctx->plane_res.xfm,
		pipe_ctx->plane_res.scl_data.lb_params.depth,
		&pipe_ctx->stream->bit_depth_params);

	if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
		/*
		 * The way 420 is packed, 2 channels carry Y component, 1 channel
		 * alternate between Cb and Cr, so both channels need the pixel
		 * value for Y
		 */
		if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
			color.color_r_cr = color.color_g_y;

		pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
				pipe_ctx->stream_res.tg,
				&color);
	}

	pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
		&pipe_ctx->plane_res.scl_data);
}

/*
 * First-time timing enable for a pipe (only when the pipe had no stream
 * in the current state): program blank color, blank the CRTC before any
 * further programming, set up the pixel clock PLL, track HDMI SYMCLK
 * state, program the timing, then enable the CRTC.
 * Returns DC_ERROR_UNEXPECTED on PLL or CRTC-enable failure.
 */
static enum dc_status dce110_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
			pipe_ctx[pipe_ctx->pipe_idx];
	struct tg_color black_color = {0};

	if (!pipe_ctx_old->stream) {

		/* program blank color */
		color_space_to_black_color(dc,
				stream->output_color_space, &black_color);
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

		/*
		 * Must blank CRTC after disabling power gating and before any
		 * programming, otherwise CRTC will be hung in bad state
		 */
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);

		if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
				pipe_ctx->clock_source,
				&pipe_ctx->stream_res.pix_clk_params,
				dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
				&pipe_ctx->pll_settings)) {
			BREAK_TO_DEBUGGER();
			return DC_ERROR_UNEXPECTED;
		}

		/* Track SYMCLK reference count / TX state for HDMI links. */
		if (dc_is_hdmi_tmds_signal(stream->signal)) {
			stream->link->phy_state.symclk_ref_cnts.otg = 1;
			if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
				stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
			else
				stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
		}

		pipe_ctx->stream_res.tg->funcs->program_timing(
				pipe_ctx->stream_res.tg,
				&stream->timing,
				0,
				0,
				0,
				0,
				pipe_ctx->stream->signal,
				true);
	}

	if (!pipe_ctx_old->stream) {
		if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(
				pipe_ctx->stream_res.tg)) {
			BREAK_TO_DEBUGGER();
			return DC_ERROR_UNEXPECTED;
		}
	}

	return DC_OK;
}

static enum dc_status apply_single_controller_ctx_to_hw(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
	struct dce_hwseq *hws = dc->hwseq;
	const struct link_hwss *link_hwss = get_link_hwss(
			link, &pipe_ctx->link_res);


	if (hws->funcs.disable_stream_gating) {
		hws->funcs.disable_stream_gating(dc, pipe_ctx);
	}

	if (pipe_ctx->stream_res.audio != NULL) {
		struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output); link_hwss->setup_audio_output(pipe_ctx, &audio_output, pipe_ctx->stream_res.audio->inst); pipe_ctx->stream_res.audio->funcs->az_configure( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, &audio_output.crtc_info, &pipe_ctx->stream->audio_info); } /* make sure no pipes syncd to the pipe being enabled */ if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic) check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx); pipe_ctx->stream_res.opp->funcs->opp_program_fmt( pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, stream->timing.display_color_depth, stream->signal); while (odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion( odm_pipe->stream_res.opp, COLOR_SPACE_YCBCR601, stream->timing.display_color_depth, stream->signal); odm_pipe->stream_res.opp->funcs->opp_program_fmt( odm_pipe->stream_res.opp, &stream->bit_depth_params, &stream->clamping); odm_pipe = odm_pipe->next_odm_pipe; } /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ if (!(hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))) /* */ /* Do not touch stream timing on seamless boot optimization. 
*/ if (!pipe_ctx->stream->apply_seamless_boot_optimization) hws->funcs.enable_stream_timing(pipe_ctx, context, dc); if (hws->funcs.setup_vupdate_interrupt) hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; if (pipe_ctx->stream_res.tg->funcs->set_drr) pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, &params); // DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) event_triggers = 0x80; /* Event triggers and num frames initialized for DRR, but can be * later updated for PSR use. Note DRR trigger events are generated * regardless of whether num frames met. */ if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) pipe_ctx->stream_res.tg->funcs->set_static_screen_control( pipe_ctx->stream_res.tg, event_triggers, 2); if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); if (dc_is_dp_signal(pipe_ctx->stream->signal)) dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); if (!stream->dpms_off) dc->link_srv->set_dpms_on(context, pipe_ctx); /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ if (hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { if (!pipe_ctx->stream->apply_seamless_boot_optimization) hws->funcs.enable_stream_timing(pipe_ctx, context, dc); } pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != NULL; /* Phantom and main stream share the same link (because the stream * is constructed with the same sink). Make sure not to override * and link programming on the main. 
*/ if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false; } return DC_OK; } /******************************************************************************/ static void power_down_encoders(struct dc *dc) { int i; for (i = 0; i < dc->link_count; i++) { enum signal_type signal = dc->links[i]->connector_signal; dc->link_srv->blank_dp_stream(dc->links[i], false); if (signal != SIGNAL_TYPE_EDP) signal = SIGNAL_TYPE_NONE; if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY) dc->links[i]->link_enc->funcs->disable_output( dc->links[i]->link_enc, signal); dc->links[i]->link_status.link_active = false; memset(&dc->links[i]->cur_link_settings, 0, sizeof(dc->links[i]->cur_link_settings)); } } static void power_down_controllers(struct dc *dc) { int i; for (i = 0; i < dc->res_pool->timing_generator_count; i++) { dc->res_pool->timing_generators[i]->funcs->disable_crtc( dc->res_pool->timing_generators[i]); } } static void power_down_clock_sources(struct dc *dc) { int i; if (dc->res_pool->dp_clock_source->funcs->cs_power_down( dc->res_pool->dp_clock_source) == false) dm_error("Failed to power down pll! (dp clk src)\n"); for (i = 0; i < dc->res_pool->clk_src_count; i++) { if (dc->res_pool->clock_sources[i]->funcs->cs_power_down( dc->res_pool->clock_sources[i]) == false) dm_error("Failed to power down pll! 
(clk src index=%d)\n", i); } } static void power_down_all_hw_blocks(struct dc *dc) { power_down_encoders(dc); power_down_controllers(dc); power_down_clock_sources(dc); if (dc->fbc_compressor) dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); } static void disable_vga_and_power_gate_all_controllers( struct dc *dc) { int i; struct timing_generator *tg; struct dc_context *ctx = dc->ctx; for (i = 0; i < dc->res_pool->timing_generator_count; i++) { tg = dc->res_pool->timing_generators[i]; if (tg->funcs->disable_vga) tg->funcs->disable_vga(tg); } for (i = 0; i < dc->res_pool->pipe_count; i++) { /* Enable CLOCK gating for each pipe BEFORE controller * powergating. */ enable_display_pipe_clock_gating(ctx, true); dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i; dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); } } static void get_edp_streams(struct dc_state *context, struct dc_stream_state **edp_streams, int *edp_stream_num) { int i; *edp_stream_num = 0; for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { edp_streams[*edp_stream_num] = context->streams[i]; if (++(*edp_stream_num) == MAX_NUM_EDP) return; } } } static void get_edp_links_with_sink( struct dc *dc, struct dc_link **edp_links_with_sink, int *edp_with_sink_num) { int i; /* check if there is an eDP panel not in use */ *edp_with_sink_num = 0; for (i = 0; i < dc->link_count; i++) { if (dc->links[i]->local_sink && dc->links[i]->local_sink->sink_signal == SIGNAL_TYPE_EDP) { edp_links_with_sink[*edp_with_sink_num] = dc->links[i]; if (++(*edp_with_sink_num) == MAX_NUM_EDP) return; } } } /* * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need: * 1. Power down all DC HW blocks * 2. Disable VGA engine on all controllers * 3. Enable power gating for controller * 4. 
Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS) */ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) { struct dc_link *edp_links_with_sink[MAX_NUM_EDP]; struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_stream_state *edp_streams[MAX_NUM_EDP]; struct dc_link *edp_link_with_sink = NULL; struct dc_link *edp_link = NULL; struct dce_hwseq *hws = dc->hwseq; int edp_with_sink_num; int edp_num; int edp_stream_num; int i; bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; DC_LOGGER_INIT(); get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num); dc_get_edp_links(dc, edp_links, &edp_num); if (hws->funcs.init_pipes) hws->funcs.init_pipes(dc, context); get_edp_streams(context, edp_streams, &edp_stream_num); // Check fastboot support, disable on DCE8 because of blank screens if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 && dc->ctx->dce_version != DCE_VERSION_8_1 && dc->ctx->dce_version != DCE_VERSION_8_3) { for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; if (edp_link != edp_streams[0]->link) continue; // enable fastboot if backend is enabled on eDP if (edp_link->link_enc->funcs->is_dig_enabled && edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && edp_link->link_status.link_active) { struct dc_stream_state *edp_stream = edp_streams[0]; can_apply_edp_fast_boot = dc_validate_boot_timing(dc, edp_stream->sink, &edp_stream->timing); edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; if (can_apply_edp_fast_boot) DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); break; } } // We are trying to enable eDP, don't power down VDD if (can_apply_edp_fast_boot) keep_edp_vdd_on = true; } // Check seamless boot support for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) { can_apply_seamless_boot = true; break; } } /* eDP should 
not have stream in resume from S4 and so even with VBios post * it should get turned off */ if (edp_with_sink_num) edp_link_with_sink = edp_links_with_sink[0]; if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); power_down_all_hw_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); } bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); } static uint32_t compute_pstate_blackout_duration( struct bw_fixed blackout_duration, const struct dc_stream_state *stream) { uint32_t total_dest_line_time_ns; uint32_t pstate_blackout_duration_ns; pstate_blackout_duration_ns = 1000 * blackout_duration.value >> 24; total_dest_line_time_ns = 1000000UL * (stream->timing.h_total * 10) / stream->timing.pix_clk_100hz + pstate_blackout_duration_ns; return total_dest_line_time_ns; } static void dce110_set_displaymarks( const struct dc *dc, struct dc_state *context) { uint8_t i, num_pipes; unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; for (i = 0, num_pipes = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; uint32_t total_dest_line_time_ns; if (pipe_ctx->stream == NULL) continue; total_dest_line_time_ns = compute_pstate_blackout_duration( dc->bw_vbios->blackout_duration, pipe_ctx->stream); pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks( pipe_ctx->plane_res.mi, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes], context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes], context->bw_ctx.bw.dce.stutter_entry_wm_ns[num_pipes], context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes], 
total_dest_line_time_ns); if (i == underlay_idx) { num_pipes++; pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks( pipe_ctx->plane_res.mi, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes], context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes], context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes], total_dest_line_time_ns); } num_pipes++; } } void dce110_set_safe_displaymarks( struct resource_context *res_ctx, const struct resource_pool *pool) { int i; int underlay_idx = pool->underlay_pipe_index; struct dce_watermarks max_marks = { MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK }; struct dce_watermarks nbp_marks = { SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK }; struct dce_watermarks min_marks = { 0, 0, 0, 0}; for (i = 0; i < MAX_PIPES; i++) { if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL) continue; res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_display_marks( res_ctx->pipe_ctx[i].plane_res.mi, nbp_marks, max_marks, min_marks, max_marks, MAX_WATERMARK); if (i == underlay_idx) res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_chroma_display_marks( res_ctx->pipe_ctx[i].plane_res.mi, nbp_marks, max_marks, max_marks, MAX_WATERMARK); } } /******************************************************************************* * Public functions ******************************************************************************/ static void set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, struct dc_crtc_timing_adjust adjust) { int i = 0; struct drr_params params = {0}; // DRR should set trigger event to monitor surface update event unsigned int event_triggers = 0x80; // Note DRR trigger events are generated regardless of whether num frames met. unsigned int num_frames = 2; params.vertical_total_max = adjust.v_total_max; params.vertical_total_min = adjust.v_total_min; /* TODO: If multiple pipes are to be supported, you need * some GSL stuff. 
Static screen triggers may be programmed differently * as well. */ for (i = 0; i < num_pipes; i++) { pipe_ctx[i]->stream_res.tg->funcs->set_drr( pipe_ctx[i]->stream_res.tg, &params); if (adjust.v_total_max != 0 && adjust.v_total_min != 0) pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( pipe_ctx[i]->stream_res.tg, event_triggers, num_frames); } } static void get_position(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position) { int i = 0; /* TODO: handle pipes > 1 */ for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position); } static void set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *params) { unsigned int i; unsigned int triggers = 0; if (params->triggers.overlay_update) triggers |= 0x100; if (params->triggers.surface_update) triggers |= 0x80; if (params->triggers.cursor_update) triggers |= 0x2; if (params->triggers.force_trigger) triggers |= 0x1; if (num_pipes) { struct dc *dc = pipe_ctx[0]->stream->ctx->dc; if (dc->fbc_compressor) triggers |= 0x84; } for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs-> set_static_screen_control(pipe_ctx[i]->stream_res.tg, triggers, params->num_frames); } /* * Check if FBC can be enabled */ static bool should_enable_fbc(struct dc *dc, struct dc_state *context, uint32_t *pipe_idx) { uint32_t i; struct pipe_ctx *pipe_ctx = NULL; struct resource_context *res_ctx = &context->res_ctx; unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; ASSERT(dc->fbc_compressor); /* FBC memory should be allocated */ if (!dc->ctx->fbc_gpu_addr) return false; /* Only supports single display */ if (context->stream_count != 1) return false; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (res_ctx->pipe_ctx[i].stream) { pipe_ctx = &res_ctx->pipe_ctx[i]; if (!pipe_ctx) continue; /* fbc not applicable on underlay pipe */ if (pipe_ctx->pipe_idx != underlay_idx) { *pipe_idx = i; 
break; } } } if (i == dc->res_pool->pipe_count) return false; if (!pipe_ctx->stream->link) return false; /* Only supports eDP */ if (pipe_ctx->stream->link->connector_signal != SIGNAL_TYPE_EDP) return false; /* PSR should not be enabled */ if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled) return false; /* Replay should not be enabled */ if (pipe_ctx->stream->link->replay_settings.replay_feature_enabled) return false; /* Nothing to compress */ if (!pipe_ctx->plane_state) return false; /* Only for non-linear tiling */ if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) return false; return true; } /* * Enable FBC */ static void enable_fbc( struct dc *dc, struct dc_state *context) { uint32_t pipe_idx = 0; if (should_enable_fbc(dc, context, &pipe_idx)) { /* Program GRPH COMPRESSED ADDRESS and PITCH */ struct compr_addr_and_pitch_params params = {0, 0, 0}; struct compressor *compr = dc->fbc_compressor; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; params.source_view_width = pipe_ctx->stream->timing.h_addressable; params.source_view_height = pipe_ctx->stream->timing.v_addressable; params.inst = pipe_ctx->stream_res.tg->inst; compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr; compr->funcs->surface_address_and_pitch(compr, &params); compr->funcs->set_fbc_invalidation_triggers(compr, 1); compr->funcs->enable_fbc(compr, &params); } } static void dce110_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { int i; /* Reset old context */ /* look up the targets that have been removed since last commit */ for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; /* Note: We need to disable output if clock sources change, * since bios does optimization and doesn't apply if changing * PHY when not already disabled. 
*/ /* Skip underlay pipe since it will be handled in commit surface*/ if (!pipe_ctx_old->stream || pipe_ctx_old->top_pipe) continue; if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { struct clock_source *old_clk = pipe_ctx_old->clock_source; /* Disable if new stream is null. O/w, if stream is * disabled already, no need to disable again. */ if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) { dc->link_srv->set_dpms_off(pipe_ctx_old); /* free acquired resources*/ if (pipe_ctx_old->stream_res.audio) { /*disable az_endpoint*/ pipe_ctx_old->stream_res.audio->funcs-> az_disable(pipe_ctx_old->stream_res.audio); /*free audio*/ if (dc->caps.dynamic_audio == true) { /*we have to dynamic arbitrate the audio endpoints*/ /*we free the resource, need reset is_audio_acquired*/ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx_old->stream_res.audio, false); pipe_ctx_old->stream_res.audio = NULL; } } } pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true); if (!hwss_wait_for_blank_complete(pipe_ctx_old->stream_res.tg)) { dm_error("DC: failed to blank crtc!\n"); BREAK_TO_DEBUGGER(); } pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg); pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0; pipe_ctx_old->plane_res.mi->funcs->free_mem_input( pipe_ctx_old->plane_res.mi, dc->current_state->stream_count); if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx, dc->res_pool, old_clk)) old_clk->funcs->cs_power_down(old_clk); dc->hwss.disable_plane(dc, pipe_ctx_old); pipe_ctx_old->stream = NULL; } } } static void dce110_setup_audio_dto( struct dc *dc, struct dc_state *context) { int i; /* program audio wall clock. use HDMI as clock source if HDMI * audio active. 
Otherwise, use DP as clock source * first, loop to find any HDMI audio, if not, loop find DP audio */ /* Setup audio rate clock source */ /* Issue: * Audio lag happened on DP monitor when unplug a HDMI monitor * * Cause: * In case of DP and HDMI connected or HDMI only, DCCG_AUDIO_DTO_SEL * is set to either dto0 or dto1, audio should work fine. * In case of DP connected only, DCCG_AUDIO_DTO_SEL should be dto1, * set to dto0 will cause audio lag. * * Solution: * Not optimized audio wall dto setup. When mode set, iterate pipe_ctx, * find first available pipe with audio, setup audio wall DTO per topology * instead of per pipe. */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream == NULL) continue; if (pipe_ctx->top_pipe) continue; if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) continue; if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) { struct dtbclk_dto_params dto_params = {0}; dc->res_pool->dccg->funcs->set_audio_dtbclk_dto( dc->res_pool->dccg, &dto_params); pipe_ctx->stream_res.audio->funcs->wall_dto_setup( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, &audio_output.crtc_info, &audio_output.pll_info); } else pipe_ctx->stream_res.audio->funcs->wall_dto_setup( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, &audio_output.crtc_info, &audio_output.pll_info); break; } } /* no HDMI audio is found, try DP audio */ if (i == dc->res_pool->pipe_count) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream == NULL) continue; if (pipe_ctx->top_pipe) continue; if (!dc_is_dp_signal(pipe_ctx->stream->signal)) continue; if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); 
pipe_ctx->stream_res.audio->funcs->wall_dto_setup( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, &audio_output.crtc_info, &audio_output.pll_info); break; } } } } enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) { struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; /* reset syncd pipes from disabled pipes */ if (dc->config.use_pipe_ctx_sync_logic) reset_syncd_pipes_from_disabled_pipes(dc, context); /* Reset old context */ /* look up the targets that have been removed since last commit */ hws->funcs.reset_hw_ctx_wrap(dc, context); /* Skip applying if no targets */ if (context->stream_count <= 0) return DC_OK; /* Apply new context */ dcb->funcs->set_scratch_critical_state(dcb, true); /* below is for real asic only */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe) continue; if (pipe_ctx->stream == pipe_ctx_old->stream) { if (pipe_ctx_old->clock_source != pipe_ctx->clock_source) dce_crtc_switch_to_clk_src(dc->hwseq, pipe_ctx->clock_source, i); continue; } hws->funcs.enable_display_power_gating( dc, i, dc->ctx->dc_bios, PIPE_GATING_CONTROL_DISABLE); } if (dc->fbc_compressor) dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); dce110_setup_audio_dto(dc, context); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream == NULL) continue; if (pipe_ctx->stream == pipe_ctx_old->stream && pipe_ctx->stream->link->link_state_valid) { continue; } if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) continue; if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) continue; status = apply_single_controller_ctx_to_hw( pipe_ctx, context, dc); 
if (DC_OK != status) return status; #ifdef CONFIG_DRM_AMD_DC_FP if (hws->funcs.resync_fifo_dccg_dio) hws->funcs.resync_fifo_dccg_dio(hws, dc, context); #endif } if (dc->fbc_compressor) enable_fbc(dc, dc->current_state); dcb->funcs->set_scratch_critical_state(dcb, false); return DC_OK; } /******************************************************************************* * Front End programming ******************************************************************************/ static void set_default_colors(struct pipe_ctx *pipe_ctx) { struct default_adjustment default_adjust = { 0 }; default_adjust.force_hw_default = false; default_adjust.in_color_space = pipe_ctx->plane_state->color_space; default_adjust.out_color_space = pipe_ctx->stream->output_color_space; default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW; default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format; /* display color depth */ default_adjust.color_depth = pipe_ctx->stream->timing.display_color_depth; /* Lb color depth */ default_adjust.lb_color_depth = pipe_ctx->plane_res.scl_data.lb_params.depth; pipe_ctx->plane_res.xfm->funcs->opp_set_csc_default( pipe_ctx->plane_res.xfm, &default_adjust); } /******************************************************************************* * In order to turn on/off specific surface we will program * Blender + CRTC * * In case that we have two surfaces and they have a different visibility * we can't turn off the CRTC since it will turn off the entire display * * |----------------------------------------------- | * |bottom pipe|curr pipe | | | * |Surface |Surface | Blender | CRCT | * |visibility |visibility | Configuration| | * |------------------------------------------------| * | off | off | CURRENT_PIPE | blank | * | off | on | CURRENT_PIPE | unblank | * | on | off | OTHER_PIPE | unblank | * | on | on | BLENDING | unblank | * -------------------------------------------------| * 
******************************************************************************/ static void program_surface_visibility(const struct dc *dc, struct pipe_ctx *pipe_ctx) { enum blnd_mode blender_mode = BLND_MODE_CURRENT_PIPE; bool blank_target = false; if (pipe_ctx->bottom_pipe) { /* For now we are supporting only two pipes */ ASSERT(pipe_ctx->bottom_pipe->bottom_pipe == NULL); if (pipe_ctx->bottom_pipe->plane_state->visible) { if (pipe_ctx->plane_state->visible) blender_mode = BLND_MODE_BLENDING; else blender_mode = BLND_MODE_OTHER_PIPE; } else if (!pipe_ctx->plane_state->visible) blank_target = true; } else if (!pipe_ctx->plane_state->visible) blank_target = true; dce_set_blender_mode(dc->hwseq, pipe_ctx->stream_res.tg->inst, blender_mode); pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target); } static void program_gamut_remap(struct pipe_ctx *pipe_ctx) { int i = 0; struct xfm_grph_csc_adjustment adjust; memset(&adjust, 0, sizeof(adjust)); adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) adjust.temperature_matrix[i] = pipe_ctx->stream->gamut_remap_matrix.matrix[i]; } pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); } static void update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { struct dc_plane_state *plane_state = pipe_ctx->plane_state; if (plane_state == NULL) return; pipe_ctx->plane_res.mi->funcs->mem_input_program_surface_flip_and_addr( pipe_ctx->plane_res.mi, &plane_state->address, plane_state->flip_immediate); plane_state->status.requested_address = plane_state->address; } static void dce110_update_pending_status(struct pipe_ctx *pipe_ctx) { struct dc_plane_state *plane_state = pipe_ctx->plane_state; if (plane_state == NULL) return; plane_state->status.is_flip_pending = 
pipe_ctx->plane_res.mi->funcs->mem_input_is_flip_pending( pipe_ctx->plane_res.mi); if (plane_state->status.is_flip_pending && !plane_state->visible) pipe_ctx->plane_res.mi->current_address = pipe_ctx->plane_res.mi->request_address; plane_state->status.current_address = pipe_ctx->plane_res.mi->current_address; if (pipe_ctx->plane_res.mi->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO && pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye) { plane_state->status.is_right_eye =\ !pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg); } } void dce110_power_down(struct dc *dc) { power_down_all_hw_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); } static bool wait_for_reset_trigger_to_occur( struct dc_context *dc_ctx, struct timing_generator *tg) { bool rc = false; /* To avoid endless loop we wait at most * frames_to_wait_on_triggered_reset frames for the reset to occur. */ const uint32_t frames_to_wait_on_triggered_reset = 10; uint32_t i; for (i = 0; i < frames_to_wait_on_triggered_reset; i++) { if (!tg->funcs->is_counter_moving(tg)) { DC_ERROR("TG counter is not moving!\n"); break; } if (tg->funcs->did_triggered_reset_occur(tg)) { rc = true; /* usually occurs at i=1 */ DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n", i); break; } /* Wait for one frame. */ tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE); tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK); } if (false == rc) DC_ERROR("GSL: Timeout on reset trigger!\n"); return rc; } /* Enable timing synchronization for a group of Timing Generators. */ static void dce110_enable_timing_synchronization( struct dc *dc, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]) { struct dc_context *dc_ctx = dc->ctx; struct dcp_gsl_params gsl_params = { 0 }; int i; DC_SYNC_INFO("GSL: Setting-up...\n"); /* Designate a single TG in the group as a master. * Since HW doesn't care which one, we always assign * the 1st one in the group. 
*/ gsl_params.gsl_group = 0; gsl_params.gsl_master = grouped_pipes[0]->stream_res.tg->inst; for (i = 0; i < group_size; i++) grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock( grouped_pipes[i]->stream_res.tg, &gsl_params); /* Reset slave controllers on master VSync */ DC_SYNC_INFO("GSL: enabling trigger-reset\n"); for (i = 1 /* skip the master */; i < group_size; i++) grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger( grouped_pipes[i]->stream_res.tg, gsl_params.gsl_group); for (i = 1 /* skip the master */; i < group_size; i++) { DC_SYNC_INFO("GSL: waiting for reset to occur.\n"); wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg); grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger( grouped_pipes[i]->stream_res.tg); } /* GSL Vblank synchronization is a one time sync mechanism, assumption * is that the sync'ed displays will not drift out of sync over time*/ DC_SYNC_INFO("GSL: Restoring register states.\n"); for (i = 0; i < group_size; i++) grouped_pipes[i]->stream_res.tg->funcs->tear_down_global_swap_lock(grouped_pipes[i]->stream_res.tg); DC_SYNC_INFO("GSL: Set-up complete.\n"); } static void dce110_enable_per_frame_crtc_position_reset( struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]) { struct dc_context *dc_ctx = dc->ctx; struct dcp_gsl_params gsl_params = { 0 }; int i; gsl_params.gsl_group = 0; gsl_params.gsl_master = 0; for (i = 0; i < group_size; i++) grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock( grouped_pipes[i]->stream_res.tg, &gsl_params); DC_SYNC_INFO("GSL: enabling trigger-reset\n"); for (i = 1; i < group_size; i++) grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset( grouped_pipes[i]->stream_res.tg, gsl_params.gsl_master, &grouped_pipes[i]->stream->triggered_crtc_reset); DC_SYNC_INFO("GSL: waiting for reset to occur.\n"); for (i = 1; i < group_size; i++) wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg); for (i = 0; i < group_size; i++) 
grouped_pipes[i]->stream_res.tg->funcs->tear_down_global_swap_lock(grouped_pipes[i]->stream_res.tg); } static void init_pipes(struct dc *dc, struct dc_state *context) { // Do nothing } static void init_hw(struct dc *dc) { int i; struct dc_bios *bp; struct transform *xfm; struct abm *abm; struct dmcu *dmcu; struct dce_hwseq *hws = dc->hwseq; uint32_t backlight = MAX_BACKLIGHT_LEVEL; bp = dc->ctx->dc_bios; for (i = 0; i < dc->res_pool->pipe_count; i++) { xfm = dc->res_pool->transforms[i]; xfm->funcs->transform_reset(xfm); hws->funcs.enable_display_power_gating( dc, i, bp, PIPE_GATING_CONTROL_INIT); hws->funcs.enable_display_power_gating( dc, i, bp, PIPE_GATING_CONTROL_DISABLE); hws->funcs.enable_display_pipe_clock_gating( dc->ctx, true); } dce_clock_gating_power_up(dc->hwseq, false); /***************************************/ for (i = 0; i < dc->link_count; i++) { /****************************************/ /* Power up AND update implementation according to the * required signal (which may be different from the * default signal on connector). */ struct dc_link *link = dc->links[i]; link->link_enc->funcs->hw_init(link->link_enc); } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; tg->funcs->disable_vga(tg); /* Blank controller using driver code instead of * command table. 
*/ tg->funcs->set_blank(tg, true); hwss_wait_for_blank_complete(tg); } for (i = 0; i < dc->res_pool->audio_count; i++) { struct audio *audio = dc->res_pool->audios[i]; audio->funcs->hw_init(audio); } for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; if (link->panel_cntl) backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); } abm = dc->res_pool->abm; if (abm != NULL) abm->funcs->abm_init(abm, backlight); dmcu = dc->res_pool->dmcu; if (dmcu != NULL && abm != NULL) abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); if (dc->fbc_compressor) dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor); } void dce110_prepare_bandwidth( struct dc *dc, struct dc_state *context) { struct clk_mgr *dccg = dc->clk_mgr; dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); if (dccg) dccg->funcs->update_clocks( dccg, context, false); } void dce110_optimize_bandwidth( struct dc *dc, struct dc_state *context) { struct clk_mgr *dccg = dc->clk_mgr; dce110_set_displaymarks(dc, context); if (dccg) dccg->funcs->update_clocks( dccg, context, true); } static void dce110_program_front_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx) { struct mem_input *mi = pipe_ctx->plane_res.mi; struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; unsigned int i; struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); memset(&adjust, 0, sizeof(adjust)); adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; dce_enable_fe_clock(dc->hwseq, mi->inst, true); set_default_colors(pipe_ctx); if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { tbl_entry.color_space = pipe_ctx->stream->output_color_space; for (i = 0; i < 12; i++) tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i]; pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment (pipe_ctx->plane_res.xfm, &tbl_entry); } if 
(pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) adjust.temperature_matrix[i] = pipe_ctx->stream->gamut_remap_matrix.matrix[i]; } pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != NULL; program_scaler(dc, pipe_ctx); mi->funcs->mem_input_program_surface_config( mi, plane_state->format, &plane_state->tiling_info, &plane_state->plane_size, plane_state->rotation, NULL, false); if (mi->funcs->set_blank) mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible); if (dc->config.gpu_vm_support) mi->funcs->mem_input_program_pte_vm( pipe_ctx->plane_res.mi, plane_state->format, &plane_state->tiling_info, plane_state->rotation); /* Moved programming gamma from dc to hwss */ if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); if (pipe_ctx->plane_state->update_flags.bits.full_update) hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); DC_LOG_SURFACE( "Pipe:%d %p: addr hi:0x%x, " "addr low:0x%x, " "src: %d, %d, %d," " %d; dst: %d, %d, %d, %d;" "clip: %d, %d, %d, %d\n", pipe_ctx->pipe_idx, (void *) pipe_ctx->plane_state, pipe_ctx->plane_state->address.grph.addr.high_part, pipe_ctx->plane_state->address.grph.addr.low_part, pipe_ctx->plane_state->src_rect.x, pipe_ctx->plane_state->src_rect.y, pipe_ctx->plane_state->src_rect.width, pipe_ctx->plane_state->src_rect.height, pipe_ctx->plane_state->dst_rect.x, pipe_ctx->plane_state->dst_rect.y, pipe_ctx->plane_state->dst_rect.width, pipe_ctx->plane_state->dst_rect.height, pipe_ctx->plane_state->clip_rect.x, pipe_ctx->plane_state->clip_rect.y, pipe_ctx->plane_state->clip_rect.width, 
pipe_ctx->plane_state->clip_rect.height); DC_LOG_SURFACE( "Pipe %d: width, height, x, y\n" "viewport:%d, %d, %d, %d\n" "recout: %d, %d, %d, %d\n", pipe_ctx->pipe_idx, pipe_ctx->plane_res.scl_data.viewport.width, pipe_ctx->plane_res.scl_data.viewport.height, pipe_ctx->plane_res.scl_data.viewport.x, pipe_ctx->plane_res.scl_data.viewport.y, pipe_ctx->plane_res.scl_data.recout.width, pipe_ctx->plane_res.scl_data.recout.height, pipe_ctx->plane_res.scl_data.recout.x, pipe_ctx->plane_res.scl_data.recout.y); } static void dce110_apply_ctx_for_surface( struct dc *dc, const struct dc_stream_state *stream, int num_planes, struct dc_state *context) { int i; if (num_planes == 0) return; if (dc->fbc_compressor) dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream != stream) continue; /* Need to allocate mem before program front end for Fiji */ pipe_ctx->plane_res.mi->funcs->allocate_mem_input( pipe_ctx->plane_res.mi, pipe_ctx->stream->timing.h_total, pipe_ctx->stream->timing.v_total, pipe_ctx->stream->timing.pix_clk_100hz / 10, context->stream_count); dce110_program_front_end_for_pipe(dc, pipe_ctx); dc->hwss.update_plane_addr(dc, pipe_ctx); program_surface_visibility(dc, pipe_ctx); } if (dc->fbc_compressor) enable_fbc(dc, context); } static void dce110_post_unlock_program_front_end( struct dc *dc, struct dc_state *context) { } static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; int fe_idx = pipe_ctx->plane_res.mi ? 
pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;

	/* Do not power down fe when stream is active on dce*/
	if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
		return;

	hws->funcs.enable_display_power_gating(
		dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);

	dc->res_pool->transforms[fe_idx]->funcs->transform_reset(
			dc->res_pool->transforms[fe_idx]);
}

/* DCE has no MPCC; this hook exists only to satisfy the hwss interface. */
static void dce110_wait_for_mpcc_disconnect(
		struct dc *dc,
		struct resource_pool *res_pool,
		struct pipe_ctx *pipe_ctx)
{
	/* do nothing*/
}

/* Program the output CSC adjustment from the stream's color matrix.
 * NOTE(review): the colorspace/matrix/opp_id parameters appear unused here —
 * the values are taken from pipe_ctx->stream instead; presumably the
 * signature only matches the generic hwss hook.  Verify against callers.
 */
static void program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	int i;
	struct out_csc_color_matrix tbl_entry;

	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		enum dc_color_space color_space =
			pipe_ctx->stream->output_color_space;

		/* regval has 12 entries, matching the 3x4 CSC matrix */
		for (i = 0; i < 12; i++)
			tbl_entry.regval[i] =
				pipe_ctx->stream->csc_color_matrix.matrix[i];

		tbl_entry.color_space = color_space;

		pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(
				pipe_ctx->plane_res.xfm, &tbl_entry);
	}
}

/* Position the HW cursor, translating for the source viewport and
 * disabling it on pipes that must not show it.
 */
static void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
	struct mem_input *mi = pipe_ctx->plane_res.mi;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.xtalin_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
* * This translation isn't affected by scaling so it needs to be * done *after* we adjust the position for the scale factor. * * This is only done by opt-in for now since there are still * some usecases like tiled display that might enable the * cursor on both streams while expecting dc to clip it. */ if (pos_cpy.translate_by_source) { pos_cpy.x += pipe_ctx->plane_state->src_rect.x; pos_cpy.y += pipe_ctx->plane_state->src_rect.y; } if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) pos_cpy.enable = false; if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) pos_cpy.enable = false; if (ipp->funcs->ipp_cursor_set_position) ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param); if (mi->funcs->set_cursor_position) mi->funcs->set_cursor_position(mi, &pos_cpy, &param); } static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) { struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; if (pipe_ctx->plane_res.ipp && pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes) pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes( pipe_ctx->plane_res.ipp, attributes); if (pipe_ctx->plane_res.mi && pipe_ctx->plane_res.mi->funcs->set_cursor_attributes) pipe_ctx->plane_res.mi->funcs->set_cursor_attributes( pipe_ctx->plane_res.mi, attributes); if (pipe_ctx->plane_res.xfm && pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes) pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes( pipe_ctx->plane_res.xfm, attributes); } bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { struct dc_link *link = pipe_ctx->stream->link; struct dc *dc = link->ctx->dc; struct abm *abm = pipe_ctx->stream_res.abm; struct panel_cntl *panel_cntl = link->panel_cntl; struct dmcu *dmcu = dc->res_pool->dmcu; bool fw_set_brightness = true; /* DMCU -1 for all controller id values, * therefore +1 here */ uint32_t controller_id = 
pipe_ctx->stream_res.tg->inst + 1; if (abm == NULL || panel_cntl == NULL || (abm->funcs->set_backlight_level_pwm == NULL)) return false; if (dmcu) fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); if (!fw_set_brightness && panel_cntl->funcs->driver_set_backlight) panel_cntl->funcs->driver_set_backlight(panel_cntl, backlight_pwm_u16_16); else abm->funcs->set_backlight_level_pwm( abm, backlight_pwm_u16_16, frame_ramp, controller_id, link->panel_cntl->inst); return true; } void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; if (abm) abm->funcs->set_abm_immediate_disable(abm, pipe_ctx->stream->link->panel_cntl->inst); if (panel_cntl) panel_cntl->funcs->store_backlight_level(panel_cntl); } void dce110_set_pipe(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; uint32_t otg_inst = pipe_ctx->stream_res.tg->inst + 1; if (abm && panel_cntl) abm->funcs->set_pipe(abm, otg_inst, panel_cntl->inst); } void dce110_enable_lvds_link_output(struct dc_link *link, const struct link_resource *link_res, enum clock_source_id clock_source, uint32_t pixel_clock) { link->link_enc->funcs->enable_lvds_output( link->link_enc, clock_source, pixel_clock); link->phy_state.symclk_state = SYMCLK_ON_TX_ON; } void dce110_enable_tmds_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal, enum clock_source_id clock_source, enum dc_color_depth color_depth, uint32_t pixel_clock) { link->link_enc->funcs->enable_tmds_output( link->link_enc, clock_source, color_depth, signal, pixel_clock); link->phy_state.symclk_state = SYMCLK_ON_TX_ON; } void dce110_enable_dp_link_output( struct dc_link *link, const struct link_resource *link_res, enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { 
struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; const struct link_hwss *link_hwss = get_link_hwss(link, link_res); unsigned int i; /* * Add the logic to extract BOTH power up and power down sequences * from enable/disable link output and only call edp panel control * in enable_link_dp and disable_link_dp once. */ if (link->connector_signal == SIGNAL_TYPE_EDP) { link->dc->hwss.edp_wait_for_hpd_ready(link, true); } /* If the current pixel clock source is not DTO(happens after * switching from HDMI passive dongle to DP on the same connector), * switch the pixel clock source to DTO. */ for (i = 0; i < MAX_PIPES; i++) { if (pipes[i].stream != NULL && pipes[i].stream->link == link) { if (pipes[i].clock_source != NULL && pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { pipes[i].clock_source = dp_cs; pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz = pipes[i].stream->timing.pix_clk_100hz; pipes[i].clock_source->funcs->program_pix_clk( pipes[i].clock_source, &pipes[i].stream_res.pix_clk_params, dc->link_srv->dp_get_encoding_format(link_settings), &pipes[i].pll_settings); } } } if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { if (dc->clk_mgr->funcs->notify_link_rate_change) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); } if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); if (link_hwss->ext.enable_dp_link_output) link_hwss->ext.enable_dp_link_output(link, link_res, signal, clock_source, link_settings); link->phy_state.symclk_state = SYMCLK_ON_TX_ON; if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); } void dce110_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { struct dc 
*dc = link->ctx->dc; const struct link_hwss *link_hwss = get_link_hwss(link, link_res); struct dmcu *dmcu = dc->res_pool->dmcu; if (signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, false); else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); link_hwss->disable_link_output(link, link_res, signal); link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; /* * Add the logic to extract BOTH power up and power down sequences * from enable/disable link output and only call edp panel control * in enable_link_dp and disable_link_dp once. */ if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); } static const struct hw_sequencer_funcs dce110_funcs = { .program_gamut_remap = program_gamut_remap, .program_output_csc = program_output_csc, .init_hw = init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, .post_unlock_program_front_end = dce110_post_unlock_program_front_end, .update_plane_addr = update_plane_addr, .update_pending_status = dce110_update_pending_status, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dce110_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset, .update_info_frame = dce110_update_info_frame, .enable_stream = dce110_enable_stream, .disable_stream = dce110_disable_stream, .unblank_stream = dce110_unblank_stream, .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dce110_power_down_fe, .pipe_control_lock = dce_pipe_control_lock, .interdependent_update_lock = NULL, .cursor_lock = dce_pipe_control_lock, .prepare_bandwidth = dce110_prepare_bandwidth, .optimize_bandwidth = dce110_optimize_bandwidth, .set_drr = 
set_drr,
	.get_position = get_position,
	.set_static_screen_control = set_static_screen_control,
	.setup_stereo = NULL,
	.set_avmute = dce110_set_avmute,
	.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
	.edp_backlight_control = dce110_edp_backlight_control,
	.edp_power_control = dce110_edp_power_control,
	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
	.set_cursor_position = dce110_set_cursor_position,
	.set_cursor_attribute = dce110_set_cursor_attribute,
	.set_backlight_level = dce110_set_backlight_level,
	.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
	.set_pipe = dce110_set_pipe,
	.enable_lvds_link_output = dce110_enable_lvds_link_output,
	.enable_tmds_link_output = dce110_enable_tmds_link_output,
	.enable_dp_link_output = dce110_enable_dp_link_output,
	.disable_link_output = dce110_disable_link_output,
};

/* DCE11.0 private (hwseq-internal) hooks; unused hooks are left NULL. */
static const struct hwseq_private_funcs dce110_private_funcs = {
	.init_pipes = init_pipes,
	.update_plane_addr = update_plane_addr,
	.set_input_transfer_func = dce110_set_input_transfer_func,
	.set_output_transfer_func = dce110_set_output_transfer_func,
	.power_down = dce110_power_down,
	.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
	.enable_display_power_gating = dce110_enable_display_power_gating,
	.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
	.enable_stream_timing = dce110_enable_stream_timing,
	.disable_stream_gating = NULL,
	.enable_stream_gating = NULL,
	.edp_backlight_control = dce110_edp_backlight_control,
};

/* Install the DCE11.0 public and private hw-sequencer vtables on the dc. */
void dce110_hw_sequencer_construct(struct dc *dc)
{
	dc->hwss = dce110_funcs;
	dc->hwseq->funcs = dce110_private_funcs;
}
linux-master
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "dce110/dce110_resource.h" #include "include/irq_service_interface.h" #include "dce/dce_audio.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce110/dce110_timing_generator_v.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_mem_input.h" #include "dce110/dce110_mem_input_v.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce110/dce110_transform_v.h" #include "dce/dce_opp.h" #include "dce110/dce110_opp_v.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_i2c.h" #include "dce/dce_panel_cntl.h" #define DC_LOGGER \ dc->ctx->logger #include "dce110/dce110_compressor.h" #include "reg_helper.h" #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" #include "gmc/gmc_8_2_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC 
#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif #ifndef DPHY_RX_FAST_TRAINING_CAPABLE #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1 #endif static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define 
ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE110_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCE_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const 
struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; static const struct dce110_aux_registers_shift aux_shift = { DCE_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCE_AUX_MASK_SH_LIST(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; /* AG TBD Needs to be reduced back to 3 pipes once dce10 hw sequencer implemented. 
*/ #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_100_110(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps carrizo_resource_cap = { .num_timing_generator = 3, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct resource_caps stoney_resource_cap = { .num_timing_generator = 2, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 }, 64, 64 }; static const struct dc_debug_options debug_defaults = { .enable_legacy_fast_update = true, }; static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = false, .nv12 = true, .fp16 = false }, .max_upscale_factor = { .argb8888 = 1, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 1, .nv12 = 250, .fp16 = 1 }, 64, 64 }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x4819 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static int 
/*
 * Map a UNIPHY transmitter id to its 0-based PHY instance (A..G -> 0..6).
 * Unknown transmitters assert and fall back to instance 0.
 * (Return type 'static int' is on the preceding source line.)
 */
map_transmitter_id_to_phy_instance(
	enum transmitter transmitter)
{
	switch (transmitter) {
	case TRANSMITTER_UNIPHY_A:
		return 0;
	case TRANSMITTER_UNIPHY_B:
		return 1;
	case TRANSMITTER_UNIPHY_C:
		return 2;
	case TRANSMITTER_UNIPHY_D:
		return 3;
	case TRANSMITTER_UNIPHY_E:
		return 4;
	case TRANSMITTER_UNIPHY_F:
		return 5;
	case TRANSMITTER_UNIPHY_G:
		return 6;
	default:
		ASSERT(0);
		return 0;
	}
}

/* Read the HDMI-disable and audio-stream straps from CC_DC_HDMI_STRAPS /
 * DC_PINSTRAPS into the resource_straps out-parameter. */
static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable,
			AUDIO_STREAM_NUMBER, &straps->audio_stream_number);

	REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
}

/* Factory: audio endpoint 'inst' bound to its per-instance register set. */
static struct audio *create_audio(
	struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}

/* Factory: DCE110 timing generator 'instance'; NULL on allocation failure. */
static struct timing_generator *dce110_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance,
		const struct dce110_timing_generator_offsets *offsets)
{
	struct dce110_timing_generator *tg110 =
		kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);

	if (!tg110)
		return NULL;

	dce110_timing_generator_construct(tg110, ctx, instance, offsets);
	return &tg110->base;
}

/* Factory: stream encoder for engine 'eng_id'; NULL on allocation failure. */
static struct stream_encoder *dce110_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dce110_stream_encoder *enc110 =
		kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);

	if (!enc110)
		return NULL;

	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);
	return &enc110->base;
}

/* set register offset with instance and index */
#define SRII(reg_name, block, id)\
	.reg_name[id] = mm ## block ## id ## _ ## reg_name

/* HW sequencer register tables: Stoney and Carrizo use different layouts;
 * dce110_hwseq_create() selects one by ASIC revision. */
static const struct dce_hwseq_registers hwseq_stoney_reg = {
		HWSEQ_ST_REG_LIST()
};

static const struct dce_hwseq_registers hwseq_cz_reg = {
		HWSEQ_CZ_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
		HWSEQ_DCE11_MASK_SH_LIST(__SHIFT),
};

static const struct dce_hwseq_mask hwseq_mask = {
		HWSEQ_DCE11_MASK_SH_LIST(_MASK),
};

/*
 * Build the HW sequencer object.  Selects the Stoney or Carrizo register
 * table by ASIC internal revision and enables the blank-CRTC-trigger
 * workaround.  Returns NULL on allocation failure.
 */
static struct dce_hwseq *dce110_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);

	if (hws) {
		hws->ctx = ctx;
		hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ?
				&hwseq_stoney_reg : &hwseq_cz_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
		/* WA: trigger blend updates off the CRTC */
		hws->wa.blnd_crtc_trigger = true;
	}
	return hws;
}

/* Factory callbacks handed to resource_construct(). */
static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = create_audio,
	.create_stream_encoder = dce110_stream_encoder_create,
	.create_hwseq = dce110_hwseq_create,
};

#define mi_inst_regs(id) { \
	MI_DCE11_REG_LIST(id), \
	.MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
}
static const struct dce_mem_input_registers mi_regs[] = {
		mi_inst_regs(0),
		mi_inst_regs(1),
		mi_inst_regs(2),
};

static const struct dce_mem_input_shift mi_shifts = {
		MI_DCE11_MASK_SH_LIST(__SHIFT),
		.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
};

static const struct dce_mem_input_mask mi_masks = {
		MI_DCE11_MASK_SH_LIST(_MASK),
		.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
};

/* Factory: DCE mem input 'inst'; applies the single-head read-request
 * DMIF-limit workaround.  Returns NULL on allocation failure. */
static struct mem_input *dce110_mem_input_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
					       GFP_KERNEL);

	if (!dce_mi) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dce_mem_input_construct(dce_mi, ctx, inst,
			&mi_regs[inst], &mi_shifts, &mi_masks);
	dce_mi->wa.single_head_rdreq_dmif_limit = 3;
	return &dce_mi->base;
}

/* Free a transform created by dce110_transform_create() and NULL the pointer. */
static void dce110_transform_destroy(struct transform **xfm)
{
	kfree(TO_DCE_TRANSFORM(*xfm));
	*xfm = NULL;
}

/* Factory: DCE transform 'inst'; NULL on allocation failure. */
static struct transform *dce110_transform_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce_transform *transform =
		kzalloc(sizeof(struct dce_transform), GFP_KERNEL);

	if (!transform)
		return NULL;

	dce_transform_construct(transform, ctx, inst,
				&xfm_regs[inst], &xfm_shift, &xfm_mask);
	return &transform->base;
}

/* Factory: input pixel processor 'inst' (signature continues on next line). */
static struct input_pixel_processor *dce110_ipp_create(
	struct
	dc_context *ctx, uint32_t inst)
{
	struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);

	if (!ipp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask);
	return &ipp->base;
}

/* Link-encoder capabilities advertised for DCE110. */
static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 300000,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true
};

/*
 * Factory: link encoder bound to the PHY instance derived from the
 * transmitter id.  AUX registers are indexed by (channel - 1), HPD
 * registers by the HPD source.  Returns NULL on allocation failure.
 */
static struct link_encoder *dce110_link_encoder_create(
	struct dc_context *ctx,
	const struct encoder_init_data *enc_init_data)
{
	struct dce110_link_encoder *enc110 =
		kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
	int link_regs_id;

	if (!enc110)
		return NULL;

	link_regs_id =
		map_transmitter_id_to_phy_instance(enc_init_data->transmitter);

	dce110_link_encoder_construct(enc110,
		enc_init_data,
		&link_enc_feature,
		&link_enc_regs[link_regs_id],
		&link_enc_aux_regs[enc_init_data->channel - 1],
		&link_enc_hpd_regs[enc_init_data->hpd_source]);
	return &enc110->base;
}

/* Factory: panel control for instance init_data->inst; NULL on OOM. */
static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
	struct dce_panel_cntl *panel_cntl =
		kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);

	if (!panel_cntl)
		return NULL;

	dce_panel_cntl_construct(panel_cntl,
			init_data,
			&panel_cntl_regs[init_data->inst],
			&panel_cntl_shift,
			&panel_cntl_mask);

	return &panel_cntl->base;
}

/* Factory: output pixel processor 'inst'; NULL on allocation failure. */
static struct output_pixel_processor *dce110_opp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce110_opp *opp =
		kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);

	if (!opp)
		return NULL;

	dce110_opp_construct(opp, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}

/* Factory: AUX engine 'inst'; timeout argument continues on next line. */
static struct dce_aux *dce110_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct aux_engine_dce110 *aux_engine =
		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);

	if (!aux_engine)
		return NULL;

	dce110_aux_engine_construct(aux_engine, ctx, inst,
				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER *
AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; static struct dce_i2c_hw *dce110_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct clock_source *dce110_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void dce110_clock_source_destroy(struct clock_source **clk_src) { struct dce110_clk_src *dce110_clk_src; if (!clk_src) return; dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src); kfree(dce110_clk_src->dp_ss_params); kfree(dce110_clk_src->hdmi_ss_params); kfree(dce110_clk_src->dvi_ss_params); kfree(dce110_clk_src); *clk_src = NULL; } static void dce110_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce110_transform_destroy(&pool->base.transforms[i]); if 
(pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dce110_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.dp_clock_source != NULL) dce110_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } static void get_pixel_clock_parameters( const struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; /*TODO: is this halved for YCbCr 420? 
in that case we might want to move * the pixel clock normalization for hdmi up to here instead of doing it * in pll_adjust_pix_clk */ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420); pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { pixel_clk_params->color_depth = COLOR_DEPTH_888; } if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; } if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; } static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx) { if (pipe_ctx->pipe_idx != underlay_idx) return true; if (!pipe_ctx->plane_state) return false; if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; return true; } static 
enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; if (!is_surface_pixel_format_supported(pipe_ctx, dc->res_pool->underlay_pipe_index)) return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; dce110_resource_build_pipe_hw_param(pipe_ctx); /* TODO: validate audio ASIC caps, encoder */ resource_build_info_frame(pipe_ctx); return DC_OK; } static bool dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { bool result = false; DC_LOG_BANDWIDTH_CALCS( "%s: start", __func__); if (bw_calcs( dc->ctx, dc->bw_dceip, dc->bw_vbios, context->res_ctx.pipe_ctx, dc->res_pool->pipe_count, &context->bw_ctx.bw.dce)) result = true; if (!result) DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n", __func__, context->streams[0]->timing.h_addressable, context->streams[0]->timing.v_addressable, context->streams[0]->timing.pix_clk_100hz / 10); if (memcmp(&dc->current_state->bw_ctx.bw.dce, &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { DC_LOG_BANDWIDTH_CALCS( "%s: finish,\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" , __func__, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, 
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_mode_enable, context->bw_ctx.bw.dce.cpuc_state_change_enable, context->bw_ctx.bw.dce.cpup_state_change_enable, context->bw_ctx.bw.dce.nbp_state_change_enable, context->bw_ctx.bw.dce.all_displays_in_sync, context->bw_ctx.bw.dce.dispclk_khz, context->bw_ctx.bw.dce.sclk_khz, context->bw_ctx.bw.dce.sclk_deep_sleep_khz, context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } return result; } static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static bool dce110_validate_surface_sets( struct dc_state *context) { int i, j; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 2) return false; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; /* underlay validation */ if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if ((plane->src_rect.width > 1920 || plane->src_rect.height > 1080)) return false; /* we don't have the logic to support underlay * only yet so block the 
use case where we get * NV12 plane as top layer */ if (j == 0) return false; /* irrespective of plane format, * stream should be RGB encoded */ if (context->streams[i]->timing.pixel_encoding != PIXEL_ENCODING_RGB) return false; } } } return true; } static enum dc_status dce110_validate_global( struct dc *dc, struct dc_state *context) { if (!dce110_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dce110_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static struct pipe_ctx *dce110_acquire_underlay( const struct dc_state *cur_ctx, struct dc_state *new_ctx, const struct resource_pool *pool, const struct pipe_ctx *opp_head_pipe) { struct dc_stream_state *stream = opp_head_pipe->stream; struct dc *dc = stream->ctx->dc; struct dce_hwseq *hws = dc->hwseq; struct resource_context *res_ctx = &new_ctx->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; if (res_ctx->pipe_ctx[underlay_idx].stream) return NULL; pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx]; pipe_ctx->plane_res.mi = pool->mis[underlay_idx]; /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/ pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx]; pipe_ctx->stream_res.opp = pool->opps[underlay_idx]; pipe_ctx->pipe_idx = underlay_idx; pipe_ctx->stream = stream; if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) { struct tg_color black_color = {0}; struct dc_bios *dcb = dc->ctx->dc_bios; hws->funcs.enable_display_power_gating( dc, pipe_ctx->stream_res.tg->inst, dcb, PIPE_GATING_CONTROL_DISABLE); /* * This is for 
powering on underlay, so crtc does not * need to be enabled */ pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg, &stream->timing, 0, 0, 0, 0, pipe_ctx->stream->signal, false); pipe_ctx->stream_res.tg->funcs->enable_advanced_request( pipe_ctx->stream_res.tg, true, &stream->timing); pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi, stream->timing.h_total, stream->timing.v_total, stream->timing.pix_clk_100hz / 10, new_ctx->stream_count); color_space_to_black_color(dc, COLOR_SPACE_YCBCR601, &black_color); pipe_ctx->stream_res.tg->funcs->set_blank_color( pipe_ctx->stream_res.tg, &black_color); } return pipe_ctx; } static void dce110_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); dce110_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * For CZ and later, we can allow DIG FE and BE to differ for all display types */ if (j >= 0) return pool->stream_enc[j]; return NULL; } static const struct resource_funcs dce110_res_pool_funcs = { .destroy = dce110_destroy_resource_pool, .link_enc_create = dce110_link_encoder_create, .panel_cntl_create = dce110_panel_cntl_create, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, .acquire_free_pipe_as_secondary_dpp_pipe = dce110_acquire_underlay, .add_stream_to_ctx = dce110_add_stream_to_ctx, .validate_global = dce110_validate_global, 
.find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool) { struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv), GFP_KERNEL); struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv), GFP_KERNEL); struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv), GFP_KERNEL); struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), GFP_KERNEL); if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { kfree(dce110_tgv); kfree(dce110_xfmv); kfree(dce110_miv); kfree(dce110_oppv); return false; } dce110_opp_v_construct(dce110_oppv, ctx); dce110_timing_generator_v_construct(dce110_tgv, ctx); dce110_mem_input_v_construct(dce110_miv, ctx); dce110_transform_v_construct(dce110_xfmv, ctx); pool->opps[pool->pipe_count] = &dce110_oppv->base; pool->timing_generators[pool->pipe_count] = &dce110_tgv->base; pool->mis[pool->pipe_count] = &dce110_miv->base; pool->transforms[pool->pipe_count] = &dce110_xfmv->base; pool->pipe_count++; /* update the public caps to indicate an underlay is available */ ctx->dc->caps.max_slave_planes = 1; ctx->dc->caps.max_slave_yuv_planes = 1; ctx->dc->caps.max_slave_rgb_planes = 0; return true; } static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels clks = {0}; /*do system clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &clks); /* convert all the clock fro kHz to fix point mHz */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels/8], 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*2/8], 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*3/8], 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*4/8], 1000); 
dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*5/8], 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*6/8], 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); dc->sclk_lvls = clks; /*do display clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clks); dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1], 1000); dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); /*do memory clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); } static const struct resource_caps *dce110_resource_cap( struct hw_asic_id *asic_id) { if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev)) return &stoney_resource_cap; else return &carrizo_resource_cap; } static bool dce110_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool, struct hw_asic_id asic_id) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = dce110_resource_cap(&ctx->asic_id); pool->base.funcs = &dce110_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; pool->base.underlay_pipe_index = pool->base.pipe_count; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; 
dc->caps.max_downscale_ratio = 150; dc->caps.i2c_speed_in_khz = 40; dc->caps.i2c_speed_in_khz_hdcp = 40; dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clk_src_count = 2; /* TODO: find out if CZ support 3 PLLs */ } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce110_timing_generator_create( ctx, i, &dce110_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { 
BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce110_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce110_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce110_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce110_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce110_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } if (dc->config.fbc_support) dc->fbc_compressor = dce110_compressor_create(ctx); if (!underlay_create(ctx, &pool->base)) goto res_create_fail; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce110_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < pool->base.underlay_pipe_index; ++i) dc->caps.planes[i] = plane_cap; dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap; bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; res_create_fail: dce110_resource_destruct(pool); return false; } struct 
/*
 * Public entry point: allocate and construct the DCE110 resource pool.
 * On construction failure the allocation is freed and NULL is returned.
 * ('struct' of the return type is on the preceding source line.)
 */
resource_pool *dce110_create_resource_pool(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct hw_asic_id asic_id)
{
	struct dce110_resource_pool *pool =
		kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id))
		return &pool->base;

	kfree(pool);
	BREAK_TO_DEBUGGER();
	return NULL;
}
/* provenance: linux-master, drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c */
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dce110_transform_v.h"
#include "dm_services.h"
#include "dc.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"

/* Number of phases held by the SCLV scaler coefficient RAM. */
#define SCLV_PHASES 64

#define DC_LOGGER \
	xfm->ctx->logger

/*
 * Integer scale ratios and filter-initialization values for the luma and
 * chroma planes, filled in by calculate_inits() and written to hardware by
 * program_scl_ratios_inits().
 */
struct sclv_ratios_inits {
	uint32_t h_int_scale_ratio_luma;
	uint32_t h_int_scale_ratio_chroma;
	uint32_t v_int_scale_ratio_luma;
	uint32_t v_int_scale_ratio_chroma;
	struct init_int_and_frac h_init_luma;
	struct init_int_and_frac h_init_chroma;
	struct init_int_and_frac v_init_luma;
	struct init_int_and_frac v_init_chroma;
};

/*
 * Derive the luma and chroma viewports from scl_data->viewport.
 *
 * The luma rectangle is the source viewport with x/y/width/height rounded
 * down to even values; the chroma rectangle starts as a copy of it.  For
 * 4:2:0 (PIXEL_FORMAT_420BPP8) the luma width/height are rounded back up to
 * even and the chroma viewport is halved in both dimensions, since chroma
 * is subsampled 2x2 in that format.
 */
static void calculate_viewport(
		const struct scaler_data *scl_data,
		struct rect *luma_viewport,
		struct rect *chroma_viewport)
{
	/*Do not set chroma vp for rgb444 pixel format*/
	luma_viewport->x = scl_data->viewport.x - scl_data->viewport.x % 2;
	luma_viewport->y = scl_data->viewport.y - scl_data->viewport.y % 2;
	luma_viewport->width =
		scl_data->viewport.width - scl_data->viewport.width % 2;
	luma_viewport->height =
		scl_data->viewport.height - scl_data->viewport.height % 2;

	chroma_viewport->x = luma_viewport->x;
	chroma_viewport->y = luma_viewport->y;
	chroma_viewport->height = luma_viewport->height;
	chroma_viewport->width = luma_viewport->width;

	if (scl_data->format == PIXEL_FORMAT_420BPP8) {
		/* round back up to even so the halved chroma stays aligned */
		luma_viewport->height += luma_viewport->height % 2;
		luma_viewport->width += luma_viewport->width % 2;
		/*for 420 video chroma is 1/4 the area of luma, scaled
		 *vertically and horizontally
		 */
		chroma_viewport->x = luma_viewport->x / 2;
		chroma_viewport->y = luma_viewport->y / 2;
		chroma_viewport->height = luma_viewport->height / 2;
		chroma_viewport->width = luma_viewport->width / 2;
	}
}

/*
 * Write the luma and chroma viewport start/size registers.  A viewport with
 * zero width or height is skipped entirely (its registers are left
 * untouched).
 */
static void program_viewport(
	struct dce_transform *xfm_dce,
	struct rect *luma_view_port,
	struct rect *chroma_view_port)
{
	struct dc_context *ctx = xfm_dce->base.ctx;
	uint32_t value = 0;
	uint32_t addr = 0;

	if (luma_view_port->width != 0 &&
		luma_view_port->height != 0) {
		addr = mmSCLV_VIEWPORT_START;
		value = 0;
		set_reg_field_value(
			value,
			luma_view_port->x,
			SCLV_VIEWPORT_START,
VIEWPORT_X_START); set_reg_field_value( value, luma_view_port->y, SCLV_VIEWPORT_START, VIEWPORT_Y_START); dm_write_reg(ctx, addr, value); addr = mmSCLV_VIEWPORT_SIZE; value = 0; set_reg_field_value( value, luma_view_port->height, SCLV_VIEWPORT_SIZE, VIEWPORT_HEIGHT); set_reg_field_value( value, luma_view_port->width, SCLV_VIEWPORT_SIZE, VIEWPORT_WIDTH); dm_write_reg(ctx, addr, value); } if (chroma_view_port->width != 0 && chroma_view_port->height != 0) { addr = mmSCLV_VIEWPORT_START_C; value = 0; set_reg_field_value( value, chroma_view_port->x, SCLV_VIEWPORT_START_C, VIEWPORT_X_START_C); set_reg_field_value( value, chroma_view_port->y, SCLV_VIEWPORT_START_C, VIEWPORT_Y_START_C); dm_write_reg(ctx, addr, value); addr = mmSCLV_VIEWPORT_SIZE_C; value = 0; set_reg_field_value( value, chroma_view_port->height, SCLV_VIEWPORT_SIZE_C, VIEWPORT_HEIGHT_C); set_reg_field_value( value, chroma_view_port->width, SCLV_VIEWPORT_SIZE_C, VIEWPORT_WIDTH_C); dm_write_reg(ctx, addr, value); } } /* * Function: * void setup_scaling_configuration * * Purpose: setup scaling mode : bypass, RGb, YCbCr and nummber of taps * Input: data * * Output: * void */ static bool setup_scaling_configuration( struct dce_transform *xfm_dce, const struct scaler_data *data) { bool is_scaling_needed = false; struct dc_context *ctx = xfm_dce->base.ctx; uint32_t value = 0; set_reg_field_value(value, data->taps.h_taps - 1, SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS); set_reg_field_value(value, data->taps.v_taps - 1, SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS); set_reg_field_value(value, data->taps.h_taps_c - 1, SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS_C); set_reg_field_value(value, data->taps.v_taps_c - 1, SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS_C); dm_write_reg(ctx, mmSCLV_TAP_CONTROL, value); value = 0; if (data->taps.h_taps + data->taps.v_taps > 2) { set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE); set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN); is_scaling_needed = true; } else { set_reg_field_value(value, 0, SCLV_MODE, 
SCL_MODE); set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN); } if (data->taps.h_taps_c + data->taps.v_taps_c > 2) { set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE_C); set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN_C); is_scaling_needed = true; } else if (data->format != PIXEL_FORMAT_420BPP8) { set_reg_field_value( value, get_reg_field_value(value, SCLV_MODE, SCL_MODE), SCLV_MODE, SCL_MODE_C); set_reg_field_value( value, get_reg_field_value(value, SCLV_MODE, SCL_PSCL_EN), SCLV_MODE, SCL_PSCL_EN_C); } else { set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE_C); set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN_C); } dm_write_reg(ctx, mmSCLV_MODE, value); value = 0; /* * 0 - Replaced out of bound pixels with black pixel * (or any other required color) * 1 - Replaced out of bound pixels with the edge pixel */ set_reg_field_value(value, 1, SCLV_CONTROL, SCL_BOUNDARY_MODE); dm_write_reg(ctx, mmSCLV_CONTROL, value); return is_scaling_needed; } /* * Function: * void program_overscan * * Purpose: Programs overscan border * Input: overscan * * Output: void */ static void program_overscan( struct dce_transform *xfm_dce, const struct scaler_data *data) { uint32_t overscan_left_right = 0; uint32_t overscan_top_bottom = 0; int overscan_right = data->h_active - data->recout.x - data->recout.width; int overscan_bottom = data->v_active - data->recout.y - data->recout.height; if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { overscan_bottom += 2; overscan_right += 2; } if (overscan_right < 0) { BREAK_TO_DEBUGGER(); overscan_right = 0; } if (overscan_bottom < 0) { BREAK_TO_DEBUGGER(); overscan_bottom = 0; } set_reg_field_value(overscan_left_right, data->recout.x, EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT); set_reg_field_value(overscan_left_right, overscan_right, EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT); set_reg_field_value(overscan_top_bottom, data->recout.y, EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP); 
set_reg_field_value(overscan_top_bottom, overscan_bottom, EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM); dm_write_reg(xfm_dce->base.ctx, mmSCLV_EXT_OVERSCAN_LEFT_RIGHT, overscan_left_right); dm_write_reg(xfm_dce->base.ctx, mmSCLV_EXT_OVERSCAN_TOP_BOTTOM, overscan_top_bottom); } static void set_coeff_update_complete( struct dce_transform *xfm_dce) { uint32_t value; value = dm_read_reg(xfm_dce->base.ctx, mmSCLV_UPDATE); set_reg_field_value(value, 1, SCLV_UPDATE, SCL_COEF_UPDATE_COMPLETE); dm_write_reg(xfm_dce->base.ctx, mmSCLV_UPDATE, value); } static void program_multi_taps_filter( struct dce_transform *xfm_dce, int taps, const uint16_t *coeffs, enum ram_filter_type filter_type) { struct dc_context *ctx = xfm_dce->base.ctx; int i, phase, pair; int array_idx = 0; int taps_pairs = (taps + 1) / 2; int phases_to_program = SCLV_PHASES / 2 + 1; uint32_t select = 0; uint32_t power_ctl, power_ctl_off; if (!coeffs) return; /*We need to disable power gating on coeff memory to do programming*/ power_ctl = dm_read_reg(ctx, mmDCFEV_MEM_PWR_CTRL); power_ctl_off = power_ctl; set_reg_field_value(power_ctl_off, 1, DCFEV_MEM_PWR_CTRL, SCLV_COEFF_MEM_PWR_DIS); dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl_off); /*Wait to disable gating:*/ for (i = 0; i < 10; i++) { if (get_reg_field_value( dm_read_reg(ctx, mmDCFEV_MEM_PWR_STATUS), DCFEV_MEM_PWR_STATUS, SCLV_COEFF_MEM_PWR_STATE) == 0) break; udelay(1); } set_reg_field_value(select, filter_type, SCLV_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE); for (phase = 0; phase < phases_to_program; phase++) { /*we always program N/2 + 1 phases, total phases N, but N/2-1 are just mirror phase 0 is unique and phase N/2 is unique if N is even*/ set_reg_field_value(select, phase, SCLV_COEF_RAM_SELECT, SCL_C_RAM_PHASE); for (pair = 0; pair < taps_pairs; pair++) { uint32_t data = 0; set_reg_field_value(select, pair, SCLV_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX); dm_write_reg(ctx, mmSCLV_COEF_RAM_SELECT, select); set_reg_field_value( data, 1, 
SCLV_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN); set_reg_field_value( data, coeffs[array_idx], SCLV_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF); if (taps % 2 && pair == taps_pairs - 1) { set_reg_field_value( data, 0, SCLV_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN); array_idx++; } else { set_reg_field_value( data, 1, SCLV_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN); set_reg_field_value( data, coeffs[array_idx + 1], SCLV_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF); array_idx += 2; } dm_write_reg(ctx, mmSCLV_COEF_RAM_TAP_DATA, data); } } /*We need to restore power gating on coeff memory to initial state*/ dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl); } static void calculate_inits( struct dce_transform *xfm_dce, const struct scaler_data *data, struct sclv_ratios_inits *inits, struct rect *luma_viewport, struct rect *chroma_viewport) { inits->h_int_scale_ratio_luma = dc_fixpt_u2d19(data->ratios.horz) << 5; inits->v_int_scale_ratio_luma = dc_fixpt_u2d19(data->ratios.vert) << 5; inits->h_int_scale_ratio_chroma = dc_fixpt_u2d19(data->ratios.horz_c) << 5; inits->v_int_scale_ratio_chroma = dc_fixpt_u2d19(data->ratios.vert_c) << 5; inits->h_init_luma.integer = 1; inits->v_init_luma.integer = 1; inits->h_init_chroma.integer = 1; inits->v_init_chroma.integer = 1; } static void program_scl_ratios_inits( struct dce_transform *xfm_dce, struct sclv_ratios_inits *inits) { struct dc_context *ctx = xfm_dce->base.ctx; uint32_t addr = mmSCLV_HORZ_FILTER_SCALE_RATIO; uint32_t value = 0; set_reg_field_value( value, inits->h_int_scale_ratio_luma, SCLV_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO); dm_write_reg(ctx, addr, value); addr = mmSCLV_VERT_FILTER_SCALE_RATIO; value = 0; set_reg_field_value( value, inits->v_int_scale_ratio_luma, SCLV_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO); dm_write_reg(ctx, addr, value); addr = mmSCLV_HORZ_FILTER_SCALE_RATIO_C; value = 0; set_reg_field_value( value, inits->h_int_scale_ratio_chroma, SCLV_HORZ_FILTER_SCALE_RATIO_C, 
SCL_H_SCALE_RATIO_C); dm_write_reg(ctx, addr, value); addr = mmSCLV_VERT_FILTER_SCALE_RATIO_C; value = 0; set_reg_field_value( value, inits->v_int_scale_ratio_chroma, SCLV_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C); dm_write_reg(ctx, addr, value); addr = mmSCLV_HORZ_FILTER_INIT; value = 0; set_reg_field_value( value, inits->h_init_luma.fraction, SCLV_HORZ_FILTER_INIT, SCL_H_INIT_FRAC); set_reg_field_value( value, inits->h_init_luma.integer, SCLV_HORZ_FILTER_INIT, SCL_H_INIT_INT); dm_write_reg(ctx, addr, value); addr = mmSCLV_VERT_FILTER_INIT; value = 0; set_reg_field_value( value, inits->v_init_luma.fraction, SCLV_VERT_FILTER_INIT, SCL_V_INIT_FRAC); set_reg_field_value( value, inits->v_init_luma.integer, SCLV_VERT_FILTER_INIT, SCL_V_INIT_INT); dm_write_reg(ctx, addr, value); addr = mmSCLV_HORZ_FILTER_INIT_C; value = 0; set_reg_field_value( value, inits->h_init_chroma.fraction, SCLV_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C); set_reg_field_value( value, inits->h_init_chroma.integer, SCLV_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C); dm_write_reg(ctx, addr, value); addr = mmSCLV_VERT_FILTER_INIT_C; value = 0; set_reg_field_value( value, inits->v_init_chroma.fraction, SCLV_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C); set_reg_field_value( value, inits->v_init_chroma.integer, SCLV_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C); dm_write_reg(ctx, addr, value); } static const uint16_t *get_filter_coeffs_64p(int taps, struct fixed31_32 ratio) { if (taps == 4) return get_filter_4tap_64p(ratio); else if (taps == 2) return get_filter_2tap_64p(); else if (taps == 1) return NULL; else { /* should never happen, bug */ BREAK_TO_DEBUGGER(); return NULL; } } static bool dce110_xfmv_power_up_line_buffer(struct transform *xfm) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); uint32_t value; value = dm_read_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL); /*Use all three pieces of memory always*/ set_reg_field_value(value, 0, LBV_MEMORY_CTRL, LB_MEMORY_CONFIG); /*hard coded number DCE11 1712(0x6B0) 
Partitions: 720/960/1712*/ set_reg_field_value(value, xfm_dce->lb_memory_size, LBV_MEMORY_CTRL, LB_MEMORY_SIZE); dm_write_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL, value); return true; } static void dce110_xfmv_set_scaler( struct transform *xfm, const struct scaler_data *data) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); bool is_scaling_required = false; bool filter_updated = false; const uint16_t *coeffs_v, *coeffs_h, *coeffs_h_c, *coeffs_v_c; struct rect luma_viewport = {0}; struct rect chroma_viewport = {0}; dce110_xfmv_power_up_line_buffer(xfm); /* 1. Calculate viewport, viewport programming should happen after init * calculations as they may require an adjustment in the viewport. */ calculate_viewport(data, &luma_viewport, &chroma_viewport); /* 2. Program overscan */ program_overscan(xfm_dce, data); /* 3. Program taps and configuration */ is_scaling_required = setup_scaling_configuration(xfm_dce, data); if (is_scaling_required) { /* 4. Calculate and program ratio, filter initialization */ struct sclv_ratios_inits inits = { 0 }; calculate_inits( xfm_dce, data, &inits, &luma_viewport, &chroma_viewport); program_scl_ratios_inits(xfm_dce, &inits); coeffs_v = get_filter_coeffs_64p(data->taps.v_taps, data->ratios.vert); coeffs_h = get_filter_coeffs_64p(data->taps.h_taps, data->ratios.horz); coeffs_v_c = get_filter_coeffs_64p(data->taps.v_taps_c, data->ratios.vert_c); coeffs_h_c = get_filter_coeffs_64p(data->taps.h_taps_c, data->ratios.horz_c); if (coeffs_v != xfm_dce->filter_v || coeffs_v_c != xfm_dce->filter_v_c || coeffs_h != xfm_dce->filter_h || coeffs_h_c != xfm_dce->filter_h_c) { /* 5. Program vertical filters */ program_multi_taps_filter( xfm_dce, data->taps.v_taps, coeffs_v, FILTER_TYPE_RGB_Y_VERTICAL); program_multi_taps_filter( xfm_dce, data->taps.v_taps_c, coeffs_v_c, FILTER_TYPE_CBCR_VERTICAL); /* 6. 
Program horizontal filters */ program_multi_taps_filter( xfm_dce, data->taps.h_taps, coeffs_h, FILTER_TYPE_RGB_Y_HORIZONTAL); program_multi_taps_filter( xfm_dce, data->taps.h_taps_c, coeffs_h_c, FILTER_TYPE_CBCR_HORIZONTAL); xfm_dce->filter_v = coeffs_v; xfm_dce->filter_v_c = coeffs_v_c; xfm_dce->filter_h = coeffs_h; xfm_dce->filter_h_c = coeffs_h_c; filter_updated = true; } } /* 7. Program the viewport */ program_viewport(xfm_dce, &luma_viewport, &chroma_viewport); /* 8. Set bit to flip to new coefficient memory */ if (filter_updated) set_coeff_update_complete(xfm_dce); } static void dce110_xfmv_reset(struct transform *xfm) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); xfm_dce->filter_h = NULL; xfm_dce->filter_v = NULL; xfm_dce->filter_h_c = NULL; xfm_dce->filter_v_c = NULL; } static void dce110_xfmv_set_gamut_remap( struct transform *xfm, const struct xfm_grph_csc_adjustment *adjust) { /* DO NOTHING*/ } static void dce110_xfmv_set_pixel_storage_depth( struct transform *xfm, enum lb_pixel_depth depth, const struct bit_depth_reduction_params *bit_depth_params) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); int pixel_depth = 0; int expan_mode = 0; uint32_t reg_data = 0; switch (depth) { case LB_PIXEL_DEPTH_18BPP: pixel_depth = 2; expan_mode = 1; break; case LB_PIXEL_DEPTH_24BPP: pixel_depth = 1; expan_mode = 1; break; case LB_PIXEL_DEPTH_30BPP: pixel_depth = 0; expan_mode = 1; break; case LB_PIXEL_DEPTH_36BPP: pixel_depth = 3; expan_mode = 0; break; default: BREAK_TO_DEBUGGER(); break; } set_reg_field_value( reg_data, expan_mode, LBV_DATA_FORMAT, PIXEL_EXPAN_MODE); set_reg_field_value( reg_data, pixel_depth, LBV_DATA_FORMAT, PIXEL_DEPTH); dm_write_reg(xfm->ctx, mmLBV_DATA_FORMAT, reg_data); if (!(xfm_dce->lb_pixel_depth_supported & depth)) { /*we should use unsupported capabilities * unless it is required by w/a*/ DC_LOG_WARNING("%s: Capability not supported", __func__); } } static const struct transform_funcs dce110_xfmv_funcs = { 
.transform_reset = dce110_xfmv_reset,
	.transform_set_scaler = dce110_xfmv_set_scaler,
	.transform_set_gamut_remap = dce110_xfmv_set_gamut_remap,
	.opp_set_csc_default = dce110_opp_v_set_csc_default,
	.opp_set_csc_adjustment = dce110_opp_v_set_csc_adjustment,
	.opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut_v,
	.opp_program_regamma_pwl = dce110_opp_program_regamma_pwl_v,
	.opp_set_regamma_mode = dce110_opp_set_regamma_mode_v,
	.transform_set_pixel_storage_depth =
		dce110_xfmv_set_pixel_storage_depth,
	.transform_get_optimal_number_of_taps =
		dce_transform_get_optimal_number_of_taps
};

/*****************************************/
/* Constructor, Destructor               */
/*****************************************/

/*
 * Initialize a dce_transform object for the DCE110 underlay (video) pipe:
 * record the dc_context, install the dce110_xfmv_funcs vtable, and set the
 * line-buffer capabilities (supported pixel depths, prescaler availability,
 * entry width, and total entry count).
 *
 * Always returns true.
 */
bool dce110_transform_v_construct(
	struct dce_transform *xfm_dce,
	struct dc_context *ctx)
{
	xfm_dce->base.ctx = ctx;

	xfm_dce->base.funcs = &dce110_xfmv_funcs;

	/* line buffer can store entries at any of these pixel depths */
	xfm_dce->lb_pixel_depth_supported =
			LB_PIXEL_DEPTH_18BPP |
			LB_PIXEL_DEPTH_24BPP |
			LB_PIXEL_DEPTH_30BPP |
			LB_PIXEL_DEPTH_36BPP;

	xfm_dce->prescaler_on = true;
	xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
	xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/

	return true;
}
linux-master
drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "core_types.h" #include "dce120_hw_sequencer.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "reg_helper.h" #define CTX \ hws->ctx #define REG(reg)\ hws->regs->reg #undef FN #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name struct dce120_hw_seq_reg_offsets { uint32_t crtc; }; #if 0 static const struct dce120_hw_seq_reg_offsets reg_offsets[] = { { .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL), }, { .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL), }, { .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL), }, { .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL), }, { .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL), }, { .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL), } }; #define HW_REG_CRTC(reg, id)\ (reg + reg_offsets[id].crtc) #define CNTL_ID(controller_id)\ controller_id /******************************************************************************* * Private definitions ******************************************************************************/ static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id) { uint32_t addr; uint32_t value = 0; uint32_t chunk_int = 0; uint32_t chunk_mul = 0; /* addr = mmDCP0_DVMM_PTE_CONTROL + controller_id * (mmDCP1_DVMM_PTE_CONTROL- mmDCP0_DVMM_PTE_CONTROL); value = dm_read_reg(ctx, addr); set_reg_field_value( value, 0, DCP, controller_id, DVMM_PTE_CONTROL, DVMM_USE_SINGLE_PTE); set_reg_field_value_soc15( value, 1, DCP, controller_id, DVMM_PTE_CONTROL, DVMM_PTE_BUFFER_MODE0); set_reg_field_value_soc15( value, 1, DCP, controller_id, DVMM_PTE_CONTROL, DVMM_PTE_BUFFER_MODE1); dm_write_reg(ctx, addr, value);*/ addr = mmDVMM_PTE_REQ; value = dm_read_reg(ctx, addr); chunk_int = 
get_reg_field_value(
			value,
			DVMM_PTE_REQ,
			HFLIP_PTEREQ_PER_CHUNK_INT);
	chunk_mul =
		get_reg_field_value(
			value,
			DVMM_PTE_REQ,
			HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);

	/* only reprogram when the current 4/4 chunk settings are not in place */
	if (chunk_int != 0x4 || chunk_mul != 0x4) {
		set_reg_field_value(
			value,
			255,
			DVMM_PTE_REQ,
			MAX_PTEREQ_TO_ISSUE);
		set_reg_field_value(
			value,
			4,
			DVMM_PTE_REQ,
			HFLIP_PTEREQ_PER_CHUNK_INT);
		set_reg_field_value(
			value,
			4,
			DVMM_PTE_REQ,
			HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);

		dm_write_reg(ctx, addr, value);
	}
}
#endif

/*
 * Pipe power-gating entry point for DCE12.
 *
 * The real implementation is compiled out (#if 0 — "disable for bringup"),
 * so this currently always returns false, i.e. reports that power gating
 * was not performed.
 */
static bool dce120_enable_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	/* disable for bringup */
#if 0
	enum bp_result bp_result = BP_RESULT_OK;
	enum bp_pipe_control_action cntl;
	struct dc_context *ctx = dc->ctx;

	if (power_gating == PIPE_GATING_CONTROL_INIT)
		cntl = ASIC_PIPE_INIT;
	else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
		cntl = ASIC_PIPE_ENABLE;
	else
		cntl = ASIC_PIPE_DISABLE;

	if (power_gating != PIPE_GATING_CONTROL_INIT ||
		controller_id == 0) {
		bp_result = dcb->funcs->enable_disp_power_gating(
						dcb, controller_id + 1, cntl);

		/* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
		 * by default when command table is called
		 */
		dm_write_reg(ctx,
			HW_REG_CRTC(mmCRTC0_CRTC_MASTER_UPDATE_MODE, controller_id),
			0);
	}

	if (power_gating != PIPE_GATING_CONTROL_ENABLE)
		dce120_init_pte(ctx, controller_id);

	if (bp_result == BP_RESULT_OK)
		return true;
	else
		return false;
#endif
	return false;
}

/*
 * Program the DCHUB AGP aperture registers according to the frame-buffer
 * mode carried in @dh_data (ZFB only / mixed ZFB+local / local only), then
 * mark the dchub data as initialized and consume the pending update.
 */
static void dce120_update_dchub(
	struct dce_hwseq *hws,
	struct dchub_init_data *dh_data)
{
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
		REG_UPDATE_2(DCHUB_FB_LOCATION,
				FB_TOP, 0,
				FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUB_AGP_BASE,
				AGP_BASE,
				dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUB_AGP_BOT,
				AGP_BOT,
				dh_data->zfb_mc_base_addr >> 22);

		/* top of aperture = last byte of the ZFB region, in 4MB units */
		REG_UPDATE(DCHUB_AGP_TOP,
				AGP_TOP,
				(dh_data->zfb_mc_base_addr +
				dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
		REG_UPDATE(DCHUB_AGP_BASE,
				AGP_BASE,
				dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUB_AGP_BOT,
				AGP_BOT,
				dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUB_AGP_TOP,
				AGP_TOP,
				(dh_data->zfb_mc_base_addr +
				dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
		REG_UPDATE(DCHUB_AGP_BASE,
				AGP_BASE,
				0);

		REG_UPDATE(DCHUB_AGP_BOT,
				AGP_BOT,
				0x03FFFF);

		REG_UPDATE(DCHUB_AGP_TOP,
				AGP_TOP,
				0);
		break;
	default:
		break;
	}

	/* NOTE: "initialzied" is the field's actual (misspelled) name */
	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

/**
 * dce121_xgmi_enabled() - Check if xGMI is enabled
 * @hws: DCE hardware sequencer object
 *
 * Return true if xGMI is enabled. False otherwise.
 */
bool dce121_xgmi_enabled(struct dce_hwseq *hws)
{
	uint32_t pf_max_region;

	REG_GET(MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION, &pf_max_region);
	/* PF_MAX_REGION == 0 means xgmi is disabled */
	return !!pf_max_region;
}

/*
 * Build the DCE12 hardware sequencer on top of the DCE11 base construct,
 * then override the two DCE12-specific callbacks.
 */
void dce120_hw_sequencer_construct(struct dc *dc)
{
	/* All registers used by dce11.2 match those in dce11 in offset and
	 * structure
	 */
	dce110_hw_sequencer_construct(dc);
	dc->hwseq->funcs.enable_display_power_gating =
		dce120_enable_display_power_gating;
	dc->hwss.update_dchub = dce120_update_dchub;
}
linux-master
drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
* * Authors: AMD * */ #include "dm_services.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dce120_resource.h" #include "dce112/dce112_resource.h" #include "dce110/dce110_resource.h" #include "../virtual/virtual_stream_encoder.h" #include "dce120_timing_generator.h" #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_ipp.h" #include "dce/dce_mem_input.h" #include "dce/dce_panel_cntl.h" #include "dce110/dce110_hw_sequencer.h" #include "dce120/dce120_hw_sequencer.h" #include "dce/dce_transform.h" #include "clk_mgr.h" #include "dce/dce_audio.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_hwseq.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dce120_clk_src_array_id { DCE120_CLK_SRC_PLL0, DCE120_CLK_SRC_PLL1, DCE120_CLK_SRC_PLL2, DCE120_CLK_SRC_PLL3, DCE120_CLK_SRC_PLL4, DCE120_CLK_SRC_PLL5, 
DCE120_CLK_SRC_TOTAL }; static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), } }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) /* compile time expand base address. */ #define BASE(seg) \ BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* macros to expend register list macro defined in HW object header file * end *********************/ static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), 
ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE120_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCE_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { 
DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; static const struct dce110_aux_registers_shift aux_shift = { DCE12_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCE12_AUX_MASK_SH_LIST(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; static int map_transmitter_id_to_phy_instance( enum transmitter transmitter) { switch (transmitter) { case TRANSMITTER_UNIPHY_A: return 0; case TRANSMITTER_UNIPHY_B: return 1; case TRANSMITTER_UNIPHY_C: return 2; case TRANSMITTER_UNIPHY_D: return 3; case TRANSMITTER_UNIPHY_E: return 4; case TRANSMITTER_UNIPHY_F: return 5; case TRANSMITTER_UNIPHY_G: 
return 6; default: ASSERT(0); return 0; } } #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E), clk_src_regs(5, F) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) }; static struct output_pixel_processor *dce120_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } static struct dce_aux *dce120_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; static struct dce_i2c_hw *dce120_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static const struct bios_registers bios_regs = 
{ .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 7, .num_stream_encoder = 6, .num_pll = 6, .num_ddc = 6, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, .enable_legacy_fast_update = true, }; static struct clock_source *dce120_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void dce120_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static bool dce120_hw_sequencer_create(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and * structure */ dce120_hw_sequencer_construct(dc); /*TODO Move to separate file and Override what is needed */ return true; } static struct timing_generator *dce120_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce120_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static void dce120_transform_destroy(struct 
transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static void dce120_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce120_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) dce120_clock_source_destroy( &pool->base.clock_sources[i]); } if (pool->base.dp_clock_source != NULL) dce120_clock_source_destroy(&pool->base.dp_clock_source); if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0); straps->audio_stream_number = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, AUDIO_STREAM_NUMBER); 
straps->hdmi_disable = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, HDMI_DISABLE); reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0); straps->dc_pinstraps_audio = get_reg_field_value(reg_val, DC_PINSTRAPS, DC_PINSTRAPS_AUDIO); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true, }; static struct link_encoder *dce120_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; if (!enc110) return NULL; link_regs_id = map_transmitter_id_to_phy_instance(enc_init_data->transmitter); dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } static struct input_pixel_processor *dce120_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static 
struct stream_encoder *dce120_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE120_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE12_MASK_SH_LIST(_MASK) }; /* HWSEQ regs for VG20 */ static const struct dce_hwseq_registers dce121_hwseq_reg = { HWSEQ_VG20_REG_LIST() }; static const struct dce_hwseq_shift dce121_hwseq_shift = { HWSEQ_VG20_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask dce121_hwseq_mask = { HWSEQ_VG20_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce120_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static struct dce_hwseq *dce121_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &dce121_hwseq_reg; hws->shifts = &dce121_hwseq_shift; hws->masks = &dce121_hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce120_stream_encoder_create, .create_hwseq = dce120_hwseq_create, }; static const struct resource_create_funcs dce121_res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = 
dce120_stream_encoder_create, .create_hwseq = dce121_hwseq_create, }; #define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_mem_input_mask mi_masks = { MI_DCE12_MASK_SH_LIST(_MASK) }; static struct mem_input *dce120_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); return &dce_mi->base; } static struct transform *dce120_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->lb_memory_size = 0x1404; /*5124*/ return &transform->base; } static void dce120_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); dce120_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce120_res_pool_funcs = { .destroy = dce120_destroy_resource_pool, .link_enc_create = dce120_link_encoder_create, .panel_cntl_create = dce120_panel_cntl_create, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels_with_latency eng_clks = {0}; struct dm_pp_clock_levels_with_latency mem_clks = {0}; struct 
dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; int i; unsigned int clk; unsigned int latency; /*original logic in dal3*/ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; /*do system clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &eng_clks) || eng_clks.num_levels == 0) { eng_clks.num_levels = 8; clk = 300000; for (i = 0; i < eng_clks.num_levels; i++) { eng_clks.data[i].clocks_in_khz = clk; clk += 100000; } } /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( eng_clks.data[0].clocks_in_khz, 1000); /*do memory clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &mem_clks) || mem_clks.num_levels == 0) { mem_clks.num_levels = 3; clk = 250000; latency = 45; for (i = 0; i < eng_clks.num_levels; i++) { mem_clks.data[i].clocks_in_khz = clk; mem_clks.data[i].latency_in_us = latency; clk += 500000; latency -= 5; } } /* we don't need to call PPLIB for validation clock since they * also give us the highest sclk and highest mclk (UMA clock). 
* ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): * YCLK = UMACLK*m_memoryTypeMultiplier */ if (dc->bw_vbios->memory_type == bw_def_hbm) memory_type_multiplier = MEMORY_TYPE_HBM; dc->bw_vbios->low_yclk = bw_frc_to_fixed( mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select * depending on DPM state they are in. And update BW MGR GFX Engine and * Memory clock member variables for Watermarks calculations for each * Watermark Set */ clk_ranges.num_wm_sets = 4; clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; 
clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); } static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); /* VG20 support max 6 pipes */ value = value & 0x3f; return value; } static bool dce120_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; int j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data irq_init_data; static const struct resource_create_funcs *res_funcs; bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); uint32_t pipe_fuses; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce120_res_pool_funcs; /* TODO: Fill more data from GreenlandAsicCapability.cpp */ pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi 
= true; dc->caps.psp_setup_panel_mode = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL1] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL2] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL3] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL4] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL5] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL5, &clk_src_regs[5], false); pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL; pool->base.dp_clock_source = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto clk_src_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } irq_init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data); if 
(!pool->base.irqs) goto irqs_create_fail; /* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */ if (is_vg20) pipe_fuses = read_pipe_fuses(ctx); /* index to valid pipe resource */ j = 0; for (i = 0; i < pool->base.pipe_count; i++) { if (is_vg20) { if ((pipe_fuses & (1 << i)) != 0) { dm_error("DC: skip invalid pipe %d!\n", i); continue; } } pool->base.timing_generators[j] = dce120_timing_generator_create( ctx, i, &dce120_tg_offsets[i]); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto controller_create_fail; } pool->base.mis[j] = dce120_mem_input_create(ctx, i); if (pool->base.mis[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto controller_create_fail; } pool->base.ipps[j] = dce120_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto controller_create_fail; } pool->base.transforms[j] = dce120_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[j] = dce120_opp_create( ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); } /* check next valid pipe */ j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce120_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; if (is_vg20) res_funcs = &dce121_res_create_funcs; else res_funcs = &res_create_funcs; if 
(!resource_construct(num_virtual_links, dc, &pool->base, res_funcs)) goto res_create_fail; /* Create hardware sequencer */ if (!dce120_hw_sequencer_create(dc)) goto controller_create_fail; dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; irqs_create_fail: controller_create_fail: clk_src_create_fail: res_create_fail: dce120_resource_destruct(pool); return false; } struct resource_pool *dce120_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce120_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "dc_types.h" #include "dc_bios_types.h" #include "include/grph_object_id.h" #include "include/logger_interface.h" #include "dce120_timing_generator.h" #include "timing_generator.h" #define CRTC_REG_UPDATE_N(reg_name, n, ...) \ generic_reg_update_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__) #define CRTC_REG_SET_N(reg_name, n, ...) 
\ generic_reg_set_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__)

#define CRTC_REG_UPDATE(reg, field, val)	\
	CRTC_REG_UPDATE_N(reg, 1, FD(reg##__##field), val)

#define CRTC_REG_UPDATE_2(reg, field1, val1, field2, val2)	\
	CRTC_REG_UPDATE_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)

#define CRTC_REG_UPDATE_3(reg, field1, val1, field2, val2, field3, val3)	\
	CRTC_REG_UPDATE_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)

/* BUGFIX: the 4- and 5-pair variants previously passed n = 3 to
 * CRTC_REG_UPDATE_N, so the trailing field/value pairs were silently
 * dropped by generic_reg_update_soc15.  Pass the true pair count.
 */
#define CRTC_REG_UPDATE_4(reg, field1, val1, field2, val2, field3, val3, field4, val4)	\
	CRTC_REG_UPDATE_N(reg, 4, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4)

#define CRTC_REG_UPDATE_5(reg, field1, val1, field2, val2, field3, val3, field4, val4, field5, val5)	\
	CRTC_REG_UPDATE_N(reg, 5, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4, FD(reg##__##field5), val5)

#define CRTC_REG_SET(reg, field, val)	\
	CRTC_REG_SET_N(reg, 1, FD(reg##__##field), val)

#define CRTC_REG_SET_2(reg, field1, val1, field2, val2)	\
	CRTC_REG_SET_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)

#define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3)	\
	CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)

/*
 *****************************************************************************
 *  Function: is_in_vertical_blank
 *
 *  @brief
 *     check the current status of CRTC to check if we are in Vertical Blank
 *     region
 *
 *  @return
 *     true if currently in blank region, false otherwise
 *
 *****************************************************************************
 */
static bool dce120_timing_generator_is_in_vertical_blank(
		struct timing_generator *tg)
{
	uint32_t field = 0;
	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
	uint32_t value = dm_read_reg_soc15(
					tg->ctx,
					mmCRTC0_CRTC_STATUS,
tg110->offsets.crtc); field = get_reg_field_value(value, CRTC0_CRTC_STATUS, CRTC_V_BLANK); return field == 1; } /* determine if given timing can be supported by TG */ static bool dce120_timing_generator_validate_timing( struct timing_generator *tg, const struct dc_crtc_timing *timing, enum signal_type signal) { uint32_t interlace_factor = timing->flags.INTERLACE ? 2 : 1; uint32_t v_blank = (timing->v_total - timing->v_addressable - timing->v_border_top - timing->v_border_bottom) * interlace_factor; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); if (!dce110_timing_generator_validate_timing( tg, timing, signal)) return false; if (v_blank < tg110->min_v_blank || timing->h_sync_width < tg110->min_h_sync_width || timing->v_sync_width < tg110->min_v_sync_width) return false; return true; } static bool dce120_tg_validate_timing(struct timing_generator *tg, const struct dc_crtc_timing *timing) { return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE); } /******** HW programming ************/ /* Disable/Enable Timing Generator */ static bool dce120_timing_generator_enable_crtc(struct timing_generator *tg) { enum bp_result result; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); /* Set MASTER_UPDATE_MODE to 0 * This is needed for DRR, and also suggested to be default value by Syed.*/ CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_LOCK, UNDERFLOW_UPDATE_LOCK, 0); /* TODO API for AtomFirmware didn't change*/ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true); return result == BP_RESULT_OK; } static void dce120_timing_generator_set_early_control( struct timing_generator *tg, uint32_t early_cntl) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); CRTC_REG_UPDATE(CRTC0_CRTC_CONTROL, CRTC_HBLANK_EARLY_CONTROL, early_cntl); } /**************** TG current status ******************/ /* return the current frame counter. 
Used by Linux kernel DRM */ static uint32_t dce120_timing_generator_get_vblank_counter( struct timing_generator *tg) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_STATUS_FRAME_COUNT, tg110->offsets.crtc); uint32_t field = get_reg_field_value( value, CRTC0_CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT); return field; } /* Get current H and V position */ static void dce120_timing_generator_get_crtc_position( struct timing_generator *tg, struct crtc_position *position) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_STATUS_POSITION, tg110->offsets.crtc); position->horizontal_count = get_reg_field_value(value, CRTC0_CRTC_STATUS_POSITION, CRTC_HORZ_COUNT); position->vertical_count = get_reg_field_value(value, CRTC0_CRTC_STATUS_POSITION, CRTC_VERT_COUNT); value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_NOM_VERT_POSITION, tg110->offsets.crtc); position->nominal_vcount = get_reg_field_value(value, CRTC0_CRTC_NOM_VERT_POSITION, CRTC_VERT_COUNT_NOM); } /* wait until TG is in beginning of vertical blank region */ static void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg) { /* We want to catch beginning of VBlank here, so if the first try are * in VBlank, we might be very close to Active, in this case wait for * another frame */ while (dce120_timing_generator_is_in_vertical_blank(tg)) { if (!tg->funcs->is_counter_moving(tg)) { /* error - no point to wait if counter is not moving */ break; } } while (!dce120_timing_generator_is_in_vertical_blank(tg)) { if (!tg->funcs->is_counter_moving(tg)) { /* error - no point to wait if counter is not moving */ break; } } } /* wait until TG is in beginning of active region */ static void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg) { while (dce120_timing_generator_is_in_vertical_blank(tg)) { if (!tg->funcs->is_counter_moving(tg)) { /* error - no point 
to wait if counter is not moving */ break; } } } /*********** Timing Generator Synchronization routines ****/ /* Setups Global Swap Lock group, TimingServer or TimingClient*/ static void dce120_timing_generator_setup_global_swap_lock( struct timing_generator *tg, const struct dcp_gsl_params *gsl_params) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value_crtc_vtotal = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_V_TOTAL, tg110->offsets.crtc); /* Checkpoint relative to end of frame */ uint32_t check_point = get_reg_field_value(value_crtc_vtotal, CRTC0_CRTC_V_TOTAL, CRTC_V_TOTAL); dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_GSL_WINDOW, tg110->offsets.crtc, 0); CRTC_REG_UPDATE_N(DCP0_DCP_GSL_CONTROL, 6, /* This pipe will belong to GSL Group zero. */ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 1, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), gsl_params->gsl_master == tg->inst, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY, /* Keep signal low (pending high) during 6 lines. * Also defines minimum interval before re-checking signal. 
*/ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY, /* DCP_GSL_PURPOSE_SURFACE_FLIP */ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 1); CRTC_REG_SET_2( CRTC0_CRTC_GSL_CONTROL, CRTC_GSL_CHECK_LINE_NUM, check_point - FLIP_READY_BACK_LOOKUP, CRTC_GSL_FORCE_DELAY, VFLIP_READY_DELAY); } /* Clear all the register writes done by setup_global_swap_lock */ static void dce120_timing_generator_tear_down_global_swap_lock( struct timing_generator *tg) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); /* Settig HW default values from reg specs */ CRTC_REG_SET_N(DCP0_DCP_GSL_CONTROL, 6, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 0, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), 0, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY, /* DCP_GSL_PURPOSE_SURFACE_FLIP */ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0, FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 0); CRTC_REG_SET_2(CRTC0_CRTC_GSL_CONTROL, CRTC_GSL_CHECK_LINE_NUM, 0, CRTC_GSL_FORCE_DELAY, 0x2); /*TODO Why this value here ?*/ } /* Reset slave controllers on master VSync */ static void dce120_timing_generator_enable_reset_trigger( struct timing_generator *tg, int source) { enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t rising_edge = 0; uint32_t falling_edge = 0; /* Setup trigger edge */ uint32_t pol_value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_V_SYNC_A_CNTL, tg110->offsets.crtc); /* Register spec has reversed definition: * 0 for positive, 1 for negative */ if (get_reg_field_value(pol_value, CRTC0_CRTC_V_SYNC_A_CNTL, CRTC_V_SYNC_A_POL) == 0) { rising_edge = 1; } else { falling_edge = 1; } /* TODO What about other sources ?*/ trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0; 
CRTC_REG_UPDATE_N(CRTC0_CRTC_TRIGB_CNTL, 7, FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT), trig_src_select, FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT), TRIGGER_POLARITY_SELECT_LOGIC_ZERO, FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL), rising_edge, FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL), falling_edge, /* send every signal */ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT), 0, /* no delay */ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY), 0, /* clear trigger status */ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR), 1); CRTC_REG_UPDATE_3( CRTC0_CRTC_FORCE_COUNT_NOW_CNTL, CRTC_FORCE_COUNT_NOW_MODE, 2, CRTC_FORCE_COUNT_NOW_TRIG_SEL, 1, CRTC_FORCE_COUNT_NOW_CLEAR, 1); } /* disabling trigger-reset */ static void dce120_timing_generator_disable_reset_trigger( struct timing_generator *tg) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); CRTC_REG_UPDATE_2( CRTC0_CRTC_FORCE_COUNT_NOW_CNTL, CRTC_FORCE_COUNT_NOW_MODE, 0, CRTC_FORCE_COUNT_NOW_CLEAR, 1); CRTC_REG_UPDATE_3( CRTC0_CRTC_TRIGB_CNTL, CRTC_TRIGB_SOURCE_SELECT, TRIGGER_SOURCE_SELECT_LOGIC_ZERO, CRTC_TRIGB_POLARITY_SELECT, TRIGGER_POLARITY_SELECT_LOGIC_ZERO, /* clear trigger status */ CRTC_TRIGB_CLEAR, 1); } /* Checks whether CRTC triggered reset occurred */ static bool dce120_timing_generator_did_triggered_reset_occur( struct timing_generator *tg) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL, tg110->offsets.crtc); return get_reg_field_value(value, CRTC0_CRTC_FORCE_COUNT_NOW_CNTL, CRTC_FORCE_COUNT_NOW_OCCURRED) != 0; } /******** Stuff to move to other virtual HW objects *****************/ /* Move to enable accelerated mode */ static void dce120_timing_generator_disable_vga(struct timing_generator *tg) { uint32_t offset = 0; uint32_t value = 0; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); switch (tg110->controller_id) { case 
CONTROLLER_ID_D0: offset = 0; break; case CONTROLLER_ID_D1: offset = mmD2VGA_CONTROL - mmD1VGA_CONTROL; break; case CONTROLLER_ID_D2: offset = mmD3VGA_CONTROL - mmD1VGA_CONTROL; break; case CONTROLLER_ID_D3: offset = mmD4VGA_CONTROL - mmD1VGA_CONTROL; break; case CONTROLLER_ID_D4: offset = mmD5VGA_CONTROL - mmD1VGA_CONTROL; break; case CONTROLLER_ID_D5: offset = mmD6VGA_CONTROL - mmD1VGA_CONTROL; break; default: break; } value = dm_read_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset); set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE); set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT); set_reg_field_value( value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT); set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN); dm_write_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset, value); } /* TODO: Should we move it to transform */ /* Fully program CRTC timing in timing generator */ static void dce120_timing_generator_program_blanking( struct timing_generator *tg, const struct dc_crtc_timing *timing) { uint32_t tmp1 = 0; uint32_t tmp2 = 0; uint32_t vsync_offset = timing->v_border_bottom + timing->v_front_porch; uint32_t v_sync_start = timing->v_addressable + vsync_offset; uint32_t hsync_offset = timing->h_border_right + timing->h_front_porch; uint32_t h_sync_start = timing->h_addressable + hsync_offset; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); CRTC_REG_UPDATE( CRTC0_CRTC_H_TOTAL, CRTC_H_TOTAL, timing->h_total - 1); CRTC_REG_UPDATE( CRTC0_CRTC_V_TOTAL, CRTC_V_TOTAL, timing->v_total - 1); /* In case of V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and * V_TOTAL_MIN are equal to V_TOTAL. 
*/ CRTC_REG_UPDATE( CRTC0_CRTC_V_TOTAL_MAX, CRTC_V_TOTAL_MAX, timing->v_total - 1); CRTC_REG_UPDATE( CRTC0_CRTC_V_TOTAL_MIN, CRTC_V_TOTAL_MIN, timing->v_total - 1); tmp1 = timing->h_total - (h_sync_start + timing->h_border_left); tmp2 = tmp1 + timing->h_addressable + timing->h_border_left + timing->h_border_right; CRTC_REG_UPDATE_2( CRTC0_CRTC_H_BLANK_START_END, CRTC_H_BLANK_END, tmp1, CRTC_H_BLANK_START, tmp2); tmp1 = timing->v_total - (v_sync_start + timing->v_border_top); tmp2 = tmp1 + timing->v_addressable + timing->v_border_top + timing->v_border_bottom; CRTC_REG_UPDATE_2( CRTC0_CRTC_V_BLANK_START_END, CRTC_V_BLANK_END, tmp1, CRTC_V_BLANK_START, tmp2); } /* TODO: Should we move it to opp? */ /* Combine with below and move YUV/RGB color conversion to SW layer */ static void dce120_timing_generator_program_blank_color( struct timing_generator *tg, const struct tg_color *black_color) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); CRTC_REG_UPDATE_3( CRTC0_CRTC_BLACK_COLOR, CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb, CRTC_BLACK_COLOR_G_Y, black_color->color_g_y, CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr); } /* Combine with above and move YUV/RGB color conversion to SW layer */ static void dce120_timing_generator_set_overscan_color_black( struct timing_generator *tg, const struct tg_color *color) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value = 0; CRTC_REG_SET_3( CRTC0_CRTC_OVERSCAN_COLOR, CRTC_OVERSCAN_COLOR_BLUE, color->color_b_cb, CRTC_OVERSCAN_COLOR_GREEN, color->color_g_y, CRTC_OVERSCAN_COLOR_RED, color->color_r_cr); value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_OVERSCAN_COLOR, tg110->offsets.crtc); dm_write_reg_soc15( tg->ctx, mmCRTC0_CRTC_BLACK_COLOR, tg110->offsets.crtc, value); /* This is desirable to have a constant DAC output voltage during the * blank time that is higher than the 0 volt reference level that the * DAC outputs when the NBLANK signal * is asserted low, such as for output to an 
analog TV. */ dm_write_reg_soc15( tg->ctx, mmCRTC0_CRTC_BLANK_DATA_COLOR, tg110->offsets.crtc, value); /* TO DO we have to program EXT registers and we need to know LB DATA * format because it is used when more 10 , i.e. 12 bits per color * * m_mmDxCRTC_OVERSCAN_COLOR_EXT * m_mmDxCRTC_BLACK_COLOR_EXT * m_mmDxCRTC_BLANK_DATA_COLOR_EXT */ } static void dce120_timing_generator_set_drr( struct timing_generator *tg, const struct drr_params *params) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); if (params != NULL && params->vertical_total_max > 0 && params->vertical_total_min > 0) { CRTC_REG_UPDATE( CRTC0_CRTC_V_TOTAL_MIN, CRTC_V_TOTAL_MIN, params->vertical_total_min - 1); CRTC_REG_UPDATE( CRTC0_CRTC_V_TOTAL_MAX, CRTC_V_TOTAL_MAX, params->vertical_total_max - 1); CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 6, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 1, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 1, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0); CRTC_REG_UPDATE( CRTC0_CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_EVENT_MASK, 0x180); } else { CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0); CRTC_REG_UPDATE( CRTC0_CRTC_V_TOTAL_MIN, CRTC_V_TOTAL_MIN, 0); CRTC_REG_UPDATE( CRTC0_CRTC_V_TOTAL_MAX, CRTC_V_TOTAL_MAX, 0); CRTC_REG_UPDATE( CRTC0_CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_EVENT_MASK, 0); } } static void dce120_timing_generator_get_crtc_scanoutpos( struct timing_generator *tg, uint32_t *v_blank_start, uint32_t 
*v_blank_end, uint32_t *h_position, uint32_t *v_position) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); struct crtc_position position; uint32_t v_blank_start_end = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_V_BLANK_START_END, tg110->offsets.crtc); *v_blank_start = get_reg_field_value(v_blank_start_end, CRTC0_CRTC_V_BLANK_START_END, CRTC_V_BLANK_START); *v_blank_end = get_reg_field_value(v_blank_start_end, CRTC0_CRTC_V_BLANK_START_END, CRTC_V_BLANK_END); dce120_timing_generator_get_crtc_position( tg, &position); *h_position = position.horizontal_count; *v_position = position.vertical_count; } static void dce120_timing_generator_enable_advanced_request( struct timing_generator *tg, bool enable, const struct dc_crtc_timing *timing) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t v_sync_width_and_b_porch = timing->v_total - timing->v_addressable - timing->v_border_bottom - timing->v_front_porch; uint32_t value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_START_LINE_CONTROL, tg110->offsets.crtc); set_reg_field_value( value, enable ? 
0 : 1, CRTC0_CRTC_START_LINE_CONTROL, CRTC_LEGACY_REQUESTOR_EN); /* Program advanced line position acc.to the best case from fetching data perspective to hide MC latency * and prefilling Line Buffer in V Blank (to 10 lines as LB can store max 10 lines) */ if (v_sync_width_and_b_porch > 10) v_sync_width_and_b_porch = 10; set_reg_field_value( value, v_sync_width_and_b_porch, CRTC0_CRTC_START_LINE_CONTROL, CRTC_ADVANCED_START_LINE_POSITION); dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_START_LINE_CONTROL, tg110->offsets.crtc, value); } static void dce120_tg_program_blank_color(struct timing_generator *tg, const struct tg_color *black_color) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value = 0; CRTC_REG_UPDATE_3( CRTC0_CRTC_BLACK_COLOR, CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb, CRTC_BLACK_COLOR_G_Y, black_color->color_g_y, CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr); value = dm_read_reg_soc15( tg->ctx, mmCRTC0_CRTC_BLACK_COLOR, tg110->offsets.crtc); dm_write_reg_soc15( tg->ctx, mmCRTC0_CRTC_BLANK_DATA_COLOR, tg110->offsets.crtc, value); } static void dce120_tg_set_overscan_color(struct timing_generator *tg, const struct tg_color *overscan_color) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); CRTC_REG_SET_3( CRTC0_CRTC_OVERSCAN_COLOR, CRTC_OVERSCAN_COLOR_BLUE, overscan_color->color_b_cb, CRTC_OVERSCAN_COLOR_GREEN, overscan_color->color_g_y, CRTC_OVERSCAN_COLOR_RED, overscan_color->color_r_cr); } static void dce120_tg_program_timing(struct timing_generator *tg, const struct dc_crtc_timing *timing, int vready_offset, int vstartup_start, int vupdate_offset, int vupdate_width, const enum signal_type signal, bool use_vbios) { if (use_vbios) dce110_timing_generator_program_timing_generator(tg, timing); else dce120_timing_generator_program_blanking(tg, timing); } static bool dce120_tg_is_blanked(struct timing_generator *tg) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value = dm_read_reg_soc15( 
tg->ctx, mmCRTC0_CRTC_BLANK_CONTROL, tg110->offsets.crtc); if (get_reg_field_value( value, CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 1 && get_reg_field_value( value, CRTC0_CRTC_BLANK_CONTROL, CRTC_CURRENT_BLANK_STATE) == 1) return true; return false; } static void dce120_tg_set_blank(struct timing_generator *tg, bool enable_blanking) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); CRTC_REG_SET( CRTC0_CRTC_DOUBLE_BUFFER_CONTROL, CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1); if (enable_blanking) CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); else dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_BLANK_CONTROL, tg110->offsets.crtc, 0); } bool dce120_tg_validate_timing(struct timing_generator *tg, const struct dc_crtc_timing *timing); static void dce120_tg_wait_for_state(struct timing_generator *tg, enum crtc_state state) { switch (state) { case CRTC_STATE_VBLANK: dce120_timing_generator_wait_for_vblank(tg); break; case CRTC_STATE_VACTIVE: dce120_timing_generator_wait_for_vactive(tg); break; default: break; } } static void dce120_tg_set_colors(struct timing_generator *tg, const struct tg_color *blank_color, const struct tg_color *overscan_color) { if (blank_color != NULL) dce120_tg_program_blank_color(tg, blank_color); if (overscan_color != NULL) dce120_tg_set_overscan_color(tg, overscan_color); } static void dce120_timing_generator_set_static_screen_control( struct timing_generator *tg, uint32_t event_triggers, uint32_t num_frames) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); // By register spec, it only takes 8 bit value if (num_frames > 0xFF) num_frames = 0xFF; CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers, CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames); } static void dce120_timing_generator_set_test_pattern( struct timing_generator *tg, /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode' * because this is not DP-specific (which is probably somewhere in DP * 
encoder) */ enum controller_dp_test_pattern test_pattern, enum dc_color_depth color_depth) { struct dc_context *ctx = tg->ctx; uint32_t value; struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); enum test_pattern_color_format bit_depth; enum test_pattern_dyn_range dyn_range; enum test_pattern_mode mode; /* color ramp generator mixes 16-bits color */ uint32_t src_bpc = 16; /* requested bpc */ uint32_t dst_bpc; uint32_t index; /* RGB values of the color bars. * Produce two RGB colors: RGB0 - white (all Fs) * and RGB1 - black (all 0s) * (three RGB components for two colors) */ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000}; /* dest color (converted to the specified color format) */ uint16_t dst_color[6]; uint32_t inc_base; /* translate to bit depth */ switch (color_depth) { case COLOR_DEPTH_666: bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6; break; case COLOR_DEPTH_888: bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; break; case COLOR_DEPTH_101010: bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10; break; case COLOR_DEPTH_121212: bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12; break; default: bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; break; } switch (test_pattern) { case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: { dyn_range = (test_pattern == CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ? TEST_PATTERN_DYN_RANGE_CEA : TEST_PATTERN_DYN_RANGE_VESA); mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; CRTC_REG_UPDATE_2(CRTC0_CRTC_TEST_PATTERN_PARAMETERS, CRTC_TEST_PATTERN_VRES, 6, CRTC_TEST_PATTERN_HRES, 6); CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL, CRTC_TEST_PATTERN_EN, 1, CRTC_TEST_PATTERN_MODE, mode, CRTC_TEST_PATTERN_DYNAMIC_RANGE, dyn_range, CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth); } break; case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS: case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS: { mode = (test_pattern == CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ? 
TEST_PATTERN_MODE_VERTICALBARS : TEST_PATTERN_MODE_HORIZONTALBARS); switch (bit_depth) { case TEST_PATTERN_COLOR_FORMAT_BPC_6: dst_bpc = 6; break; case TEST_PATTERN_COLOR_FORMAT_BPC_8: dst_bpc = 8; break; case TEST_PATTERN_COLOR_FORMAT_BPC_10: dst_bpc = 10; break; default: dst_bpc = 8; break; } /* adjust color to the required colorFormat */ for (index = 0; index < 6; index++) { /* dst = 2^dstBpc * src / 2^srcBpc = src >> * (srcBpc - dstBpc); */ dst_color[index] = src_color[index] >> (src_bpc - dst_bpc); /* CRTC_TEST_PATTERN_DATA has 16 bits, * lowest 6 are hardwired to ZERO * color bits should be left aligned aligned to MSB * XXXXXXXXXX000000 for 10 bit, * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 */ dst_color[index] <<= (16 - dst_bpc); } dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, 0); /* We have to write the mask before data, similar to pipeline. * For example, for 8 bpc, if we want RGB0 to be magenta, * and RGB1 to be cyan, * we need to make 7 writes: * MASK DATA * 000001 00000000 00000000 set mask to R0 * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0 * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0 * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1 * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1 * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1 * 100000 11111111 00000000 B1 255, 0xFF00 * * we will make a loop of 6 in which we prepare the mask, * then write, then prepare the color for next write. 
* first iteration will write mask only, * but each next iteration color prepared in * previous iteration will be written within new mask, * the last component will written separately, * mask is not changing between 6th and 7th write * and color will be prepared by last iteration */ /* write color, color values mask in CRTC_TEST_PATTERN_MASK * is B1, G1, R1, B0, G0, R0 */ value = 0; for (index = 0; index < 6; index++) { /* prepare color mask, first write PATTERN_DATA * will have all zeros */ set_reg_field_value( value, (1 << index), CRTC0_CRTC_TEST_PATTERN_COLOR, CRTC_TEST_PATTERN_MASK); /* write color component */ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value); /* prepare next color component, * will be written in the next iteration */ set_reg_field_value( value, dst_color[index], CRTC0_CRTC_TEST_PATTERN_COLOR, CRTC_TEST_PATTERN_DATA); } /* write last color component, * it's been already prepared in the loop */ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value); /* enable test pattern */ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL, CRTC_TEST_PATTERN_EN, 1, CRTC_TEST_PATTERN_MODE, mode, CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0, CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth); } break; case CONTROLLER_DP_TEST_PATTERN_COLORRAMP: { mode = (bit_depth == TEST_PATTERN_COLOR_FORMAT_BPC_10 ? 
TEST_PATTERN_MODE_DUALRAMP_RGB : TEST_PATTERN_MODE_SINGLERAMP_RGB); switch (bit_depth) { case TEST_PATTERN_COLOR_FORMAT_BPC_6: dst_bpc = 6; break; case TEST_PATTERN_COLOR_FORMAT_BPC_8: dst_bpc = 8; break; case TEST_PATTERN_COLOR_FORMAT_BPC_10: dst_bpc = 10; break; default: dst_bpc = 8; break; } /* increment for the first ramp for one color gradation * 1 gradation for 6-bit color is 2^10 * gradations in 16-bit color */ inc_base = (src_bpc - dst_bpc); switch (bit_depth) { case TEST_PATTERN_COLOR_FORMAT_BPC_6: { CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS, CRTC_TEST_PATTERN_INC0, inc_base, CRTC_TEST_PATTERN_INC1, 0, CRTC_TEST_PATTERN_HRES, 6, CRTC_TEST_PATTERN_VRES, 6, CRTC_TEST_PATTERN_RAMP0_OFFSET, 0); } break; case TEST_PATTERN_COLOR_FORMAT_BPC_8: { CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS, CRTC_TEST_PATTERN_INC0, inc_base, CRTC_TEST_PATTERN_INC1, 0, CRTC_TEST_PATTERN_HRES, 8, CRTC_TEST_PATTERN_VRES, 6, CRTC_TEST_PATTERN_RAMP0_OFFSET, 0); } break; case TEST_PATTERN_COLOR_FORMAT_BPC_10: { CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS, CRTC_TEST_PATTERN_INC0, inc_base, CRTC_TEST_PATTERN_INC1, inc_base + 2, CRTC_TEST_PATTERN_HRES, 8, CRTC_TEST_PATTERN_VRES, 5, CRTC_TEST_PATTERN_RAMP0_OFFSET, 384 << 6); } break; default: break; } dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, 0); /* enable test pattern */ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, 0); CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL, CRTC_TEST_PATTERN_EN, 1, CRTC_TEST_PATTERN_MODE, mode, CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0, CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth); } break; case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE: { value = 0; dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, value); dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value); dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, value); } break; default: 
break; } } static bool dce120_arm_vert_intr( struct timing_generator *tg, uint8_t width) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t v_blank_start, v_blank_end, h_position, v_position; tg->funcs->get_scanoutpos( tg, &v_blank_start, &v_blank_end, &h_position, &v_position); if (v_blank_start == 0 || v_blank_end == 0) return false; CRTC_REG_SET_2( CRTC0_CRTC_VERTICAL_INTERRUPT0_POSITION, CRTC_VERTICAL_INTERRUPT0_LINE_START, v_blank_start, CRTC_VERTICAL_INTERRUPT0_LINE_END, v_blank_start + width); return true; } static bool dce120_is_tg_enabled(struct timing_generator *tg) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value, field; value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CONTROL, tg110->offsets.crtc); field = get_reg_field_value(value, CRTC0_CRTC_CONTROL, CRTC_CURRENT_MASTER_EN_STATE); return field == 1; } static bool dce120_configure_crc(struct timing_generator *tg, const struct crc_params *params) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); /* Cannot configure crc on a CRTC that is disabled */ if (!dce120_is_tg_enabled(tg)) return false; /* First, disable CRC before we configure it. */ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL, tg110->offsets.crtc, 0); if (!params->enable) return true; /* Program frame boundaries */ /* Window A x axis start and end. */ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL, CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start, CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end); /* Window A y axis start and end. */ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL, CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start, CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end); /* Window B x axis start and end. */ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL, CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start, CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end); /* Window B y axis start and end. 
*/ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL, CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start, CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end); /* Set crc mode and selection, and enable. Only using CRC0*/ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL, CRTC_CRC_EN, params->continuous_mode ? 1 : 0, CRTC_CRC0_SELECT, params->selection, CRTC_CRC_EN, 1); return true; } static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t value, field; value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL, tg110->offsets.crtc); field = get_reg_field_value(value, CRTC0_CRTC_CRC_CNTL, CRTC_CRC_EN); /* Early return if CRC is not enabled for this CRTC */ if (!field) return false; value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG, tg110->offsets.crtc); *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR); *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y); value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B, tg110->offsets.crtc); *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB); return true; } static const struct timing_generator_funcs dce120_tg_funcs = { .validate_timing = dce120_tg_validate_timing, .program_timing = dce120_tg_program_timing, .enable_crtc = dce120_timing_generator_enable_crtc, .disable_crtc = dce110_timing_generator_disable_crtc, /* used by enable_timing_synchronization. Not need for FPGA */ .is_counter_moving = dce110_timing_generator_is_counter_moving, /* never be called */ .get_position = dce120_timing_generator_get_crtc_position, .get_frame_count = dce120_timing_generator_get_vblank_counter, .get_scanoutpos = dce120_timing_generator_get_crtc_scanoutpos, .set_early_control = dce120_timing_generator_set_early_control, /* used by enable_timing_synchronization. 
Not need for FPGA */ .wait_for_state = dce120_tg_wait_for_state, .set_blank = dce120_tg_set_blank, .is_blanked = dce120_tg_is_blanked, /* never be called */ .set_colors = dce120_tg_set_colors, .set_overscan_blank_color = dce120_timing_generator_set_overscan_color_black, .set_blank_color = dce120_timing_generator_program_blank_color, .disable_vga = dce120_timing_generator_disable_vga, .did_triggered_reset_occur = dce120_timing_generator_did_triggered_reset_occur, .setup_global_swap_lock = dce120_timing_generator_setup_global_swap_lock, .enable_reset_trigger = dce120_timing_generator_enable_reset_trigger, .disable_reset_trigger = dce120_timing_generator_disable_reset_trigger, .tear_down_global_swap_lock = dce120_timing_generator_tear_down_global_swap_lock, .enable_advanced_request = dce120_timing_generator_enable_advanced_request, .set_drr = dce120_timing_generator_set_drr, .get_last_used_drr_vtotal = NULL, .set_static_screen_control = dce120_timing_generator_set_static_screen_control, .set_test_pattern = dce120_timing_generator_set_test_pattern, .arm_vert_intr = dce120_arm_vert_intr, .is_tg_enabled = dce120_is_tg_enabled, .configure_crc = dce120_configure_crc, .get_crc = dce120_get_crc, }; void dce120_timing_generator_construct( struct dce110_timing_generator *tg110, struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { tg110->controller_id = CONTROLLER_ID_D0 + instance; tg110->base.inst = instance; tg110->offsets = *offsets; tg110->base.funcs = &dce120_tg_funcs; tg110->base.ctx = ctx; tg110->base.bp = ctx->dc_bios; tg110->max_h_total = CRTC0_CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1; tg110->max_v_total = CRTC0_CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1; /*//CRTC requires a minimum HBLANK = 32 pixels and o * Minimum HSYNC = 8 pixels*/ tg110->min_h_blank = 32; /*DCE12_CRTC_Block_ARch.doc*/ tg110->min_h_front_porch = 0; tg110->min_h_back_porch = 0; tg110->min_h_sync_width = 4; tg110->min_v_sync_width = 1; tg110->min_v_blank = 3; }
linux-master
drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "core_types.h"
#include "dcn301_dccg.h"

#define TO_DCN_DCCG(dccg)\
	container_of(dccg, struct dcn_dccg, base)

#define REG(reg) \
	(dccg_dcn->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name

#define CTX \
	dccg_dcn->base.ctx

#define DC_LOGGER \
	dccg->ctx->logger

/* DCN3.01 DCCG hook table: all entries are inherited from the DCN2 DCCG. */
static const struct dccg_funcs dccg301_funcs = {
	.update_dpp_dto = dccg2_update_dpp_dto,
	.get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
	.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
	.otg_add_pixel = dccg2_otg_add_pixel,
	.otg_drop_pixel = dccg2_otg_drop_pixel,
	.dccg_init = dccg2_init
};

/*
 * Allocate and initialize a DCN3.01 DCCG instance.
 *
 * @ctx:        DC context the new object belongs to.
 * @regs:       register address table for this ASIC revision.
 * @dccg_shift: per-field shift table matching @regs.
 * @dccg_mask:  per-field mask table matching @regs.
 *
 * Returns a pointer to the embedded base object on success, or NULL when
 * the allocation fails.  The caller owns the returned object and releases
 * it by freeing the containing struct dcn_dccg.
 */
struct dccg *dccg301_create(
	struct dc_context *ctx,
	const struct dccg_registers *regs,
	const struct dccg_shift *dccg_shift,
	const struct dccg_mask *dccg_mask)
{
	struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);

	if (!dccg_dcn) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dccg_dcn->base.ctx = ctx;
	dccg_dcn->base.funcs = &dccg301_funcs;
	dccg_dcn->regs = regs;
	dccg_dcn->dccg_shift = dccg_shift;
	dccg_dcn->dccg_mask = dccg_mask;

	return &dccg_dcn->base;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* DCN3.01 OPTC (output pipe timing combiner): only the DRR/manual-trigger
 * hooks are specialized here; everything else reuses optc1/optc2/optc3.
 */

#include "reg_helper.h"
#include "dcn301_optc.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"

#include "dml/dcn30/dcn30_fpu.h"
#include "dc_trace.h"

/* These macros are consumed by the REG_* helpers; they resolve register
 * addresses and shift/mask pairs through the local `optc1` variable, so
 * every function below must declare it.
 */
#define REG(reg)\
	optc1->tg_regs->reg

#define CTX \
	optc1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	optc1->tg_shift->field_name, optc1->tg_mask->field_name

/**
 * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
 *
 * @optc: timing_generator instance.
 * @params: parameters used for Dynamic Refresh Rate.
 *
 * When @params carries a valid min/max range, V_TOTAL_MIN/MAX are armed
 * (values are programmed minus one, matching the hardware's inclusive
 * line count) and end-of-frame flow control is routed through TRIG_A;
 * otherwise DRR is torn down and the min/max range cleared.
 */
void optc301_set_drr(
	struct timing_generator *optc,
	const struct drr_params *params)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	if (params != NULL &&
			params->vertical_total_max > 0 &&
			params->vertical_total_min > 0) {

		/* Optional mid-frame VTOTAL: replaces MAX for the first
		 * vertical_total_mid_frame_num frames.
		 */
		if (params->vertical_total_mid != 0) {

			REG_SET(OTG_V_TOTAL_MID, 0,
				OTG_V_TOTAL_MID, params->vertical_total_mid - 1);

			REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
					OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
					OTG_VTOTAL_MID_FRAME_NUM,
					(uint8_t)params->vertical_total_mid_frame_num);
		}

		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);

		REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
				OTG_V_TOTAL_MIN_SEL, 1,
				OTG_V_TOTAL_MAX_SEL, 1,
				OTG_FORCE_LOCK_ON_EVENT, 0,
				OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
				OTG_SET_V_TOTAL_MIN_MASK, 0);
		// Setup manual flow control for EOF via TRIG_A
		optc->funcs->setup_manual_trigger(optc);

	} else {
		/* Disable DRR: drop min/max selection and clear the range. */
		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
				OTG_SET_V_TOTAL_MIN_MASK, 0,
				OTG_V_TOTAL_MIN_SEL, 0,
				OTG_V_TOTAL_MAX_SEL, 0,
				OTG_FORCE_LOCK_ON_EVENT, 0);

		optc->funcs->set_vtotal_min_max(optc, 0, 0);
	}
}

/* Arm TRIG_A as the manual end-of-frame flow-control trigger for this OTG
 * instance (source select 21, rising edge, status cleared on write).
 */
void optc301_setup_manual_trigger(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_SET_8(OTG_TRIGA_CNTL, 0,
			OTG_TRIGA_SOURCE_SELECT, 21,
			OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
			OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
			OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
			OTG_TRIGA_POLARITY_SELECT, 0,
			OTG_TRIGA_FREQUENCY_SELECT, 0,
			OTG_TRIGA_DELAY, 0,
			OTG_TRIGA_CLEAR, 1);
}

/* Hook table for DCN3.01: mostly optc1/optc2/optc3 implementations with
 * the two DRR-related hooks overridden above.
 */
static struct timing_generator_funcs dcn30_tg_funcs = {
		.validate_timing = optc1_validate_timing,
		.program_timing = optc1_program_timing,
		.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
		.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
		.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
		.program_global_sync = optc1_program_global_sync,
		.enable_crtc = optc2_enable_crtc,
		.disable_crtc = optc1_disable_crtc,
		/* used by enable_timing_synchronization. Not need for FPGA */
		.is_counter_moving = optc1_is_counter_moving,
		.get_position = optc1_get_position,
		.get_frame_count = optc1_get_vblank_counter,
		.get_scanoutpos = optc1_get_crtc_scanoutpos,
		.get_otg_active_size = optc1_get_otg_active_size,
		.set_early_control = optc1_set_early_control,
		/* used by enable_timing_synchronization. Not need for FPGA */
		.wait_for_state = optc1_wait_for_state,
		.set_blank_color = optc3_program_blank_color,
		.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
		.triplebuffer_lock = optc3_triplebuffer_lock,
		.triplebuffer_unlock = optc2_triplebuffer_unlock,
		.enable_reset_trigger = optc1_enable_reset_trigger,
		.enable_crtc_reset = optc1_enable_crtc_reset,
		.disable_reset_trigger = optc1_disable_reset_trigger,
		.lock = optc3_lock,
		.unlock = optc1_unlock,
		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
		.enable_optc_clock = optc1_enable_optc_clock,
		.set_drr = optc301_set_drr,
		.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
		.set_vtotal_min_max = optc3_set_vtotal_min_max,
		.set_static_screen_control = optc1_set_static_screen_control,
		.program_stereo = optc1_program_stereo,
		.is_stereo_left_eye = optc1_is_stereo_left_eye,
		.tg_init = optc3_tg_init,
		.is_tg_enabled = optc1_is_tg_enabled,
		.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
		.clear_optc_underflow = optc1_clear_optc_underflow,
		.setup_global_swap_lock = NULL,
		.get_crc = optc1_get_crc,
		.configure_crc = optc2_configure_crc,
		.set_dsc_config = optc3_set_dsc_config,
		.get_dsc_status = optc2_get_dsc_status,
		.set_dwb_source = NULL,
		.set_odm_bypass = optc3_set_odm_bypass,
		.set_odm_combine = optc3_set_odm_combine,
		.get_optc_source = optc2_get_optc_source,
		.set_out_mux = optc3_set_out_mux,
		.set_drr_trigger_window = optc3_set_drr_trigger_window,
		.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
		.set_gsl = optc2_set_gsl,
		.set_gsl_source_select = optc2_set_gsl_source_select,
		.set_vtg_params = optc1_set_vtg_params,
		.program_manual_trigger = optc2_program_manual_trigger,
		.setup_manual_trigger = optc301_setup_manual_trigger,
		.get_hw_timing = optc1_get_hw_timing,
		.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
};

/* Finish initializing a caller-provided optc: install the DCN3.01 hook
 * table and the timing limits (max totals derived from the register
 * field masks, minimums per hardware requirements).
 */
void dcn301_timing_generator_init(struct optc *optc1)
{
	optc1->base.funcs = &dcn30_tg_funcs;

	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;

	optc1->min_h_blank = 32;
	optc1->min_v_blank = 3;
	optc1->min_v_blank_interlace = 5;
	optc1->min_h_sync_width = 4;
	optc1->min_v_sync_width = 1;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "core_types.h" #include "dce/dce_hwseq.h" #include "dcn301_hwseq.h" #include "reg_helper.h" #define DC_LOGGER_INIT(logger) #define CTX \ hws->ctx #define REG(reg)\ hws->regs->reg #undef FN #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name
linux-master
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hwseq.c
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "dcn301_hubbub.h" #include "reg_helper.h" #define REG(reg)\ hubbub1->regs->reg #define DC_LOGGER \ hubbub1->base.ctx->logger #define CTX \ hubbub1->base.ctx #undef FN #define FN(reg_name, field_name) \ hubbub1->shifts->field_name, hubbub1->masks->field_name #define REG(reg)\ hubbub1->regs->reg #define CTX \ hubbub1->base.ctx #undef FN #define FN(reg_name, field_name) \ hubbub1->shifts->field_name, hubbub1->masks->field_name static const struct hubbub_funcs hubbub301_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub21_init_dchub, .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub3_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .hubbub_read_state = hubbub2_read_state, }; void hubbub301_construct(struct dcn20_hubbub *hubbub3, struct dc_context *ctx, const struct dcn_hubbub_registers *hubbub_regs, const struct dcn_hubbub_shift *hubbub_shift, const struct dcn_hubbub_mask *hubbub_mask) { hubbub3->base.ctx = ctx; hubbub3->base.funcs = &hubbub301_funcs; hubbub3->regs = hubbub_regs; hubbub3->shifts = hubbub_shift; hubbub3->masks = hubbub_mask; hubbub3->debug_test_index_pstate = 0xB; hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */ }
linux-master
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
/* * Copyright 2016-2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dcn301_hwseq.h"

#include "dcn301_init.h"

/*
 * Public HW sequencer vtable for DCN3.01. Entirely composed of inherited
 * DCE11.0/DCN1.x/2.x/2.1/3.0 implementations — DCN3.01 defines no public
 * overrides of its own, only this particular selection per hook.
 */
static const struct hw_sequencer_funcs dcn301_funcs = {
	.program_gamut_remap = dcn10_program_gamut_remap,
	.init_hw = dcn10_init_hw,
	.power_down_on_boot = dcn10_power_down_on_boot,
	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
	.apply_ctx_for_surface = NULL,
	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
	.update_plane_addr = dcn20_update_plane_addr,
	.update_dchub = dcn10_update_dchub,
	.update_pending_status = dcn10_update_pending_status,
	.program_output_csc = dcn20_program_output_csc,
	.enable_accelerated_mode = dce110_enable_accelerated_mode,
	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
	.update_info_frame = dcn30_update_info_frame,
	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
	.enable_stream = dcn20_enable_stream,
	.disable_stream = dce110_disable_stream,
	.unblank_stream = dcn20_unblank_stream,
#ifdef FREESYNC_POWER_OPTIMIZE
	.are_streams_coarse_grain_aligned = dcn20_are_streams_coarse_grain_aligned,
#endif
	.blank_stream = dce110_blank_stream,
	.enable_audio_stream = dce110_enable_audio_stream,
	.disable_audio_stream = dce110_disable_audio_stream,
	.disable_plane = dcn20_disable_plane,
	.pipe_control_lock = dcn20_pipe_control_lock,
	/* interdependent updates lock every pipe, not just the affected pair */
	.interdependent_update_lock = dcn10_lock_all_pipes,
	.cursor_lock = dcn10_cursor_lock,
	.prepare_bandwidth = dcn20_prepare_bandwidth,
	.optimize_bandwidth = dcn20_optimize_bandwidth,
	.update_bandwidth = dcn20_update_bandwidth,
	.set_drr = dcn10_set_drr,
	.get_position = dcn10_get_position,
	.set_static_screen_control = dcn10_set_static_screen_control,
	.setup_stereo = dcn10_setup_stereo,
	.set_avmute = dcn30_set_avmute,
	.log_hw_state = dcn10_log_hw_state,
	.get_hw_state = dcn10_get_hw_state,
	.clear_status_bits = dcn10_clear_status_bits,
	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
	.edp_backlight_control = dce110_edp_backlight_control,
	.edp_power_control = dce110_edp_power_control,
	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
	.set_cursor_position = dcn10_set_cursor_position,
	.set_cursor_attribute = dcn10_set_cursor_attribute,
	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
	.set_clock = dcn10_set_clock,
	.get_clock = dcn10_get_clock,
	.program_triplebuffer = dcn20_program_triple_buffer,
	.enable_writeback = dcn30_enable_writeback,
	.disable_writeback = dcn30_disable_writeback,
	.update_writeback = dcn30_update_writeback,
	.mmhubbub_warmup = dcn30_mmhubbub_warmup,
	.dmdata_status_done = dcn20_dmdata_status_done,
	.program_dmdata_engine = dcn30_program_dmdata_engine,
	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
	.init_sys_ctx = dcn20_init_sys_ctx,
	.init_vm_ctx = dcn20_init_vm_ctx,
	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
	.calc_vupdate_position = dcn10_calc_vupdate_position,
	.set_backlight_level = dcn21_set_backlight_level,
	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
	.set_pipe = dcn21_set_pipe,
	.enable_lvds_link_output = dce110_enable_lvds_link_output,
	.enable_tmds_link_output = dce110_enable_tmds_link_output,
	.enable_dp_link_output = dce110_enable_dp_link_output,
	.disable_link_output = dce110_disable_link_output,
	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
	.get_dcc_en_bits = dcn10_get_dcc_en_bits,
	.optimize_pwr_state = dcn21_optimize_pwr_state,
	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
	.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};

/*
 * Private (hwseq-internal) vtable for DCN3.01 — helpers used by the public
 * hooks above, again assembled purely from earlier-generation code.
 */
static const struct hwseq_private_funcs dcn301_private_funcs = {
	.init_pipes = dcn10_init_pipes,
	.update_plane_addr = dcn20_update_plane_addr,
	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
	.update_mpcc = dcn20_update_mpcc,
	.set_input_transfer_func = dcn30_set_input_transfer_func,
	.set_output_transfer_func = dcn30_set_output_transfer_func,
	.power_down = dce110_power_down,
	/* dummy: display power gating is not controlled this way on DCN */
	.enable_display_power_gating = dcn10_dummy_display_power_gating,
	.blank_pixel_data = dcn20_blank_pixel_data,
	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
	.enable_stream_timing = dcn20_enable_stream_timing,
	.edp_backlight_control = dce110_edp_backlight_control,
	.disable_stream_gating = dcn20_disable_stream_gating,
	.enable_stream_gating = dcn20_enable_stream_gating,
	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
	.did_underflow_occur = dcn10_did_underflow_occur,
	.init_blank = dcn20_init_blank,
	.disable_vga = dcn20_disable_vga,
	.bios_golden_init = dcn10_bios_golden_init,
	.plane_atomic_disable = dcn20_plane_atomic_disable,
	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
	.enable_power_gating_plane = dcn20_enable_power_gating_plane,
	.dpp_pg_control = dcn20_dpp_pg_control,
	.hubp_pg_control = dcn20_hubp_pg_control,
	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
	.update_odm = dcn20_update_odm,
	.dsc_pg_control = dcn20_dsc_pg_control,
	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
	.wait_for_blank_complete = dcn20_wait_for_blank_complete,
	.dccg_init = dcn20_dccg_init,
	.set_blend_lut = dcn30_set_blend_lut,
	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
};

/*
 * dcn301_hw_sequencer_construct - install the DCN3.01 sequencer vtables.
 * @dc: DC instance; dc->hwseq must already be allocated by the caller.
 */
void dcn301_hw_sequencer_construct(struct dc *dc)
{
	dc->hwss = dcn301_funcs;
	dc->hwseq->funcs = dcn301_private_funcs;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"

#include "core_types.h"
#include "link_encoder.h"
#include "dcn301_dio_link_encoder.h"
#include "stream_encoder.h"

#include "dc_bios_types.h"

#include "gpio_service_interface.h"

/* Register/context helpers: all resolve through the local 'enc10' pointer. */
#define CTX \
	enc10->base.ctx
#define DC_LOGGER \
	enc10->base.ctx->logger

#define REG(reg)\
	(enc10->link_regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc10->link_shift->field_name, enc10->link_mask->field_name

#define IND_REG(index) \
	(enc10->link_regs->index)

/*
 * DCN3.01 link-encoder vtable: mostly DCN1.x/2.x implementations plus the
 * enc2/enc3 FEC and hw_init paths.
 */
static const struct link_encoder_funcs dcn301_link_enc_funcs = {
	.read_state = link_enc2_read_state,
	.validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream,
	.hw_init = enc3_hw_init,
	.setup = dcn10_link_encoder_setup,
	.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
	.enable_dp_output = dcn20_link_encoder_enable_dp_output,
	.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
	.disable_output = dcn10_link_encoder_disable_output,
	.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
	.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
	.update_mst_stream_allocation_table = dcn10_link_encoder_update_mst_stream_allocation_table,
	.psr_program_dp_dphy_fast_training = dcn10_psr_program_dp_dphy_fast_training,
	.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
	.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
	.enable_hpd = dcn10_link_encoder_enable_hpd,
	.disable_hpd = dcn10_link_encoder_disable_hpd,
	.is_dig_enabled = dcn10_is_dig_enabled,
	.destroy = dcn10_link_encoder_destroy,
	.fec_set_enable = enc2_fec_set_enable,
	.fec_set_ready = enc2_fec_set_ready,
	.fec_is_active = enc2_fec_is_active,
	.get_dig_frontend = dcn10_get_dig_frontend,
	.get_dig_mode = dcn10_get_dig_mode,
	.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
	.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};

/*
 * dcn301_link_encoder_construct - initialize a DCN3.01 link encoder.
 * @enc20:        encoder instance to initialize (embeds a dcn10 encoder)
 * @init_data:    encoder id, connector, HPD source, transmitter, DC context
 * @enc_features: baseline feature caps, copied then refined from VBIOS
 * @link_regs/aux_regs/hpd_regs: register address tables
 * @link_shift/link_mask:        field shift/mask tables
 *
 * Copies the caller's feature set, selects the preferred DIG engine from the
 * UNIPHY transmitter id, then queries the VBIOS encoder caps and overrides
 * the HBR2/HBR3/HDMI-6GB/USB-C flags with the reported values. On VBIOS
 * query failure only a warning is logged and the defaults are kept.
 */
void dcn301_link_encoder_construct(
	struct dcn20_link_encoder *enc20,
	const struct encoder_init_data *init_data,
	const struct encoder_feature_support *enc_features,
	const struct dcn10_link_enc_registers *link_regs,
	const struct dcn10_link_enc_aux_registers *aux_regs,
	const struct dcn10_link_enc_hpd_registers *hpd_regs,
	const struct dcn10_link_enc_shift *link_shift,
	const struct dcn10_link_enc_mask *link_mask)
{
	struct bp_encoder_cap_info bp_cap_info = {0};
	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
	enum bp_result result = BP_RESULT_OK;
	struct dcn10_link_encoder *enc10 = &enc20->enc10;

	enc10->base.funcs = &dcn301_link_enc_funcs;
	enc10->base.ctx = init_data->ctx;
	enc10->base.id = init_data->encoder;

	enc10->base.hpd_source = init_data->hpd_source;
	enc10->base.connector = init_data->connector;

	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

	enc10->base.features = *enc_features;

	enc10->base.transmitter = init_data->transmitter;

	/* set the flag to indicate whether driver poll the I2C data pin
	 * while doing the DP sink detect
	 */

/*	if (dal_adapter_service_is_feature_supported(as,
		FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
		enc10->base.features.flags.bits.
			DP_SINK_DETECT_POLL_DATA_PIN = true;*/

	enc10->base.output_signals =
		SIGNAL_TYPE_DVI_SINGLE_LINK |
		SIGNAL_TYPE_DVI_DUAL_LINK |
		SIGNAL_TYPE_LVDS |
		SIGNAL_TYPE_DISPLAY_PORT |
		SIGNAL_TYPE_DISPLAY_PORT_MST |
		SIGNAL_TYPE_EDP |
		SIGNAL_TYPE_HDMI_TYPE_A;

	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
	 * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
	 * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
	 * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
	 * Prefer DIG assignment is decided by board design.
	 * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
	 * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
	 * By this, adding DIGG should not hurt DCE 8.0.
	 * This will let DCE 8.1 share DCE 8.0 as much as possible
	 * (NOTE(review): comment inherited from DCE-era code; kept for
	 * history, the mapping below is the part that matters here.)
	 */

	enc10->link_regs = link_regs;
	enc10->aux_regs = aux_regs;
	enc10->hpd_regs = hpd_regs;
	enc10->link_shift = link_shift;
	enc10->link_mask = link_mask;

	/* 1:1 UNIPHY instance -> preferred DIG engine mapping. */
	switch (enc10->base.transmitter) {
	case TRANSMITTER_UNIPHY_A:
		enc10->base.preferred_engine = ENGINE_ID_DIGA;
	break;
	case TRANSMITTER_UNIPHY_B:
		enc10->base.preferred_engine = ENGINE_ID_DIGB;
	break;
	case TRANSMITTER_UNIPHY_C:
		enc10->base.preferred_engine = ENGINE_ID_DIGC;
	break;
	case TRANSMITTER_UNIPHY_D:
		enc10->base.preferred_engine = ENGINE_ID_DIGD;
	break;
	case TRANSMITTER_UNIPHY_E:
		enc10->base.preferred_engine = ENGINE_ID_DIGE;
	break;
	case TRANSMITTER_UNIPHY_F:
		enc10->base.preferred_engine = ENGINE_ID_DIGF;
	break;
	case TRANSMITTER_UNIPHY_G:
		enc10->base.preferred_engine = ENGINE_ID_DIGG;
	break;
	default:
		ASSERT_CRITICAL(false);
		enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
	}

	/* default to one to mirror Windows behavior */
	enc10->base.features.flags.bits.HDMI_6GB_EN = 1;

	result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
						enc10->base.id, &bp_cap_info);

	/* Override features with DCE-specific values */
	if (result == BP_RESULT_OK) {
		enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
				bp_cap_info.DP_HBR2_EN;
		enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
				bp_cap_info.DP_HBR3_EN;
		enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
		enc10->base.features.flags.bits.DP_IS_USB_C =
				bp_cap_info.DP_IS_USB_C;
	} else {
		/* VBIOS query failed: keep the defaults set above. */
		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
				__func__,
				result);
	}
	/* Debug knob: force HDMI 2.0 (6GB) support off. */
	if (enc10->base.ctx->dc->debug.hdmi20_disable) {
		enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
	}
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
/* * Copyright 2019-2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "dcn301_init.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn30/dcn30_resource.h" #include "dcn301_resource.h" #include "dcn20/dcn20_resource.h" #include "dcn10/dcn10_ipp.h" #include "dcn301/dcn301_hubbub.h" #include "dcn30/dcn30_mpc.h" #include "dcn30/dcn30_hubp.h" #include "irq/dcn30/irq_service_dcn30.h" #include "dcn30/dcn30_dpp.h" #include "dcn301/dcn301_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn30/dcn30_opp.h" #include "dcn20/dcn20_dsc.h" #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn301/dcn301_dccg.h" #include "dcn10/dcn10_resource.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn301/dcn301_dio_link_encoder.h" #include "dcn301_panel_cntl.h" #include "vangogh_ip_offset.h" #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_mmhubbub.h" #include "dcn/dcn_3_0_1_offset.h" #include "dcn/dcn_3_0_1_sh_mask.h" #include "nbio/nbio_7_2_0_offset.h" #include "dpcs/dpcs_3_0_0_offset.h" #include "dpcs/dpcs_3_0_0_sh_mask.h" #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dml/dcn30/dcn30_fpu.h" #include "dml/dcn30/display_mode_vba_30.h" #include "dml/dcn301/dcn301_fpu.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" #include "amdgpu_socbb.h" #define TO_DCN301_RES_POOL(pool)\ container_of(pool, struct dcn301_resource_pool, base) #define DC_LOGGER_INIT(logger) enum dcn301_clk_src_array_id { DCN301_CLK_SRC_PLL0, DCN301_CLK_SRC_PLL1, DCN301_CLK_SRC_PLL2, DCN301_CLK_SRC_PLL3, DCN301_CLK_SRC_TOTAL }; /* begin ********************* * macros to expend register list macro defined 
in HW object header file */ /* DCN */ #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRI2(reg_name, block, id)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRIR(var_name, reg_name, block, id)\ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII2(reg_name_pre, reg_name_post, id)\ .reg_name_pre ## _ ## reg_name_post[id] = BASE(mm ## reg_name_pre \ ## id ## _ ## reg_name_post ## _BASE_IDX) + \ mm ## reg_name_pre ## id ## _ ## reg_name_post #define SRII_MPC_RMU(reg_name, block, id)\ .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII_DWB(reg_name, temp_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## temp_name #define SF_DWB2(reg_name, block, id, field_name, post_fix) \ .field_name = reg_name ## __ ## field_name ## post_fix #define DCCG_SRII(reg_name, block, id)\ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define VUPDATE_SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ mm ## reg_name ## _ ## block ## id /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ regBIF_BX0_ ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ 
MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(regMM ## reg_name ## _BASE_IDX) + \ regMM ## reg_name /* CLOCK */ #define CLK_BASE_INNER(seg) \ CLK_BASE__INST0_SEG ## seg #define CLK_BASE(seg) \ CLK_BASE_INNER(seg) #define CLK_SRI(reg_name, block, inst)\ .reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## _ ## inst ## _ ## reg_name static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN3_01(index, pllid),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define abm_regs(id)\ [id] = {\ ABM_DCN301_REG_LIST(id)\ } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), abm_regs(1), abm_regs(2), abm_regs(3), }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN30(_MASK) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6) }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define 
vpg_regs(id)\ [id] = {\ VPG_DCN3_REG_LIST(id)\ } static const struct dcn30_vpg_registers vpg_regs[] = { vpg_regs(0), vpg_regs(1), vpg_regs(2), vpg_regs(3), }; static const struct dcn30_vpg_shift vpg_shift = { DCN3_VPG_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_vpg_mask vpg_mask = { DCN3_VPG_MASK_SH_LIST(_MASK) }; #define afmt_regs(id)\ [id] = {\ AFMT_DCN3_REG_LIST(id)\ } static const struct dcn30_afmt_registers afmt_regs[] = { afmt_regs(0), afmt_regs(1), afmt_regs(2), afmt_regs(3), }; static const struct dcn30_afmt_shift afmt_shift = { DCN3_AFMT_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_afmt_mask afmt_mask = { DCN3_AFMT_MASK_SH_LIST(_MASK) }; #define stream_enc_regs(id)\ [id] = {\ SE_DCN3_REG_LIST(id)\ } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), }; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define aux_regs(id)\ [id] = {\ DCN2_AUX_REG_LIST(id)\ } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), }; #define link_regs(id, phyid)\ [id] = {\ LE_DCN301_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ DPCS_DCN2_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dce110_aux_registers_shift aux_shift = { DCN_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCN_AUX_MASK_SH_LIST(_MASK) }; static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0, A), link_regs(1, B), link_regs(2, C), link_regs(3, D), }; static const struct dcn10_link_enc_shift le_shift = { 
LINK_ENCODER_MASK_SH_LIST_DCN301(__SHIFT),\ DPCS_DCN2_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN301(_MASK),\ DPCS_DCN2_MASK_SH_LIST(_MASK) }; #define panel_cntl_regs(id)\ [id] = {\ DCN301_PANEL_CNTL_REG_LIST(id),\ } static const struct dce_panel_cntl_registers panel_cntl_regs[] = { panel_cntl_regs(0), panel_cntl_regs(1), }; static const struct dcn301_panel_cntl_shift panel_cntl_shift = { DCN301_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dcn301_panel_cntl_mask panel_cntl_mask = { DCN301_PANEL_CNTL_MASK_SH_LIST(_MASK) }; #define dpp_regs(id)\ [id] = {\ DPP_REG_LIST_DCN30(id),\ } static const struct dcn3_dpp_registers dpp_regs[] = { dpp_regs(0), dpp_regs(1), dpp_regs(2), dpp_regs(3), }; static const struct dcn3_dpp_shift tf_shift = { DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) }; static const struct dcn3_dpp_mask tf_mask = { DPP_REG_LIST_SH_MASK_DCN30(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_REG_LIST_DCN30(id),\ } static const struct dcn20_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), }; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST0(id), \ .AUXN_IMPCAL = 0, \ .AUXP_IMPCAL = 0, \ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), }; #define dwbc_regs_dcn3(id)\ [id] = {\ DWBC_COMMON_REG_LIST_DCN30(id),\ } static const struct dcn30_dwbc_registers dwbc30_regs[] = { dwbc_regs_dcn3(0), }; static const struct dcn30_dwbc_shift dwbc30_shift = { DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_dwbc_mask dwbc30_mask = { DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define mcif_wb_regs_dcn3(id)\ [id] = {\ 
MCIF_WB_COMMON_REG_LIST_DCN30(id),\ } static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { mcif_wb_regs_dcn3(0) }; static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ } static const struct dcn20_dsc_registers dsc_regs[] = { dsc_regsDCN20(0), dsc_regsDCN20(1), dsc_regsDCN20(2), }; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; static const struct dcn30_mpc_registers mpc_regs = { MPC_REG_LIST_DCN3_0(0), MPC_REG_LIST_DCN3_0(1), MPC_REG_LIST_DCN3_0(2), MPC_REG_LIST_DCN3_0(3), MPC_OUT_MUX_REG_LIST_DCN3_0(0), MPC_OUT_MUX_REG_LIST_DCN3_0(1), MPC_OUT_MUX_REG_LIST_DCN3_0(2), MPC_OUT_MUX_REG_LIST_DCN3_0(3), MPC_RMU_GLOBAL_REG_LIST_DCN3AG, MPC_RMU_REG_LIST_DCN3AG(0), MPC_RMU_REG_LIST_DCN3AG(1), MPC_DWB_MUX_REG_LIST_DCN3_0(0), }; static const struct dcn30_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define optc_regs(id)\ [id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)} static const struct dcn_optc_registers optc_regs[] = { optc_regs(0), optc_regs(1), optc_regs(2), optc_regs(3), }; static const struct dcn_optc_shift optc_shift = { OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_optc_mask optc_mask = { OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define hubp_regs(id)\ [id] = {\ HUBP_REG_LIST_DCN30(id)\ } static const struct dcn_hubp2_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3), }; static const struct dcn_hubp2_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_hubp2_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN30(_MASK) }; static const struct 
dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN301(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN301(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN301(_MASK) }; static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN301() }; static const struct dccg_shift dccg_shift = { DCCG_MASK_SH_LIST_DCN301(__SHIFT) }; static const struct dccg_mask dccg_mask = { DCCG_MASK_SH_LIST_DCN301(_MASK) }; static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN301_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN301_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN301_MASK_SH_LIST(_MASK) }; #define vmid_regs(id)\ [id] = {\ DCN20_VMID_REG_LIST(id)\ } static const struct dcn_vmid_registers vmid_regs[] = { vmid_regs(0), vmid_regs(1), vmid_regs(2), vmid_regs(3), vmid_regs(4), vmid_regs(5), vmid_regs(6), vmid_regs(7), vmid_regs(8), vmid_regs(9), vmid_regs(10), vmid_regs(11), vmid_regs(12), vmid_regs(13), vmid_regs(14), vmid_regs(15) }; static const struct dcn20_vmid_shift vmid_shifts = { DCN20_VMID_MASK_SH_LIST(__SHIFT) }; static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; static struct resource_caps res_cap_dcn301 = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, .num_audio = 4, .num_stream_encoder = 4, .num_pll = 4, .num_dwb = 1, .num_ddc = 4, .num_vmid = 16, .num_mpc_3dlut = 2, .num_dsc = 3, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true, .p010 = true, .ayuv = false, }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 16000 }, /* 6:1 downscaling ratio: 1000/6 = 166.666 */ .max_downscale_factor = { .argb8888 = 167, .nv12 = 167, .fp16 = 167 }, 64, 64 }; static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = 
true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_dpp_power_gate = false, .disable_hubp_power_gate = false, .disable_clock_gate = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, .max_downscale_src_width = 7680,/*upto 8K*/ .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable .dmub_command_table = true, .use_max_lb = false, .exit_idle_opt_for_cursor_updates = true }; static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; } static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); if (!dpp) return NULL; if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) return &dpp->base; BREAK_TO_DEBUGGER(); kfree(dpp); return NULL; } static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), 
i2c_inst_regs(4), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct mpc *dcn301_mpc_create( struct dc_context *ctx, int num_mpcc, int num_rmu) { struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); if (!mpc30) return NULL; dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); return &mpc30->base; } static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) { int i; struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub3) return NULL; hubbub301_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); for (i = 0; i < res_cap_dcn301.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub3->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } hubbub3->num_vmid = res_cap_dcn301.num_vmid; return &hubbub3->base; } static struct timing_generator *dcn301_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &optc_regs[instance]; tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; dcn301_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = 
true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; static struct link_encoder *dcn301_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; dcn301_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc20->enc10.base; } static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dcn301_panel_cntl *panel_cntl = kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dcn301_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } #define CTX ctx #define REG(reg_name) \ (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = REG_READ(CC_DC_PIPE_DIS); /* RV1 support max 4 pipes */ value = value & 0xf; return value; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *dcn301_create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct vpg *dcn301_vpg_create( struct dc_context *ctx, uint32_t inst) { struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); if (!vpg3) return NULL; vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); return &vpg3->base; } static struct afmt *dcn301_afmt_create( struct 
dc_context *ctx, uint32_t inst) { struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); if (!afmt3) return NULL; afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); return &afmt3->base; } static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; struct afmt *afmt; int vpg_inst; int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ if (eng_id <= ENGINE_ID_DIGF) { vpg_inst = eng_id; afmt_inst = eng_id; } else return NULL; enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn301_vpg_create(ctx, vpg_inst); afmt = dcn301_afmt_create(ctx, afmt_inst); if (!enc1 || !vpg || !afmt) { kfree(enc1); kfree(vpg); kfree(afmt); return NULL; } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn301_create_audio, .create_stream_encoder = dcn301_stream_encoder_create, .create_hwseq = dcn301_hwseq_create, }; static void dcn301_destruct(struct dcn301_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { if (pool->base.stream_enc[i]->vpg != NULL) { kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); pool->base.stream_enc[i]->vpg = NULL; } if (pool->base.stream_enc[i]->afmt != NULL) { kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); pool->base.stream_enc[i]->afmt = NULL; } kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); 
pool->base.stream_enc[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(pool->base.hubbub); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.dpps[i] != NULL) dcn301_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN20_HUBP(pool->base.hubps[i])); pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_opp; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dwb; i++) { if (pool->base.dwbc[i] != NULL) { kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); pool->base.dwbc[i] = NULL; } if (pool->base.mcif_wb[i] != NULL) { kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); pool->base.mcif_wb[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn20_clock_source_destroy(&pool->base.clock_sources[i]); 
pool->base.clock_sources[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { if (pool->base.mpc_lut[i] != NULL) { dc_3dlut_func_release(pool->base.mpc_lut[i]); pool->base.mpc_lut[i] = NULL; } if (pool->base.mpc_shaper[i] != NULL) { dc_transfer_func_release(pool->base.mpc_shaper[i]); pool->base.mpc_shaper[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn20_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { if (pool->base.multiple_abms[i] != NULL) dce_abm_destroy(&pool->base.multiple_abms[i]); } if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); } static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); if (!dwbc30) { dm_error("DC: failed to create dwbc30!\n"); return false; } dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); pool->dwbc[i] = &dwbc30->base; } return true; } static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); if (!mcif_wb30) { dm_error("DC: failed to create mcif_wb30!\n"); return false; } dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); pool->mcif_wb[i] = &mcif_wb30->base; } return true; } static 
struct display_stream_compressor *dcn301_dsc_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); return NULL; } dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } static void dcn301_destroy_resource_pool(struct resource_pool **pool) { struct dcn301_resource_pool *dcn301_pool = TO_DCN301_RES_POOL(*pool); dcn301_destruct(dcn301_pool); kfree(dcn301_pool); *pool = NULL; } static struct clock_source *dcn301_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn301_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; if (ASICREV_IS_VANGOGH(hw_internal_rev)) return true; return false; } static bool init_soc_bounding_box(struct dc *dc, struct dcn301_resource_pool *pool) { struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_01_soc; struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_01_ip; DC_LOGGER_INIT(dc->ctx->logger); if (!is_soc_bounding_box_valid(dc)) { DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); return false; } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = {0}; if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == 
BP_RESULT_OK) { DC_FP_START(); dcn301_fpu_init_soc_bounding_box(bb_info); DC_FP_END(); } } return true; } static void set_wm_ranges( struct pp_smu_funcs *pp_smu, struct _vcs_dpi_soc_bounding_box_st *loaded_bb) { struct pp_smu_wm_range_sets ranges = {0}; int i; ranges.num_reader_wm_sets = 0; if (loaded_bb->num_states == 1) { ranges.reader_wm_sets[0].wm_inst = 0; ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ranges.num_reader_wm_sets = 1; } else if (loaded_bb->num_states > 1) { for (i = 0; i < 4 && i < loaded_bb->num_states; i++) { ranges.reader_wm_sets[i].wm_inst = i; ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; DC_FP_START(); dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb); DC_FP_END(); ranges.num_reader_wm_sets = i + 1; } ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; } ranges.num_writer_wm_sets = 1; ranges.writer_wm_sets[0].wm_inst = 0; ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges); } static void dcn301_calculate_wm_and_dlg( struct dc *dc, struct 
dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel) { DC_FP_START(); dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); DC_FP_END(); } static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, .set_mcif_arb_params = dcn30_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, .update_bw_bounding_box = dcn301_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state }; static bool dcn301_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn301_resource_pool *pool) { int i, j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; uint32_t pipe_fuses = read_pipe_fuses(ctx); uint32_t num_pipes = 0; DC_LOGGER_INIT(dc->ctx->logger); ctx->dc_bios->regs = &bios_regs; if (dc->ctx->asic_id.chip_id == DEVICE_ID_VGH_1435) res_cap_dcn301.num_pll = 2; pool->base.res_cap = &res_cap_dcn301; pool->base.funcs = &dcn301_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = 
pool->base.res_cap->num_timing_generator; pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a enabled by default*/ dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.max_slave_planes = 2; dc->caps.max_slave_yuv_planes = 2; dc->caps.max_slave_rgb_planes = 2; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; dc->caps.color.dpp.input_lut_shared = 0; dc->caps.color.dpp.icsc = 1; dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr dc->caps.color.dpp.dgam_rom_caps.srgb = 1; dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; dc->caps.color.dpp.dgam_rom_caps.pq = 1; dc->caps.color.dpp.dgam_rom_caps.hlg = 1; dc->caps.color.dpp.post_csc = 1; dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; dc->caps.color.dpp.hw_3d_lut = 1; dc->caps.color.dpp.ogam_ram = 1; // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.dpp.ogam_rom_caps.pq = 0; dc->caps.color.dpp.ogam_rom_caps.hlg = 0; dc->caps.color.dpp.ocsc = 0; dc->caps.color.mpc.gamut_remap = 1; dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 dc->caps.color.mpc.ogam_ram = 1; dc->caps.color.mpc.ogam_rom_caps.srgb = 0; dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; /* read VBIOS LTTPR caps */ if 
(ctx->dc_bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; uint8_t is_vbios_lttpr_enable = 0; bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; } if (ctx->dc_bios->funcs->get_lttpr_interop) { enum bp_result bp_query_result; uint8_t is_vbios_interop_enabled = 0; bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); /************************************************* * Create resources * *************************************************/ /* Clock Sources for Pixel Clock*/ pool->base.clock_sources[DCN301_CLK_SRC_PLL0] = dcn301_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCN301_CLK_SRC_PLL1] = dcn301_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCN301_CLK_SRC_PLL2] = dcn301_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCN301_CLK_SRC_PLL3] = dcn301_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->base.clk_src_count = DCN301_CLK_SRC_TOTAL; /* todo: not reuse phy_pll registers */ pool->base.dp_clock_source = dcn301_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } } /* DCCG */ pool->base.dccg = dccg301_create(ctx, &dccg_regs, &dccg_shift, 
&dccg_mask); if (pool->base.dccg == NULL) { dm_error("DC: failed to create dccg!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } init_soc_bounding_box(dc, pool); if (!dc->debug.disable_pplib_wm_range && pool->base.pp_smu->nv_funcs.set_wm_ranges) set_wm_ranges(pool->base.pp_smu, &dcn3_01_soc); num_pipes = dcn3_01_ip.max_num_dpp; for (i = 0; i < dcn3_01_ip.max_num_dpp; i++) if (pipe_fuses & 1 << i) num_pipes--; dcn3_01_ip.max_num_dpp = num_pipes; dcn3_01_ip.max_num_otg = num_pipes; dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); /* IRQ */ init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dcn30_create(&init_data); if (!pool->base.irqs) goto create_fail; /* HUBBUB */ pool->base.hubbub = dcn301_hubbub_create(ctx); if (pool->base.hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto create_fail; } j = 0; /* HUBPs, DPPs, OPPs and TGs */ for (i = 0; i < pool->base.pipe_count; i++) { /* if pipe is disabled, skip instance of HW pipe, * i.e, skip ASIC register instance */ if ((pipe_fuses & (1 << i)) != 0) { DC_LOG_DEBUG("%s: fusing pipe %d\n", __func__, i); continue; } pool->base.hubps[j] = dcn301_hubp_create(ctx, i); if (pool->base.hubps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create hubps!\n"); goto create_fail; } pool->base.dpps[j] = dcn301_dpp_create(ctx, i); if (pool->base.dpps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpps!\n"); goto create_fail; } pool->base.opps[j] = dcn301_opp_create(ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto create_fail; } pool->base.timing_generators[j] = dcn301_timing_generator_create(ctx, i); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; } j++; } pool->base.timing_generator_count = j; pool->base.pipe_count = j; pool->base.mpcc_count = j; /* ABM (or ABMs for NV2x) */ /* 
TODO: */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); if (pool->base.multiple_abms[i] == NULL) { dm_error("DC: failed to create abm for pipe %d!\n", i); BREAK_TO_DEBUGGER(); goto create_fail; } } /* MPC and DSC */ pool->base.mpc = dcn301_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); if (pool->base.mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto create_fail; } for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn301_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create display stream compressor %d!\n", i); goto create_fail; } } /* DWB and MMHUBBUB */ if (!dcn301_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dwbc!\n"); goto create_fail; } if (!dcn301_mmhubbub_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mcif_wb!\n"); goto create_fail; } /* AUX and I2C */ for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn301_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto create_fail; } pool->base.hw_i2cs[i] = dcn301_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create hw i2c!!\n"); goto create_fail; } pool->base.sw_i2cs[i] = NULL; } /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ dcn301_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; return true; create_fail: dcn301_destruct(pool); return false; } struct resource_pool 
*dcn301_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc) { struct dcn301_resource_pool *pool = kzalloc(sizeof(struct dcn301_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dcn301_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); kfree(pool); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "reg_helper.h" #include "core_types.h" #include "dc_dmub_srv.h" #include "dcn301_panel_cntl.h" #include "atom.h" #define TO_DCN301_PANEL_CNTL(panel_cntl)\ container_of(panel_cntl, struct dcn301_panel_cntl, base) #define CTX \ dcn301_panel_cntl->base.ctx #define DC_LOGGER \ dcn301_panel_cntl->base.ctx->logger #define REG(reg)\ dcn301_panel_cntl->regs->reg #undef FN #define FN(reg_name, field_name) \ dcn301_panel_cntl->shift->field_name, dcn301_panel_cntl->mask->field_name static unsigned int dcn301_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) { uint64_t current_backlight; uint32_t round_result; uint32_t bl_period, bl_int_count; uint32_t bl_pwm, fractional_duty_cycle_en; uint32_t bl_period_mask, bl_pwm_mask; struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period); REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count); REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &bl_pwm); REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en); if (bl_int_count == 0) bl_int_count = 16; bl_period_mask = (1 << bl_int_count) - 1; bl_period &= bl_period_mask; bl_pwm_mask = bl_period_mask << (16 - bl_int_count); if (fractional_duty_cycle_en == 0) bl_pwm &= bl_pwm_mask; else bl_pwm &= 0xFFFF; current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count); if (bl_period == 0) bl_period = 0xFFFF; current_backlight = div_u64(current_backlight, bl_period); current_backlight = (current_backlight + 1) >> 1; current_backlight = (uint64_t)(current_backlight) * bl_period; round_result = (uint32_t)(current_backlight & 0xFFFFFFFF); round_result = (round_result >> (bl_int_count-1)) & 1; current_backlight >>= bl_int_count; current_backlight += round_result; return (uint32_t)(current_backlight); } static uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t 
value; uint32_t current_backlight; /* It must not be 0, so we have to restore them * Bios bug w/a - period resets to zero, * restoring to cache values which is always correct */ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value); if (value == 0 || value == 1) { if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) { REG_WRITE(BL_PWM_CNTL, panel_cntl->stored_backlight_registers.BL_PWM_CNTL); REG_WRITE(BL_PWM_CNTL2, panel_cntl->stored_backlight_registers.BL_PWM_CNTL2); REG_WRITE(BL_PWM_PERIOD_CNTL, panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL); REG_UPDATE(PWRSEQ_REF_DIV, BL_PWM_REF_DIV, panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); } else { /* TODO: Note: This should not really happen since VBIOS * should have initialized PWM registers on boot. */ REG_WRITE(BL_PWM_CNTL, 0xC000FA00); REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0); } } else { panel_cntl->stored_backlight_registers.BL_PWM_CNTL = REG_READ(BL_PWM_CNTL); panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = REG_READ(BL_PWM_CNTL2); panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = REG_READ(BL_PWM_PERIOD_CNTL); REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV, &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); } // Enable the backlight output REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1); // Unlock group 2 backlight registers REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); current_backlight = dcn301_get_16_bit_backlight_from_pwm(panel_cntl); return current_backlight; } static void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(*panel_cntl); kfree(dcn301_panel_cntl); *panel_cntl = NULL; } static bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t value; REG_GET(PWRSEQ_CNTL, PANEL_BLON, &value); return value; } static bool 
dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t pwr_seq_state, dig_on, dig_on_ovrd; REG_GET(PWRSEQ_STATE, PANEL_PWRSEQ_TARGET_STATE_R, &pwr_seq_state); REG_GET_2(PWRSEQ_CNTL, PANEL_DIGON, &dig_on, PANEL_DIGON_OVRD, &dig_on_ovrd); return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1); } static void dcn301_store_backlight_level(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); panel_cntl->stored_backlight_registers.BL_PWM_CNTL = REG_READ(BL_PWM_CNTL); panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = REG_READ(BL_PWM_CNTL2); panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = REG_READ(BL_PWM_PERIOD_CNTL); REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV, &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); } static const struct panel_cntl_funcs dcn301_link_panel_cntl_funcs = { .destroy = dcn301_panel_cntl_destroy, .hw_init = dcn301_panel_cntl_hw_init, .is_panel_backlight_on = dcn301_is_panel_backlight_on, .is_panel_powered_on = dcn301_is_panel_powered_on, .store_backlight_level = dcn301_store_backlight_level, .get_current_backlight = dcn301_get_16_bit_backlight_from_pwm, }; void dcn301_panel_cntl_construct( struct dcn301_panel_cntl *dcn301_panel_cntl, const struct panel_cntl_init_data *init_data, const struct dce_panel_cntl_registers *regs, const struct dcn301_panel_cntl_shift *shift, const struct dcn301_panel_cntl_mask *mask) { dcn301_panel_cntl->regs = regs; dcn301_panel_cntl->shift = shift; dcn301_panel_cntl->mask = mask; dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs; dcn301_panel_cntl->base.ctx = init_data->ctx; dcn301_panel_cntl->base.inst = init_data->inst; }
/* --- extraction metadata (not source code): repo "linux-master";
 * file "drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c" ends above;
 * a second file (apparently dc/link/link_dpms.c) begins below. ---
 */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* FILE POLICY AND INTENDED USAGE:
 * This file owns the programming sequence of stream's dpms state associated
 * with the link and link's enable/disable sequences as result of the stream's
 * dpms state change.
 *
 * TODO - The reason link owns stream's dpms programming sequence is
 * because dpms programming sequence is highly dependent on underlying signal
 * specific link protocols. This unfortunately causes link to own a portion of
 * stream state programming sequence. This creates a gray area where the
 * boundary between link and stream is not clearly defined.
 */

#include "link_dpms.h"
#include "link_hwss.h"
#include "link_validation.h"
#include "accessories/link_fpga.h"
#include "accessories/link_dp_trace.h"
#include "protocols/link_dpcd.h"
#include "protocols/link_ddc.h"
#include "protocols/link_hpd.h"
#include "protocols/link_dp_phy.h"
#include "protocols/link_dp_capability.h"
#include "protocols/link_dp_training.h"
#include "protocols/link_edp_panel_control.h"
#include "protocols/link_dp_dpia_bw.h"

#include "dm_helpers.h"
#include "link_enc_cfg.h"
#include "resource.h"
#include "dsc.h"
#include "dccg.h"
#include "clk_mgr.h"
#include "atomfirmware.h"

/* Logging helpers: DC_LOGGER_INIT is intentionally a no-op in this file. */
#define DC_LOGGER_INIT(logger)

#define LINK_INFO(...) \
	DC_LOG_HW_HOTPLUG(  \
		__VA_ARGS__)

#define RETIMER_REDRIVER_INFO(...) \
	DC_LOG_RETIMER_REDRIVER(  \
		__VA_ARGS__)

#include "dc/dcn30/dcn30_vpg.h"

#define MAX_MTP_SLOT_COUNT 64
#define LINK_TRAINING_ATTEMPTS 4
/* MST PBN margin factor, 1.006 scaled by 1000 (see get_pbn_from_bw_in_kbps). */
#define PEAK_FACTOR_X1000 1006

/* Blank every lit-up DP display in the system: for each DP link with a sink,
 * read LTTPR caps (DP 2.0 requires this read first), then check DPCD
 * DP_SET_POWER and blank the stream if the sink reports D0.
 */
void link_blank_all_dp_displays(struct dc *dc)
{
	unsigned int i;
	uint8_t dpcd_power_state = '\0';
	enum dc_status status = DC_ERROR_UNEXPECTED;

	for (i = 0; i < dc->link_count; i++) {
		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
			(dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
			continue;

		/* DP 2.0 spec requires that we read LTTPR caps first */
		dp_retrieve_lttpr_cap(dc->links[i]);
		/* if any of the displays are lit up turn them off */
		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
							&dpcd_power_state, sizeof(dpcd_power_state));

		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
			link_blank_dp_stream(dc->links[i], true);
	}
}

/* Same as above but for eDP links with a detected panel; no LTTPR read. */
void link_blank_all_edp_displays(struct dc *dc)
{
	unsigned int i;
	uint8_t dpcd_power_state = '\0';
	enum dc_status status = DC_ERROR_UNEXPECTED;

	for (i = 0; i < dc->link_count; i++) {
		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) ||
			(!dc->links[i]->edp_sink_present))
			continue;

		/* if any of the displays are lit up turn them off */
		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
							&dpcd_power_state, sizeof(dpcd_power_state));

		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
			link_blank_dp_stream(dc->links[i], true);
	}
}

/* Blank the stream on a single DP/eDP link: find the stream encoder attached
 * to the link's DIG front end and blank it, then power down the receiver
 * (unless a workaround flag asks to keep it powered and this is not hw init).
 */
void link_blank_dp_stream(struct dc_link *link, bool hw_init)
{
	unsigned int j;
	struct dc *dc = link->ctx->dc;
	enum signal_type signal = link->connector_signal;

	if ((signal == SIGNAL_TYPE_EDP) ||
		(signal == SIGNAL_TYPE_DISPLAY_PORT)) {

		/* NOTE(review): is_dig_enabled is called without a NULL check while
		 * get_dig_frontend is checked - presumably always implemented for
		 * PHY endpoints; confirm against link_encoder implementations.
		 */
		if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
			link->link_enc->funcs->get_dig_frontend &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);

			if (fe != ENGINE_ID_UNKNOWN)
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (fe == dc->res_pool->stream_enc[j]->id) {
						dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
								dc->res_pool->stream_enc[j]);
						break;
					}
				}
		}

		if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
			dpcd_write_rx_power_ctrl(link, false);
	}
}

/* Turn dpms off for every stream mastered by this link via the regular
 * commit path, then force the link PHY off in case VBIOS enabled the link
 * without any pipe_ctx recording it.
 */
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
	struct pipe_ctx *pipes[MAX_PIPES];
	struct dc_state *state = link->dc->current_state;
	uint8_t count;
	int i;
	struct dc_stream_update stream_update;
	bool dpms_off = true;
	struct link_resource link_res = {0};

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	link_get_master_pipes_with_dpms_on(link, state, &count, pipes);

	for (i = 0; i < count; i++) {
		stream_update.stream = pipes[i]->stream;
		dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
				pipes[i]->stream, &stream_update,
				state);
	}

	/* link can be also enabled by vbios. In this case it is not recorded
	 * in pipe_ctx. Disable link phy here to make sure it is completely off
	 */
	dp_disable_link_phy(link, &link_res, link->connector_signal);
}

/* Re-arm the HPD filter after resume (skipped for virtual links). */
void link_resume(struct dc_link *link)
{
	if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
		program_hpd_filter(link);
}

/* This function returns true if the pipe is used to feed video signal directly
 * to the link.
*/
static bool is_master_pipe_for_link(const struct dc_link *link,
		const struct pipe_ctx *pipe)
{
	return resource_is_pipe_type(pipe, OTG_MASTER) &&
			pipe->stream->link == link;
}

/*
 * This function finds all master pipes feeding to a given link with dpms set to
 * on in given dc state.
 */
void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
		struct dc_state *state,
		uint8_t *count,
		struct pipe_ctx *pipes[MAX_PIPES])
{
	int i;
	struct pipe_ctx *pipe = NULL;

	*count = 0;
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &state->res_ctx.pipe_ctx[i];

		if (is_master_pipe_for_link(link, pipe) &&
				pipe->stream->dpms_off == false) {
			pipes[(*count)++] = pipe;
		}
	}
}

/* Fetch per-board external HDMI retimer settings from the VBIOS integrated
 * info table for the given DIG engine. Returns true and fills *settings only
 * when the table advertises ext HDMI support and the values pass validation.
 */
static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
		enum engine_id eng_id,
		struct ext_hdmi_settings *settings)
{
	bool result = false;
	int i = 0;
	struct integrated_info *integrated_info =
			pipe_ctx->stream->ctx->dc_bios->integrated_info;

	if (integrated_info == NULL)
		return false;

	/*
	 * Get retimer settings from sbios for passing SI eye test for DCE11
	 * The setting values are varied based on board revision and port id
	 * Therefore the setting values of each ports is passed by sbios.
	 */

	// Check if current bios contains ext Hdmi settings
	if (integrated_info->gpu_cap_info & 0x20) {
		switch (eng_id) {
		case ENGINE_ID_DIGA:
			settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
			/* NOTE(review): reg_num (3G count) is loaded from the *_6g_reg_num
			 * field for every engine below - verify this matches the
			 * atomfirmware integrated info table layout before relying on it.
			 */
			settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
			settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
			memmove(settings->reg_settings,
					integrated_info->dp0_ext_hdmi_reg_settings,
					sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
			memmove(settings->reg_settings_6g,
					integrated_info->dp0_ext_hdmi_6g_reg_settings,
					sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
			result = true;
			break;
		case ENGINE_ID_DIGB:
			settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
			settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
			settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
			memmove(settings->reg_settings,
					integrated_info->dp1_ext_hdmi_reg_settings,
					sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
			memmove(settings->reg_settings_6g,
					integrated_info->dp1_ext_hdmi_6g_reg_settings,
					sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
			result = true;
			break;
		case ENGINE_ID_DIGC:
			settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
			settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
			settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
			memmove(settings->reg_settings,
					integrated_info->dp2_ext_hdmi_reg_settings,
					sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
			memmove(settings->reg_settings_6g,
					integrated_info->dp2_ext_hdmi_6g_reg_settings,
					sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
			result = true;
			break;
		case ENGINE_ID_DIGD:
			settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
			settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
			settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
			memmove(settings->reg_settings,
					integrated_info->dp3_ext_hdmi_reg_settings,
					sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
			memmove(settings->reg_settings_6g,
					integrated_info->dp3_ext_hdmi_6g_reg_settings,
					sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
			result = true;
			break;
		default:
			break;
		}

		if (result == true) {
			// Validate settings from bios integrated info table
			if (settings->slv_addr == 0)
				return false;
			if (settings->reg_num > 9)
				return false;
			if (settings->reg_num_6g > 3)
				return false;

			for (i = 0; i < settings->reg_num; i++) {
				if (settings->reg_settings[i].i2c_reg_index > 0x20)
					return false;
			}

			for (i = 0; i < settings->reg_num_6g; i++) {
				if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
					return false;
			}
		}
	}

	return result;
}

/* Submit a single blocking I2C write of `length` bytes to 7-bit `address`
 * through the DM helper; returns true on success.
 */
static bool write_i2c(struct pipe_ctx *pipe_ctx,
		uint8_t address, uint8_t *buffer, uint32_t length)
{
	struct i2c_command cmd = {0};
	struct i2c_payload payload = {0};

	/* memsets are redundant with the {0} initializers above but harmless. */
	memset(&payload, 0, sizeof(payload));
	memset(&cmd, 0, sizeof(cmd));

	cmd.number_of_payloads = 1;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;

	payload.address = address;
	payload.data = buffer;
	payload.length = length;
	payload.write = true;
	cmd.payloads = &payload;

	if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
			pipe_ctx->stream->link, &cmd))
		return true;

	return false;
}

/* Program the external HDMI retimer over I2C using the per-board settings
 * from get_ext_hdmi_settings(): 3G registers always, 6G registers above
 * 340 MHz, plus extra fixed writes for 640x480 (VGA) mode. On any I2C
 * failure the whole sequence is abandoned.
 */
static void write_i2c_retimer_setting(
		struct pipe_ctx *pipe_ctx,
		bool is_vga_mode,
		bool is_over_340mhz,
		struct ext_hdmi_settings *settings)
{
	uint8_t slave_address = (settings->slv_addr >> 1);
	uint8_t buffer[2];
	const uint8_t apply_rx_tx_change = 0x4;
	uint8_t offset = 0xA;
	uint8_t value = 0;
	int i = 0;
	bool i2c_success = false;
	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

	memset(&buffer, 0, sizeof(buffer));

	/* Start Ext-Hdmi programming*/

	for (i = 0; i < settings->reg_num; i++) {
		/* Apply 3G settings */
		if (settings->reg_settings[i].i2c_reg_index <= 0x20) {

			buffer[0] = settings->reg_settings[i].i2c_reg_index;
			buffer[1] = settings->reg_settings[i].i2c_reg_val;
			i2c_success = write_i2c(pipe_ctx, slave_address,
						buffer, sizeof(buffer));
			RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
				slave_address, buffer[0], buffer[1], i2c_success?1:0);

			if (!i2c_success)
				goto i2c_write_fail;

			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
			 * needs to be set to 1 on every 0xA-0xC write.
			 */
			if (settings->reg_settings[i].i2c_reg_index == 0xA ||
				settings->reg_settings[i].i2c_reg_index == 0xB ||
				settings->reg_settings[i].i2c_reg_index == 0xC) {

				/* Query current value from offset 0xA */
				if (settings->reg_settings[i].i2c_reg_index == 0xA)
					value = settings->reg_settings[i].i2c_reg_val;
				else {
					i2c_success =
						link_query_ddc_data(
						pipe_ctx->stream->link->ddc,
						slave_address, &offset, 1, &value, 1);
					if (!i2c_success)
						goto i2c_write_fail;
				}

				buffer[0] = offset;
				/* Set APPLY_RX_TX_CHANGE bit to 1 */
				buffer[1] = value | apply_rx_tx_change;
				i2c_success = write_i2c(pipe_ctx, slave_address,
						buffer, sizeof(buffer));
				RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
					slave_address, buffer[0], buffer[1], i2c_success?1:0);
				if (!i2c_success)
					goto i2c_write_fail;
			}
		}
	}

	/* Apply 3G settings */
	if (is_over_340mhz) {
		for (i = 0; i < settings->reg_num_6g; i++) {
			/* Apply 3G settings */
			/* NOTE(review): this guard indexes reg_settings[] while the loop
			 * iterates reg_settings_6g[] - looks like a copy/paste slip.
			 * It is masked by get_ext_hdmi_settings() already validating all
			 * reg_settings_6g indices <= 0x20; confirm intent before changing.
			 */
			if (settings->reg_settings[i].i2c_reg_index <= 0x20) {

				buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
				buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
				i2c_success = write_i2c(pipe_ctx, slave_address,
							buffer, sizeof(buffer));
				RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
					slave_address, buffer[0], buffer[1], i2c_success?1:0);

				if (!i2c_success)
					goto i2c_write_fail;

				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
				 * needs to be set to 1 on every 0xA-0xC write.
				 */
				if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
					settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
					settings->reg_settings_6g[i].i2c_reg_index == 0xC) {

					/* Query current value from offset 0xA */
					if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
						value = settings->reg_settings_6g[i].i2c_reg_val;
					else {
						i2c_success =
								link_query_ddc_data(
								pipe_ctx->stream->link->ddc,
								slave_address, &offset, 1, &value, 1);
						if (!i2c_success)
							goto i2c_write_fail;
					}

					buffer[0] = offset;
					/* Set APPLY_RX_TX_CHANGE bit to 1 */
					buffer[1] = value | apply_rx_tx_change;
					i2c_success = write_i2c(pipe_ctx, slave_address,
							buffer, sizeof(buffer));
					RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
						slave_address, buffer[0], buffer[1], i2c_success?1:0);
					if (!i2c_success)
						goto i2c_write_fail;
				}
			}
		}
	}

	if (is_vga_mode) {
		/* Program additional settings if using 640x480 resolution */

		/* Write offset 0xFF to 0x01 */
		buffer[0] = 0xff;
		buffer[1] = 0x01;
		i2c_success = write_i2c(pipe_ctx, slave_address,
				buffer, sizeof(buffer));
		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
				slave_address, buffer[0], buffer[1], i2c_success?1:0);
		if (!i2c_success)
			goto i2c_write_fail;

		/* Write offset 0x00 to 0x23 */
		buffer[0] = 0x00;
		buffer[1] = 0x23;
		i2c_success = write_i2c(pipe_ctx, slave_address,
				buffer, sizeof(buffer));
		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
				slave_address, buffer[0], buffer[1], i2c_success?1:0);
		if (!i2c_success)
			goto i2c_write_fail;

		/* Write offset 0xff to 0x00 */
		buffer[0] = 0xff;
		buffer[1] = 0x00;
		i2c_success = write_i2c(pipe_ctx, slave_address,
				buffer, sizeof(buffer));
		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
				slave_address, buffer[0], buffer[1], i2c_success?1:0);
		if (!i2c_success)
			goto i2c_write_fail;

	}

	return;

i2c_write_fail:
	DC_LOG_DEBUG("Set retimer failed");
}

/* Program a DP159-style retimer at fixed I2C address 0xBA with hard-coded
 * default values (used when the VBIOS provides no per-board settings).
 * Values differ for >340 MHz links and 640x480 (VGA) mode.
 */
static void write_i2c_default_retimer_setting(
		struct pipe_ctx *pipe_ctx,
		bool is_vga_mode,
		bool is_over_340mhz)
{
	uint8_t slave_address = (0xBA >> 1);
	uint8_t buffer[2];
	bool i2c_success = false;
	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

	memset(&buffer, 0, sizeof(buffer));

	/* Program Slave Address for tuning single integrity */
	/* Write offset 0x0A to 0x13 */
	buffer[0] = 0x0A;
	buffer[1] = 0x13;
	i2c_success = write_i2c(pipe_ctx, slave_address,
			buffer, sizeof(buffer));
	RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
		slave_address, buffer[0], buffer[1], i2c_success?1:0);
	if (!i2c_success)
		goto i2c_write_fail;

	/* Write offset 0x0A to 0x17 */
	buffer[0] = 0x0A;
	buffer[1] = 0x17;
	i2c_success = write_i2c(pipe_ctx, slave_address,
			buffer, sizeof(buffer));
	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
		slave_address, buffer[0], buffer[1], i2c_success?1:0);
	if (!i2c_success)
		goto i2c_write_fail;

	/* Write offset 0x0B to 0xDA or 0xD8 */
	buffer[0] = 0x0B;
	buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
	i2c_success = write_i2c(pipe_ctx, slave_address,
			buffer, sizeof(buffer));
	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
		slave_address, buffer[0], buffer[1], i2c_success?1:0);
	if (!i2c_success)
		goto i2c_write_fail;

	/* Write offset 0x0A to 0x17 */
	buffer[0] = 0x0A;
	buffer[1] = 0x17;
	i2c_success = write_i2c(pipe_ctx, slave_address,
			buffer, sizeof(buffer));
	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
		slave_address, buffer[0], buffer[1], i2c_success?1:0);
	if (!i2c_success)
		goto i2c_write_fail;

	/* Write offset 0x0C to 0x1D or 0x91 */
	buffer[0] = 0x0C;
	buffer[1] = is_over_340mhz ? 0x1D : 0x91;
	i2c_success = write_i2c(pipe_ctx, slave_address,
			buffer, sizeof(buffer));
	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
		slave_address, buffer[0], buffer[1], i2c_success?1:0);
	if (!i2c_success)
		goto i2c_write_fail;

	/* Write offset 0x0A to 0x17 */
	buffer[0] = 0x0A;
	buffer[1] = 0x17;
	i2c_success = write_i2c(pipe_ctx, slave_address,
			buffer, sizeof(buffer));
	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
		slave_address, buffer[0], buffer[1], i2c_success?1:0);
	if (!i2c_success)
		goto i2c_write_fail;

	if (is_vga_mode) {
		/* Program additional settings if using 640x480 resolution */

		/* Write offset 0xFF to 0x01 */
		buffer[0] = 0xff;
		buffer[1] = 0x01;
		i2c_success = write_i2c(pipe_ctx, slave_address,
				buffer, sizeof(buffer));
		RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
			slave_address, buffer[0], buffer[1], i2c_success?1:0);
		if (!i2c_success)
			goto i2c_write_fail;

		/* Write offset 0x00 to 0x23 */
		buffer[0] = 0x00;
		buffer[1] = 0x23;
		i2c_success = write_i2c(pipe_ctx, slave_address,
				buffer, sizeof(buffer));
		RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
			slave_address, buffer[0], buffer[1], i2c_success?1:0);
		if (!i2c_success)
			goto i2c_write_fail;

		/* Write offset 0xff to 0x00 */
		buffer[0] = 0xff;
		buffer[1] = 0x00;
		i2c_success = write_i2c(pipe_ctx, slave_address,
				buffer, sizeof(buffer));
		RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\
offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
			slave_address, buffer[0], buffer[1], i2c_success?1:0);
		if (!i2c_success)
			goto i2c_write_fail;
	}

	return;

i2c_write_fail:
	DC_LOG_DEBUG("Set default retimer failed");
}

/* Program the external HDMI redriver at fixed I2C address 0xF0: zero all 16
 * registers except offsets 3-6, with offset 6 depending on the pixel clock.
 */
static void write_i2c_redriver_setting(
		struct pipe_ctx *pipe_ctx,
		bool is_over_340mhz)
{
	uint8_t slave_address = (0xF0 >> 1);
	uint8_t
buffer[16];
	bool i2c_success = false;
	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

	memset(&buffer, 0, sizeof(buffer));

	// Program Slave Address for tuning single integrity
	buffer[3] = 0x4E;
	buffer[4] = 0x4E;
	buffer[5] = 0x4E;
	buffer[6] = is_over_340mhz ? 0x4E : 0x4A;

	i2c_success = write_i2c(pipe_ctx, slave_address,
					buffer, sizeof(buffer));
	RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\
\t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\
offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\
i2c_success = %d\n",
		slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);

	if (!i2c_success)
		DC_LOG_DEBUG("Set redriver failed");
}

/* Push the current stream topology (OTG/DIG/PHY indices, MST/DP2/USB4/ASSR
 * flags, dpms state) to the PSP content-protection firmware via the cp_psp
 * callback. No-op when the callback is not registered or no link encoder is
 * assigned.
 */
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
	struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
	struct link_encoder *link_enc = NULL;
	struct cp_psp_stream_config config = {0};
	enum dp_panel_mode panel_mode =
			dp_get_panel_mode(pipe_ctx->stream->link);

	if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
		return;

	link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
	ASSERT(link_enc);
	if (link_enc == NULL)
		return;

	/* otg instance */
	config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;

	/* dig front end */
	config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;

	/* stream encoder index */
	config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
	if (dp_is_128b_132b_signal(pipe_ctx))
		config.stream_enc_idx =
				pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;

	/* dig back end */
	config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;

	/* link encoder index */
	config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
	if (dp_is_128b_132b_signal(pipe_ctx))
		config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;

	/* dio output index is dpia index for DPIA endpoint & dcio index by default */
	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
		config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
	else
		config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;

	/* phy index */
	config.phy_idx = resource_transmitter_to_phy_idx(
			pipe_ctx->stream->link->dc, link_enc->transmitter);
	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
		/* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
		config.phy_idx = 0;

	/* stream properties */
	config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
	config.mst_enabled = (pipe_ctx->stream->signal ==
			SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
	config.dp2_enabled = dp_is_128b_132b_signal(pipe_ctx) ? 1 : 0;
	config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
			1 : 0;
	config.dpms_off = dpms_off;

	/* dm stream context */
	config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;

	cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}

/* Set or clear HDMI AV mute through the hw sequencer; no-op for non-HDMI. */
static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
		return;

	dc->hwss.set_avmute(pipe_ctx, enable);
}

/* Read-modify-write the sink's DP_MSTM_CTRL DPCD register to toggle MST. */
static void enable_mst_on_sink(struct dc_link *link, bool enable)
{
	unsigned char mstmCntl;

	core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
	if (enable)
		mstmCntl |= DP_MST_EN;
	else
		mstmCntl &= (~DP_MST_EN);

	core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
}

/* Log a dsc_optc_config, rendering the U4.28 fixed-point bytes_per_pixel as
 * a human-readable decimal with 7 fractional digits.
 */
static void dsc_optc_config_log(struct display_stream_compressor *dsc,
		struct dsc_optc_config *config)
{
	uint32_t precision = 1 << 28;
	uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
	uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
	uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
	DC_LOGGER_INIT(dsc->ctx->logger);

	/* 7 fractional digits decimal precision for bytes per pixel is enough because DSC
	 * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is
	 * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
	 */
ll_bytes_per_pix_fraq *= 10000000;
	ll_bytes_per_pix_fraq /= precision;

	DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
			config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
	DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
	DC_LOG_DSC("\tslice_width %d", config->slice_width);
}

/* Tell the sink (via DM helper DPCD write) to enable/disable DSC decoding.
 * Virtual signals trivially succeed.
 */
static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	bool result = false;

	if (dc_is_virtual_signal(stream->signal))
		result = true;
	else
		result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
	return result;
}

/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
 * i.e. after dp_enable_dsc_on_rx() had been called
 */
void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	DC_LOGGER_INIT(dsc->ctx->logger);

	/* count OPPs across the ODM chain; picture width is split among them */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	if (enable) {
		struct dsc_config dsc_cfg;
		struct dsc_optc_config dsc_optc_cfg;
		enum optc_dsc_mode optc_dsc_mode;

		/* Enable DSC hw block */
		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left +
				stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top +
				stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
		}
		/* restore full-picture values for the encoder/OPTC programming below */
		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
		dsc_cfg.pic_width *= opp_cnt;

		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ?
				OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;

		/* Enable DSC in encoder */
		if (dc_is_dp_signal(stream->signal) && !dp_is_128b_132b_signal(pipe_ctx)) {
			DC_LOG_DSC("Setting stream encoder DSC config for engine %d:",
					(int)pipe_ctx->stream_res.stream_enc->id);
			dsc_optc_config_log(dsc, &dsc_optc_cfg);
			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
					optc_dsc_mode,
					dsc_optc_cfg.bytes_per_pixel,
					dsc_optc_cfg.slice_width);

			/* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
		}

		/* Enable DSC in OPTC */
		DC_LOG_DSC("Setting optc DSC config for tg instance %d:",
				pipe_ctx->stream_res.tg->inst);
		dsc_optc_config_log(dsc, &dsc_optc_cfg);
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
				optc_dsc_mode,
				dsc_optc_cfg.bytes_per_pixel,
				dsc_optc_cfg.slice_width);
	} else {
		/* disable DSC in OPTC */
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
				pipe_ctx->stream_res.tg,
				OPTC_DSC_DISABLED, 0, 0);

		/* disable DSC in stream encoder */
		if (dc_is_dp_signal(stream->signal)) {
			if (dp_is_128b_132b_signal(pipe_ctx))
				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
						pipe_ctx->stream_res.hpo_dp_stream_enc,
						false,
						NULL,
						true);
			else {
				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
						pipe_ctx->stream_res.stream_enc,
						OPTC_DSC_DISABLED, 0, 0);
				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
						pipe_ctx->stream_res.stream_enc, false, NULL, true);
			}
		}

		/* disable DSC block */
		pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
	}
}

/*
 * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled;
 * hence PPS info packet update need to use frame update instead of immediate update.
 * Added parameter immediate_update for this purpose.
 * The decision to use frame update is hard-coded in function dp_update_dsc_config(),
 * which is the only place where a "false" would be passed in for param immediate_update.
 *
 * immediate_update is only applicable when DSC is enabled.
 */
bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* NOTE(review): dsc->ctx is referenced before the !dsc check below;
	 * safe only because DC_LOGGER_INIT expands to nothing in this file.
	 */
	DC_LOGGER_INIT(dsc->ctx->logger);

	if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
		return false;

	if (enable) {
		struct dsc_config dsc_cfg;
		uint8_t dsc_packed_pps[128];

		memset(&dsc_cfg, 0, sizeof(dsc_cfg));
		memset(dsc_packed_pps, 0, 128);

		/* Enable DSC hw block */
		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ?
true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;

		dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
		memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
		if (dc_is_dp_signal(stream->signal)) {
			DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n",
					(int)pipe_ctx->stream_res.stream_enc->id);
			if (dp_is_128b_132b_signal(pipe_ctx))
				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
						pipe_ctx->stream_res.hpo_dp_stream_enc,
						true,
						&dsc_packed_pps[0],
						immediate_update);
			else
				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
						pipe_ctx->stream_res.stream_enc,
						true,
						&dsc_packed_pps[0],
						immediate_update);
		}
	} else {
		/* disable DSC PPS in stream encoder */
		memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
		if (dc_is_dp_signal(stream->signal)) {
			if (dp_is_128b_132b_signal(pipe_ctx))
				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
						pipe_ctx->stream_res.hpo_dp_stream_enc,
						false,
						NULL,
						true);
			else
				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
						pipe_ctx->stream_res.stream_enc, false, NULL, true);
		}
	}

	return true;
}

/* Enable/disable DSC for a stream that has timing.flags.DSC set. On disable,
 * the RX is told first, then the HW pipeline; on enable only the HW pipeline
 * is programmed here (RX enable is handled by the caller before unblank).
 */
bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	bool result = false;

	if (!pipe_ctx->stream->timing.flags.DSC)
		goto out;
	if (!dsc)
		goto out;

	if (enable) {
		/* NOTE(review): the inner braces look like a leftover from a removed
		 * condition; kept as-is.
		 */
		{
			link_set_dsc_on_stream(pipe_ctx, true);
			result = true;
		}
	} else {
		dp_set_dsc_on_rx(pipe_ctx, false);
		link_set_dsc_on_stream(pipe_ctx, false);
		result = true;
	}
out:
	return result;
}

/* Reprogram DSC HW config and resend the PPS SDP (frame update, not
 * immediate) for an already-DSC-enabled stream; used for dynamic bpp change.
 */
bool link_update_dsc_config(struct pipe_ctx *pipe_ctx)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;

	if (!pipe_ctx->stream->timing.flags.DSC)
		return false;
	if (!dsc)
		return false;

	link_set_dsc_on_stream(pipe_ctx, true);
	link_set_dsc_pps_packet(pipe_ctx, true, false);
	return true;
}

/* For SST: sync the sink's DOWNSPREAD_CTRL IGNORE_MSA_TIMING_PARAM bit with
 * the stream (only writing DPCD when it actually changes). For MST: delegate
 * feature enablement to the DM helper.
 */
static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;

	if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
		struct dc_link *link = stream->link;
		union down_spread_ctrl old_downspread;
		union down_spread_ctrl new_downspread;

		memset(&old_downspread, 0, sizeof(old_downspread));

		core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
				&old_downspread.raw, sizeof(old_downspread));

		new_downspread.raw = old_downspread.raw;

		new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
				(stream->ignore_msa_timing_param) ? 1 : 0;

		if (new_downspread.raw != old_downspread.raw) {
			core_link_write_dpcd(link,
				DP_DOWNSPREAD_CTRL,
				&new_downspread.raw,
				sizeof(new_downspread));
		}

	} else {
		dm_helpers_mst_enable_stream_features(stream);
	}
}

/* Log an average-time-slots-per-MTP value as "X Y/1000" (slot X.Y),
 * rounding Y up to 3 decimal digits.
 */
static void log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
{
	const uint32_t VCP_Y_PRECISION = 1000;
	uint64_t vcp_x, vcp_y;
	DC_LOGGER_INIT(link->ctx->logger);

	// Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision
	avg_time_slots_per_mtp = dc_fixpt_add(
			avg_time_slots_per_mtp,
			dc_fixpt_from_fraction(
				1,
				2*VCP_Y_PRECISION));

	vcp_x = dc_fixpt_floor(
			avg_time_slots_per_mtp);
	vcp_y = dc_fixpt_floor(
			dc_fixpt_mul_int(
				dc_fixpt_sub_int(
					avg_time_slots_per_mtp,
					dc_fixpt_floor(
							avg_time_slots_per_mtp)),
				VCP_Y_PRECISION));

	if (link->type == dc_connection_mst_branch)
		DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream "
				"X: %llu "
				"Y: %llu/%d",
				vcp_x,
				vcp_y,
				VCP_Y_PRECISION);
	else
		DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream "
				"X: %llu "
				"Y: %llu/%d",
				vcp_x,
				vcp_y,
				VCP_Y_PRECISION);
}

/* PBN-per-timeslot for the link's current settings: link bandwidth in
 * MBytes/sec divided by the 54 MBytes/sec PBN unit.
 */
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
	struct fixed31_32 mbytes_per_sec;
	uint32_t link_rate_in_mbytes_per_sec = dp_link_bandwidth_kbps(stream->link,
			&stream->link->cur_link_settings);
	link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */

	mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);

	return dc_fixpt_div_int(mbytes_per_sec, 54);
}

/* Convert a bandwidth in kbps to MST PBN units with the spec'd 0.6% margin. */
static struct
act_retries; i++) {
		get_lane_status(link, link->cur_link_settings.lane_count,
				dpcd_lane_status, &lane_status_updated);

		/* give up immediately if the link dropped while waiting for ACT */
		if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
				!dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
				!dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
				!dp_is_interlane_aligned(lane_status_updated)) {
			DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
					"polling for ACT handled.");
			result = ACT_LINK_LOST;
			break;
		}
		core_link_read_dpcd(
				link,
				DP_PAYLOAD_TABLE_UPDATE_STATUS,
				&update_status.raw,
				1);

		if (update_status.bits.ACT_HANDLED == 1) {
			DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
			result = ACT_SUCCESS;
			break;
		}

		fsleep(5000);
	}

	if (result == ACT_FAILED) {
		DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
				"continue on. Something is wrong with the branch.");
	}

	return (result == ACT_SUCCESS);
}

/* Merge the DRM-proposed payload allocation table into the link's table:
 * existing VCP IDs keep their (possibly resized) entries; new VCP IDs are
 * bound to the given DIO/HPO stream encoders.
 */
static void update_mst_stream_alloc_table(
	struct dc_link *link,
	struct stream_encoder *stream_enc,
	struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
	const struct dc_dp_mst_stream_allocation_table *proposed_table)
{
	struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
	struct link_mst_stream_allocation *dc_alloc;

	int i;
	int j;

	/* if DRM proposed_table has more than one new payload */
	ASSERT(proposed_table->stream_count -
			link->mst_stream_alloc_table.stream_count < 2);

	/* copy proposed_table to link, add stream encoder */
	for (i = 0; i < proposed_table->stream_count; i++) {

		for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
			dc_alloc =
			&link->mst_stream_alloc_table.stream_allocations[j];

			if (dc_alloc->vcp_id ==
				proposed_table->stream_allocations[i].vcp_id) {

				work_table[i] = *dc_alloc;
				work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count;
				break; /* exit j loop */
			}
		}

		/* new vcp_id */
		if (j == link->mst_stream_alloc_table.stream_count) {
			work_table[i].vcp_id =
				proposed_table->stream_allocations[i].vcp_id;
			work_table[i].slot_count =
				proposed_table->stream_allocations[i].slot_count;
			work_table[i].stream_enc = stream_enc;
			work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
		}
	}

	/* update link->mst_stream_alloc_table with work_table */
	link->mst_stream_alloc_table.stream_count =
			proposed_table->stream_count;
	for (i = 0; i < MAX_CONTROLLER_NUM; i++)
		link->mst_stream_alloc_table.stream_allocations[i] =
				work_table[i];
}

/* Remove the entry bound to the given stream encoder (HPO entry if
 * hpo_dp_stream_enc is set, DIO entry otherwise) from the link's MST
 * allocation table, compacting the remaining entries.
 */
static void remove_stream_from_alloc_table(
		struct dc_link *link,
		struct stream_encoder *dio_stream_enc,
		struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
{
	int i = 0;
	struct link_mst_stream_allocation_table *table =
			&link->mst_stream_alloc_table;

	if (hpo_dp_stream_enc) {
		for (; i < table->stream_count; i++)
			if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
				break;
	} else {
		for (; i < table->stream_count; i++)
			if (dio_stream_enc == table->stream_allocations[i].stream_enc)
				break;
	}

	if (i < table->stream_count) {
		i++;
		for (; i < table->stream_count; i++)
			table->stream_allocations[i-1] = table->stream_allocations[i];
		memset(&table->stream_allocations[table->stream_count-1], 0,
				sizeof(struct link_mst_stream_allocation));
		table->stream_count--;
	}
}

/* Tear down the MST payload for a pipe under the temporary DRM workaround:
 * throttle VCP size to 0, push the (shrunken) table from DRM to the link,
 * then log the resulting allocation table. (Function continues past this
 * chunk.)
 */
static enum dc_status deallocate_mst_payload_with_temp_drm_wa(
		struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dc_dp_mst_stream_allocation_table proposed_table = {0};
	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
	int i;
	bool mst_mode = (link->type == dc_connection_mst_branch);
	/* adjust for drm changes*/
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	const struct dc_link_settings empty_link_settings = {0};
	DC_LOGGER_INIT(link->ctx->logger);

	if (link_hwss->ext.set_throttled_vcp_size)
		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
	if (link_hwss->ext.set_hblank_min_symbol_width)
		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
				&empty_link_settings,
				avg_time_slots_per_mtp);

	if (dm_helpers_dp_mst_write_payload_allocation_table(
			stream->ctx,
			stream,
			&proposed_table,
			false))
		update_mst_stream_alloc_table(
				link,
				pipe_ctx->stream_res.stream_enc,
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				&proposed_table);
	else
		DC_LOG_WARNING("Failed to update"
				"MST allocation table for"
				"pipe idx:%d\n",
				pipe_ctx->pipe_idx);

	DC_LOG_MST("%s"
			"stream_count: %d: ",
			__func__,
			link->mst_stream_alloc_table.stream_count);

	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
		DC_LOG_MST("stream_enc[%d]: %p "
				"stream[%d].hpo_dp_stream_enc: %p "
				"stream[%d].vcp_id: %d "
				"stream[%d].slot_count: %d\n",
				i,
				(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
				i,
				(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
				i,
				link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
				i,
				link->mst_stream_alloc_table.stream_allocations[i].slot_count);
	}

	if (link_hwss->ext.update_stream_allocation_table == NULL ||
			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
DC_LOG_DEBUG("Unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); if (mst_mode) { dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); } dm_helpers_dp_mst_send_payload_allocation( stream->ctx, stream, false); return DC_OK; } static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dc_dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); int i; bool mst_mode = (link->type == dc_connection_mst_branch); const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); const struct dc_link_settings empty_link_settings = {0}; DC_LOGGER_INIT(link->ctx->logger); if (link->dc->debug.temp_mst_deallocation_sequence) return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx); /* deallocate_mst_payload is called before disable link. When mode or * disable/enable monitor, new stream is created which is not in link * stream[] yet. For this, payload is not allocated yet, so de-alloc * should not done. For new mode set, map_resources will get engine * for new stream, so stream_enc->id should be validated until here. 
*/ /* slot X.Y */ if (link_hwss->ext.set_throttled_vcp_size) link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); if (link_hwss->ext.set_hblank_min_symbol_width) link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, &empty_link_settings, avg_time_slots_per_mtp); if (mst_mode) { /* when link is in mst mode, reply on mst manager to remove * payload */ if (dm_helpers_dp_mst_write_payload_allocation_table( stream->ctx, stream, &proposed_table, false)) update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); else DC_LOG_WARNING("Failed to update" "MST allocation table for" "pipe idx:%d\n", pipe_ctx->pipe_idx); } else { /* when link is no longer in mst mode (mst hub unplugged), * remove payload with default dc logic */ remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc); } DC_LOG_MST("%s" "stream_count: %d: ", __func__, link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); } /* update mst stream allocation table hardware state */ if (link_hwss->ext.update_stream_allocation_table == NULL || link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { DC_LOG_DEBUG("Unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); if (mst_mode) { dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); dm_helpers_dp_mst_send_payload_allocation( stream->ctx, 
stream, false); } return DC_OK; } /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm */ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dc_dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; int i; enum act_return_status ret; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* enable_link_dp_mst already check link->enabled_stream_count * and stream is in link->stream[]. This is called during set mode, * stream_enc is available. */ /* get calculate VC payload for stream: stream_alloc */ if (dm_helpers_dp_mst_write_payload_allocation_table( stream->ctx, stream, &proposed_table, true)) update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); else DC_LOG_WARNING("Failed to update" "MST allocation table for" "pipe idx:%d\n", pipe_ctx->pipe_idx); DC_LOG_MST("%s " "stream_count: %d: \n ", __func__, link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); } ASSERT(proposed_table.stream_count > 0); /* program DP source TX for payload */ if (link_hwss->ext.update_stream_allocation_table == NULL || link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { DC_LOG_ERROR("Failure: unknown encoding format\n"); 
return DC_ERROR_UNEXPECTED; } link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); /* send down message */ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); if (ret != ACT_LINK_LOST) { dm_helpers_dp_mst_send_payload_allocation( stream->ctx, stream, true); } /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); if (pbn_per_slot.value == 0) { DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. Cannot continue, returning DC_UNSUPPORTED_VALUE.\n"); return DC_UNSUPPORTED_VALUE; } pbn = get_pbn_from_timing(pipe_ctx); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); log_vcp_x_y(link, avg_time_slots_per_mtp); if (link_hwss->ext.set_throttled_vcp_size) link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); if (link_hwss->ext.set_hblank_min_symbol_width) link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, &link->cur_link_settings, avg_time_slots_per_mtp); return DC_OK; } struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( const struct dc_stream_state *stream, const struct dc_link *link) { struct fixed31_32 link_bw_effective = dc_fixpt_from_int( dp_link_bandwidth_kbps(link, &link->cur_link_settings)); struct fixed31_32 timeslot_bw_effective = dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); struct fixed31_32 timing_bw = dc_fixpt_from_int( dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link))); struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_div(timing_bw, timeslot_bw_effective); return avg_time_slots_per_mtp; } static bool write_128b_132b_sst_payload_allocation_table( const struct dc_stream_state *stream, struct dc_link *link, struct link_mst_stream_allocation_table *proposed_table, bool allocate) { const uint8_t vc_id = 1; /// VC ID always 1 for SST const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST bool result = false; uint8_t req_slot_count = 0; struct 
fixed31_32 avg_time_slots_per_mtp = { 0 }; union payload_table_update_status update_status = { 0 }; const uint32_t max_retries = 30; uint32_t retries = 0; DC_LOGGER_INIT(link->ctx->logger); if (allocate) { avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link); req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); /// Validation should filter out modes that exceed link BW ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); if (req_slot_count > MAX_MTP_SLOT_COUNT) return false; } else { /// Leave req_slot_count = 0 if allocate is false. } proposed_table->stream_count = 1; /// Always 1 stream for SST proposed_table->stream_allocations[0].slot_count = req_slot_count; proposed_table->stream_allocations[0].vcp_id = vc_id; if (link->aux_access_disabled) return true; /// Write DPCD 2C0 = 1 to start updating update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; core_link_write_dpcd( link, DP_PAYLOAD_TABLE_UPDATE_STATUS, &update_status.raw, 1); /// Program the changes in DPCD 1C0 - 1C2 ASSERT(vc_id == 1); core_link_write_dpcd( link, DP_PAYLOAD_ALLOCATE_SET, &vc_id, 1); ASSERT(start_time_slot == 0); core_link_write_dpcd( link, DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, &start_time_slot, 1); core_link_write_dpcd( link, DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, &req_slot_count, 1); /// Poll till DPCD 2C0 read 1 /// Try for at least 150ms (30 retries, with 5ms delay after each attempt) while (retries < max_retries) { if (core_link_read_dpcd( link, DP_PAYLOAD_TABLE_UPDATE_STATUS, &update_status.raw, 1) == DC_OK) { if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { DC_LOG_DP2("SST Update Payload: downstream payload table updated."); result = true; break; } } else { union dpcd_rev dpcdRev; if (core_link_read_dpcd( link, DP_DPCD_REV, &dpcdRev.raw, 1) != DC_OK) { DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " "of sink while polling payload table " "updated status bit."); break; } } retries++; fsleep(5000); } if (!result && retries == max_retries) { 
DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " "continue on. Something is wrong with the branch."); // TODO - DP2.0 Payload: Read and log the payload table from downstream branch } return result; } /* * Payload allocation/deallocation for SST introduced in DP2.0 */ static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct link_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; const struct dc_link_settings empty_link_settings = {0}; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* slot X.Y for SST payload deallocate */ if (!allocate) { avg_time_slots_per_mtp = dc_fixpt_from_int(0); log_vcp_x_y(link, avg_time_slots_per_mtp); if (link_hwss->ext.set_throttled_vcp_size) link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); if (link_hwss->ext.set_hblank_min_symbol_width) link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, &empty_link_settings, avg_time_slots_per_mtp); } /* calculate VC payload and update branch with new payload allocation table*/ if (!write_128b_132b_sst_payload_allocation_table( stream, link, &proposed_table, allocate)) { DC_LOG_ERROR("SST Update Payload: Failed to update " "allocation table for " "pipe idx: %d\n", pipe_ctx->pipe_idx); return DC_FAIL_DP_PAYLOAD_ALLOCATION; } proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; ASSERT(proposed_table.stream_count == 1); //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p " "vcp_id: %d " "slot_count: %d\n", (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, proposed_table.stream_allocations[0].vcp_id, proposed_table.stream_allocations[0].slot_count); /* program DP source TX for payload */ 
link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &proposed_table); /* poll for ACT handled */ if (!poll_for_allocation_change_trigger(link)) { // Failures will result in blackscreen and errors logged BREAK_TO_DEBUGGER(); } /* slot X.Y for SST payload allocate */ if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) { avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link); log_vcp_x_y(link, avg_time_slots_per_mtp); if (link_hwss->ext.set_throttled_vcp_size) link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); if (link_hwss->ext.set_hblank_min_symbol_width) link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, &link->cur_link_settings, avg_time_slots_per_mtp); } /* Always return DC_OK. * If part of sequence fails, log failure(s) and show blackscreen */ return DC_OK; } enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; struct dc_dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* decrease throttled vcp size */ pbn_per_slot = get_pbn_per_slot(stream); pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); if (link_hwss->ext.set_throttled_vcp_size) link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); if (link_hwss->ext.set_hblank_min_symbol_width) link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, &link->cur_link_settings, avg_time_slots_per_mtp); /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, stream, true); /* notify immediate branch device table update */ if 
(dm_helpers_dp_mst_write_payload_allocation_table( stream->ctx, stream, &proposed_table, true)) { /* update mst stream allocation table software state */ update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); } else { DC_LOG_WARNING("Failed to update" "MST allocation table for" "pipe idx:%d\n", pipe_ctx->pipe_idx); } DC_LOG_MST("%s " "stream_count: %d: \n ", __func__, link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); } ASSERT(proposed_table.stream_count > 0); /* update mst stream allocation table hardware state */ if (link_hwss->ext.update_stream_allocation_table == NULL || link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); /* poll for immediate branch device ACT handled */ dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); return DC_OK; } enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; struct dc_dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; enum act_return_status ret; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 
DC_LOGGER_INIT(link->ctx->logger); /* notify immediate branch device table update */ if (dm_helpers_dp_mst_write_payload_allocation_table( stream->ctx, stream, &proposed_table, true)) { /* update mst stream allocation table software state */ update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); } DC_LOG_MST("%s " "stream_count: %d: \n ", __func__, link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); } ASSERT(proposed_table.stream_count > 0); /* update mst stream allocation table hardware state */ if (link_hwss->ext.update_stream_allocation_table == NULL || link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); /* poll for immediate branch device ACT handled */ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); if (ret != ACT_LINK_LOST) { /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, stream, true); } /* increase throttled vcp size */ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); pbn_per_slot = get_pbn_per_slot(stream); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); if (link_hwss->ext.set_throttled_vcp_size) link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); if (link_hwss->ext.set_hblank_min_symbol_width) 
		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
				&link->cur_link_settings,
				avg_time_slots_per_mtp);

	return DC_OK;
}

/*
 * Tear down a DP link: disable the PHY, power down eDP (unless the
 * platform opts out of power sequencing), restore an MST sink to SST
 * mode, and turn FEC off for 8b/10b links. For MST, the link is only
 * disabled once the last VC payload has been deallocated.
 */
static void disable_link_dp(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	/* local copy taken before the PHY is disabled — presumably
	 * dp_disable_link_phy can clear link->cur_link_settings;
	 * NOTE(review): verify against dp_disable_link_phy
	 */
	struct dc_link_settings link_settings = link->cur_link_settings;

	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST &&
			link->mst_stream_alloc_table.stream_count > 0)
		/* disable MST link only when last vc payload is deallocated */
		return;

	dp_disable_link_phy(link, link_res, signal);

	if (link->connector_signal == SIGNAL_TYPE_EDP) {
		if (!link->dc->config.edp_no_power_sequencing)
			link->dc->hwss.edp_power_control(link, false);
	}

	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		/* set the sink to SST mode after disabling the link */
		enable_mst_on_sink(link, false);

	if (link_dp_get_encoding_format(&link_settings) ==
			DP_8b_10b_ENCODING) {
		dp_set_fec_enable(link, false);
		dp_set_fec_ready(link, link_res, false);
	}
}

/*
 * Signal-agnostic link disable. DP signals go through the DP-specific
 * teardown; all other non-virtual signals use the generic hwss output
 * disable. link_active is cleared immediately except for MST, where it
 * is only cleared once no stream still uses the link.
 */
static void disable_link(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	if (dc_is_dp_signal(signal)) {
		disable_link_dp(link, link_res, signal);
	} else if (signal != SIGNAL_TYPE_VIRTUAL) {
		link->dc->hwss.disable_link_output(link, link_res, signal);
	}

	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		/* MST disable link only when no stream use the link */
		if (link->mst_stream_alloc_table.stream_count <= 0)
			link->link_status.link_active = false;
	} else {
		link->link_status.link_active = false;
	}
}

/*
 * Bring up an HDMI (or DVI) link: apply external retimer/redriver I2C
 * settings when the board has one, program SCDC scrambling data, and
 * enable the TMDS output. (Continues on the following lines.)
 */
static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	enum dc_color_depth display_color_depth;
	enum engine_id eng_id;
	struct ext_hdmi_settings settings = {0};
	bool is_over_340mhz = false;
	/* VGA-class mode detection used by the retimer programming below */
	bool is_vga_mode = (stream->timing.h_addressable == 640)
			&& (stream->timing.v_addressable == 480);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	if (stream->phy_pix_clk ==
0) stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; if (stream->phy_pix_clk > 340000) is_over_340mhz = true; if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps & EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { /* DP159, Retimer settings */ eng_id = pipe_ctx->stream_res.stream_enc->id; if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) { write_i2c_retimer_setting(pipe_ctx, is_vga_mode, is_over_340mhz, &settings); } else { write_i2c_default_retimer_setting(pipe_ctx, is_vga_mode, is_over_340mhz); } } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { /* PI3EQX1204, Redriver settings */ write_i2c_redriver_setting(pipe_ctx, is_over_340mhz); } } if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) write_scdc_data( stream->link->ddc, stream->phy_pix_clk, stream->timing.flags.LTE_340MCSC_SCRAMBLE); memset(&stream->link->cur_link_settings, 0, sizeof(struct dc_link_settings)); display_color_depth = stream->timing.display_color_depth; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) display_color_depth = COLOR_DEPTH_888; /* We need to enable stream encoder for TMDS first to apply 1/4 TMDS * character clock in case that beyond 340MHz. 
*/ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) link_hwss->setup_stream_encoder(pipe_ctx); dc->hwss.enable_tmds_link_output( link, &pipe_ctx->link_res, pipe_ctx->stream->signal, pipe_ctx->clock_source->id, display_color_depth, stream->phy_pix_clk); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) read_scdc_data(link->ddc); } static enum dc_status enable_link_dp(struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; bool skip_video_pattern; struct dc_link *link = stream->link; const struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings; bool fec_enable; int i; bool apply_seamless_boot_optimization = false; uint32_t bl_oled_enable_delay = 50; // in ms uint32_t post_oui_delay = 30; // 30ms /* Reduce link bandwidth between failed link training attempts. */ bool do_fallback = false; int lt_attempts = LINK_TRAINING_ATTEMPTS; // Increase retry count if attempting DP1.x on FIXED_VS link if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) lt_attempts = 10; // check for seamless boot for (i = 0; i < state->stream_count; i++) { if (state->streams[i]->apply_seamless_boot_optimization) { apply_seamless_boot_optimization = true; break; } } /* * If the link is DP-over-USB4 do the following: * - Train with fallback when enabling DPIA link. Conventional links are * trained with fallback during sink detection. * - Allocate only what the stream needs for bw in Gbps. Inform the CM * in case stream needs more or less bw from what has been allocated * earlier at plug time. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { do_fallback = true; } /* * Temporary w/a to get DP2.0 link rates to work with SST. * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. 
*/ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING && pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && link->dc->debug.set_mst_en_for_sst) { enable_mst_on_sink(link, true); } if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ if (!link->dc->config.edp_no_power_sequencing) link->dc->hwss.edp_power_control(link, true); link->dc->hwss.edp_wait_for_hpd_ready(link, true); } if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ } else { pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); } // during mode switch we do DP_SET_POWER off then on, and OUI is lost dpcd_set_source_specific_data(link); if (link->dpcd_sink_ext_caps.raw != 0) { post_oui_delay += link->panel_config.pps.extra_post_OUI_ms; msleep(post_oui_delay); } // similarly, mode switch can cause loss of cable ID dpcd_write_cable_id_to_dprx(link); skip_video_pattern = true; if (link_settings->link_rate == LINK_RATE_LOW) skip_video_pattern = false; if (perform_link_training_with_retries(link_settings, skip_video_pattern, lt_attempts, pipe_ctx, pipe_ctx->stream->signal, do_fallback)) { status = DC_OK; } else { status = DC_FAIL_DP_LINK_TRAINING; } if (link->preferred_training_settings.fec_enable) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) dp_set_fec_enable(link, fec_enable); // during mode set we do DP_SET_POWER off then on, aux writes are lost if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { set_cached_brightness_aux(link); if (link->dpcd_sink_ext_caps.bits.oled 
				== 1)
			msleep(bl_oled_enable_delay);
		edp_backlight_enable_aux(link, true);
	}

	return status;
}

/* eDP reuses the full DP enable sequence (power sequencing and HPD
 * wait are handled inside enable_link_dp for the eDP signal type).
 */
static enum dc_status enable_link_edp(
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx)
{
	return enable_link_dp(state, pipe_ctx);
}

/*
 * Bring up an LVDS link: derive the PHY pixel clock from the stream
 * timing when not already set, clear any stale link settings, then
 * program the LVDS output via hwss.
 */
static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dc *dc = stream->ctx->dc;

	if (stream->phy_pix_clk == 0)
		stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;

	/* LVDS carries no DP link settings; zero them out */
	memset(&stream->link->cur_link_settings, 0,
			sizeof(struct dc_link_settings));
	dc->hwss.enable_lvds_link_output(
			link,
			&pipe_ctx->link_res,
			pipe_ctx->clock_source->id,
			stream->phy_pix_clk);
}

/*
 * Enable a DP MST link. The shared link is trained only once: if it is
 * already active, return immediately. Otherwise clear any stale payload
 * table on the branch, drain pending down-replies, switch the sink to
 * MST mode, and run the common DP enable sequence.
 */
static enum dc_status enable_link_dp_mst(
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx)
{
	struct dc_link *link = pipe_ctx->stream->link;
	unsigned char mstm_cntl;

	/* sink signal type after MST branch is MST. Multiple MST sinks
	 * share one link. Link DP PHY is enable or training only once.
	 */
	if (link->link_status.link_active)
		return DC_OK;

	/* clear payload table */
	core_link_read_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1);
	if (mstm_cntl & DP_MST_EN)
		dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);

	/* to make sure the pending down rep can be processed
	 * before enabling the link
	 */
	dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);

	/* set the sink to MST mode before enabling the link */
	enable_mst_on_sink(link, true);

	return enable_link_dp(state, pipe_ctx);
}

/*
 * Signal-type dispatcher for link enable. (Continues on the following
 * lines; on success it marks link_status.link_active.)
 */
static enum dc_status enable_link(
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx)
{
	enum dc_status status = DC_ERROR_UNEXPECTED;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;

	/* There's some scenarios where driver is unloaded with display
	 * still enabled. When driver is reloaded, it may cause a display
	 * to not light up if there is a mismatch between old and new
	 * link settings. Need to call disable first before enabling at
	 * new link settings.
*/ if (link->link_status.link_active && !stream->skip_edp_power_down) disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); switch (pipe_ctx->stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT: status = enable_link_dp(state, pipe_ctx); break; case SIGNAL_TYPE_EDP: status = enable_link_edp(state, pipe_ctx); break; case SIGNAL_TYPE_DISPLAY_PORT_MST: status = enable_link_dp_mst(state, pipe_ctx); msleep(200); break; case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_HDMI_TYPE_A: enable_link_hdmi(pipe_ctx); status = DC_OK; break; case SIGNAL_TYPE_LVDS: enable_link_lvds(pipe_ctx); status = DC_OK; break; case SIGNAL_TYPE_VIRTUAL: status = DC_OK; break; default: break; } if (status == DC_OK) { pipe_ctx->stream->link->link_status.link_active = true; } return status; } void link_set_dpms_off(struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; ASSERT(is_master_pipe_for_link(link, pipe_ctx)); if (dp_is_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); if (pipe_ctx->stream->sink) { if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, pipe_ctx->stream->sink->edid_caps.display_name, pipe_ctx->stream->signal); } } if (dc_is_virtual_signal(pipe_ctx->stream->signal)) return; if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) set_avmute(pipe_ctx, true); } dc->hwss.disable_audio_stream(pipe_ctx); update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); else if (pipe_ctx->stream->signal == 
SIGNAL_TYPE_DISPLAY_PORT && dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, false); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { struct ext_hdmi_settings settings = {0}; enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id; unsigned short masked_chip_caps = link->chip_caps & EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; //Need to inform that sink is going to use legacy HDMI mode. write_scdc_data( link->ddc, 165000,//vbios only handles 165Mhz. false); if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { /* DP159, Retimer settings */ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) write_i2c_retimer_setting(pipe_ctx, false, false, &settings); else write_i2c_default_retimer_setting(pipe_ctx, false, false); } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { /* PI3EQX1204, Redriver settings */ write_i2c_redriver_setting(pipe_ctx, false); } } if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && !dp_is_128b_132b_signal(pipe_ctx)) { /* In DP1.x SST mode, our encoder will go to TPS1 * when link is on but stream is off. * Disabling link before stream will avoid exposing TPS1 pattern * during the disable sequence as it will confuse some receivers * state machine. 
* In DP2 or MST mode, our encoder will stay video active */ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); dc->hwss.disable_stream(pipe_ctx); } else { dc->hwss.disable_stream(pipe_ctx); if (!pipe_ctx->stream->skip_edp_power_down) { disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); } } if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) link_set_dsc_enable(pipe_ctx, false); } if (dp_is_128b_132b_signal(pipe_ctx)) { if (pipe_ctx->stream_res.tg->funcs->set_out_mux) pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); } if (vpg && vpg->funcs->vpg_powerdown) vpg->funcs->vpg_powerdown(vpg); } void link_set_dpms_on( struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; enum dc_status status; struct link_encoder *link_enc; enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); bool apply_edp_fast_boot_optimization = pipe_ctx->stream->apply_edp_fast_boot_optimization; ASSERT(is_master_pipe_for_link(link, pipe_ctx)); if (dp_is_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); if (pipe_ctx->stream->sink) { if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, pipe_ctx->stream->sink->edid_caps.display_name, pipe_ctx->stream->signal); } } if (dc_is_virtual_signal(pipe_ctx->stream->signal)) return; link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && !dp_is_128b_132b_signal(pipe_ctx)) { if (link_enc) 
link_enc->funcs->setup( link_enc, pipe_ctx->stream->signal); } pipe_ctx->stream->link->link_state_valid = true; if (pipe_ctx->stream_res.tg->funcs->set_out_mux) { if (dp_is_128b_132b_signal(pipe_ctx)) otg_out_dest = OUT_MUX_HPO_DP; else otg_out_dest = OUT_MUX_DIO; pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); } link_hwss->setup_stream_attribute(pipe_ctx); pipe_ctx->stream->apply_edp_fast_boot_optimization = false; // Enable VPG before building infoframe if (vpg && vpg->funcs->vpg_poweron) vpg->funcs->vpg_poweron(vpg); resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); /* Do not touch link on seamless boot optimization. */ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; /* Still enable stream features & audio on seamless boot for DP external displays */ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { enable_stream_features(pipe_ctx); dc->hwss.enable_audio_stream(pipe_ctx); } update_psp_stream_config(pipe_ctx, false); return; } /* eDP lit up by bios already, no need to enable again. */ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization && !pipe_ctx->stream->timing.flags.DSC && !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; update_psp_stream_config(pipe_ctx, false); return; } if (pipe_ctx->stream->dpms_off) return; /* Have to setup DSC before DIG FE and BE are connected (which happens before the * link training). This is to make sure the bandwidth sent to DIG BE won't be * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag * will be automatically set at a later time when the video is enabled * (DP_VID_STREAM_EN = 1). 
*/ if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) link_set_dsc_enable(pipe_ctx, true); } status = enable_link(state, pipe_ctx); if (status != DC_OK) { DC_LOG_WARNING("enabling link %u failed: %d\n", pipe_ctx->stream->link->link_index, status); /* Abort stream enable *unless* the failure was due to * DP link training - some DP monitors will recover and * show the stream anyway. But MST displays can't proceed * without link training. */ if (status != DC_FAIL_DP_LINK_TRAINING || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { if (false == stream->link->link_status.link_active) disable_link(stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); BREAK_TO_DEBUGGER(); return; } } /* turn off otg test pattern if enable */ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, COLOR_DEPTH_UNDEFINED); /* This second call is needed to reconfigure the DIG * as a workaround for the incorrect value being applied * from transmitter control. 
*/ if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || dp_is_128b_132b_signal(pipe_ctx))) { if (link_enc) link_enc->funcs->setup( link_enc, pipe_ctx->stream->signal); } dc->hwss.enable_stream(pipe_ctx); /* Set DPS PPS SDP (AKA "info frames") */ if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { dp_set_dsc_on_rx(pipe_ctx, true); link_set_dsc_pps_packet(pipe_ctx, true, true); } } if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) allocate_mst_payload(pipe_ctx); else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, true); dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); if (stream->sink_patches.delay_ignore_msa > 0) msleep(stream->sink_patches.delay_ignore_msa); if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); update_psp_stream_config(pipe_ctx, false); dc->hwss.enable_audio_stream(pipe_ctx); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { set_avmute(pipe_ctx, false); } }
linux-master
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include "link_hwss_hpo_frl.h"
#include "core_types.h"
#include "virtual/virtual_link_hwss.h"

/*
 * Program the HDMI FRL stream attributes on the HPO FRL stream encoder,
 * passing along how many ODM segments feed this stream.
 */
static void setup_hpo_frl_stream_attribute(struct pipe_ctx *pipe_ctx)
{
	struct hpo_frl_stream_encoder *frl_enc = pipe_ctx->stream_res.hpo_frl_stream_enc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *iter;
	int num_segments = 1;

	/* Count this pipe plus every chained ODM pipe. */
	for (iter = pipe_ctx->next_odm_pipe; iter != NULL; iter = iter->next_odm_pipe)
		num_segments++;

	frl_enc->funcs->hdmi_frl_set_stream_attribute(
			frl_enc,
			&stream->timing,
			&stream->link->frl_link_settings.borrow_params,
			num_segments);
}

/* FRL uses the virtual stubs for encoder setup/reset; only the stream
 * attribute programming is FRL-specific.
 */
static const struct link_hwss hpo_frl_link_hwss = {
	.setup_stream_encoder = virtual_setup_stream_encoder,
	.reset_stream_encoder = virtual_reset_stream_encoder,
	.setup_stream_attribute = setup_hpo_frl_stream_attribute,
};

/* An HPO FRL link is usable only when an HPO FRL link encoder resource
 * has been assigned.
 */
bool can_use_hpo_frl_link_hwss(const struct dc_link *link,
		const struct link_resource *link_res)
{
	return link_res->hpo_frl_link_enc != NULL;
}

const struct link_hwss *get_hpo_frl_link_hwss(void)
{
	return &hpo_frl_link_hwss;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file owns the creation/destruction of link structure. */ #include "link_factory.h" #include "link_detection.h" #include "link_resource.h" #include "link_validation.h" #include "link_dpms.h" #include "accessories/link_dp_cts.h" #include "accessories/link_dp_trace.h" #include "accessories/link_fpga.h" #include "protocols/link_ddc.h" #include "protocols/link_dp_capability.h" #include "protocols/link_dp_dpia_bw.h" #include "protocols/link_dp_dpia.h" #include "protocols/link_dp_irq_handler.h" #include "protocols/link_dp_phy.h" #include "protocols/link_dp_training.h" #include "protocols/link_edp_panel_control.h" #include "protocols/link_hpd.h" #include "gpio_service_interface.h" #include "atomfirmware.h" #define DC_LOGGER_INIT(logger) #define LINK_INFO(...) 
\ DC_LOG_HW_HOTPLUG( \ __VA_ARGS__) /* link factory owns the creation/destruction of link structures. */ static void construct_link_service_factory(struct link_service *link_srv) { link_srv->create_link = link_create; link_srv->destroy_link = link_destroy; } /* link_detection manages link detection states and receiver states by using * various link protocols. It also provides helper functions to interpret * certain capabilities or status based on the states it manages or retrieve * them directly from connected receivers. */ static void construct_link_service_detection(struct link_service *link_srv) { link_srv->detect_link = link_detect; link_srv->detect_connection_type = link_detect_connection_type; link_srv->add_remote_sink = link_add_remote_sink; link_srv->remove_remote_sink = link_remove_remote_sink; link_srv->get_hpd_state = link_get_hpd_state; link_srv->get_hpd_gpio = link_get_hpd_gpio; link_srv->enable_hpd = link_enable_hpd; link_srv->disable_hpd = link_disable_hpd; link_srv->enable_hpd_filter = link_enable_hpd_filter; link_srv->reset_cur_dp_mst_topology = link_reset_cur_dp_mst_topology; link_srv->get_status = link_get_status; link_srv->is_hdcp1x_supported = link_is_hdcp14; link_srv->is_hdcp2x_supported = link_is_hdcp22; link_srv->clear_dprx_states = link_clear_dprx_states; } /* link resource implements accessors to link resource. */ static void construct_link_service_resource(struct link_service *link_srv) { link_srv->get_cur_res_map = link_get_cur_res_map; link_srv->restore_res_map = link_restore_res_map; link_srv->get_cur_link_res = link_get_cur_link_res; } /* link validation owns timing validation against various link limitations. (ex. * link bandwidth, receiver capability or our hardware capability) It also * provides helper functions exposing bandwidth formulas used in validation.
 * (Reviewer note: each construct_* helper below wires exactly one
 * functional sub-area of the link service vtable.)
*/ static void construct_link_service_validation(struct link_service *link_srv) { link_srv->validate_mode_timing = link_validate_mode_timing; link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps; link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth; } /* link dpms owns the programming sequence of stream's dpms state associated * with the link and link's enable/disable sequences as result of the stream's * dpms state change. */ static void construct_link_service_dpms(struct link_service *link_srv) { link_srv->set_dpms_on = link_set_dpms_on; link_srv->set_dpms_off = link_set_dpms_off; link_srv->resume = link_resume; link_srv->blank_all_dp_displays = link_blank_all_dp_displays; link_srv->blank_all_edp_displays = link_blank_all_edp_displays; link_srv->blank_dp_stream = link_blank_dp_stream; link_srv->increase_mst_payload = link_increase_mst_payload; link_srv->reduce_mst_payload = link_reduce_mst_payload; link_srv->set_dsc_on_stream = link_set_dsc_on_stream; link_srv->set_dsc_enable = link_set_dsc_enable; link_srv->update_dsc_config = link_update_dsc_config; } /* link ddc implements generic display communication protocols such as i2c, aux * and scdc. It should not contain any specific applications of these * protocols such as display capability query, detection, or handshaking such as * link training.
 * (Reviewer note: the ddc layer is intentionally protocol-generic.)
*/ static void construct_link_service_ddc(struct link_service *link_srv) { link_srv->create_ddc_service = link_create_ddc_service; link_srv->destroy_ddc_service = link_destroy_ddc_service; link_srv->query_ddc_data = link_query_ddc_data; link_srv->aux_transfer_raw = link_aux_transfer_raw; link_srv->configure_fixed_vs_pe_retimer = link_configure_fixed_vs_pe_retimer; link_srv->aux_transfer_with_retries_no_mutex = link_aux_transfer_with_retries_no_mutex; link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode; link_srv->get_aux_defer_delay = link_get_aux_defer_delay; } /* link dp capability implements dp specific link capability retrieval sequence. * It is responsible for retrieving, parsing, overriding, deciding capability * obtained from dp link. Link capability consists of encoders, DPRXs, cables, * retimers, usb and all other possible backend capabilities. */ static void construct_link_service_dp_capability(struct link_service *link_srv) { link_srv->dp_is_sink_present = dp_is_sink_present; link_srv->dp_is_fec_supported = dp_is_fec_supported; link_srv->dp_is_128b_132b_signal = dp_is_128b_132b_signal; link_srv->dp_get_max_link_enc_cap = dp_get_max_link_enc_cap; link_srv->dp_get_verified_link_cap = dp_get_verified_link_cap; link_srv->dp_get_encoding_format = link_dp_get_encoding_format; link_srv->dp_should_enable_fec = dp_should_enable_fec; link_srv->dp_decide_link_settings = link_decide_link_settings; link_srv->mst_decide_link_encoding_format = mst_decide_link_encoding_format; link_srv->edp_decide_link_settings = edp_decide_link_settings; link_srv->bw_kbps_from_raw_frl_link_rate_data = link_bw_kbps_from_raw_frl_link_rate_data; link_srv->dp_overwrite_extended_receiver_cap = dp_overwrite_extended_receiver_cap; link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode; } /* link dp phy/dpia implements basic dp phy/dpia functionality such as * enable/disable output and set lane/drive settings.
 * (Reviewer note: phy and dpia share a single constructor here.)
It is responsible for * maintaining and update software state representing current phy/dpia status * such as current link settings. */ static void construct_link_service_dp_phy_or_dpia(struct link_service *link_srv) { link_srv->dpia_handle_usb4_bandwidth_allocation_for_link = dpia_handle_usb4_bandwidth_allocation_for_link; link_srv->dpia_handle_bw_alloc_response = dpia_handle_bw_alloc_response; link_srv->dp_set_drive_settings = dp_set_drive_settings; link_srv->dpcd_write_rx_power_ctrl = dpcd_write_rx_power_ctrl; } /* link dp irq handler implements DP HPD short pulse handling sequence according * to DP specifications */ static void construct_link_service_dp_irq_handler(struct link_service *link_srv) { link_srv->dp_parse_link_loss_status = dp_parse_link_loss_status; link_srv->dp_should_allow_hpd_rx_irq = dp_should_allow_hpd_rx_irq; link_srv->dp_handle_link_loss = dp_handle_link_loss; link_srv->dp_read_hpd_rx_irq_data = dp_read_hpd_rx_irq_data; link_srv->dp_handle_hpd_rx_irq = dp_handle_hpd_rx_irq; } /* link edp panel control implements retrieval and configuration of eDP panel * features such as PSR and ABM and it also manages specs defined eDP panel * power sequences.
 * (Reviewer note: also covers Replay and the eDP T7/T9/T12 timings below.)
*/ static void construct_link_service_edp_panel_control(struct link_service *link_srv) { link_srv->edp_panel_backlight_power_on = edp_panel_backlight_power_on; link_srv->edp_get_backlight_level = edp_get_backlight_level; link_srv->edp_get_backlight_level_nits = edp_get_backlight_level_nits; link_srv->edp_set_backlight_level = edp_set_backlight_level; link_srv->edp_set_backlight_level_nits = edp_set_backlight_level_nits; link_srv->edp_get_target_backlight_pwm = edp_get_target_backlight_pwm; link_srv->edp_get_psr_state = edp_get_psr_state; link_srv->edp_set_psr_allow_active = edp_set_psr_allow_active; link_srv->edp_setup_psr = edp_setup_psr; link_srv->edp_set_sink_vtotal_in_psr_active = edp_set_sink_vtotal_in_psr_active; link_srv->edp_get_psr_residency = edp_get_psr_residency; link_srv->edp_get_replay_state = edp_get_replay_state; link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active; link_srv->edp_setup_replay = edp_setup_replay; link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal; link_srv->edp_replay_residency = edp_replay_residency; link_srv->edp_wait_for_t12 = edp_wait_for_t12; link_srv->edp_is_ilr_optimization_required = edp_is_ilr_optimization_required; link_srv->edp_backlight_enable_aux = edp_backlight_enable_aux; link_srv->edp_add_delay_for_T9 = edp_add_delay_for_T9; link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9; link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7; link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable; } /* link dp cts implements dp compliance test automation protocols and manual * testing interfaces for debugging and certification purpose.
 * (Reviewer note: CTS hooks are for DP compliance testing only.)
*/ static void construct_link_service_dp_cts(struct link_service *link_srv) { link_srv->dp_handle_automated_test = dp_handle_automated_test; link_srv->dp_set_test_pattern = dp_set_test_pattern; link_srv->dp_set_preferred_link_settings = dp_set_preferred_link_settings; link_srv->dp_set_preferred_training_settings = dp_set_preferred_training_settings; } /* link dp trace implements tracing interfaces for tracking major dp sequences * including execution status and timestamps */ static void construct_link_service_dp_trace(struct link_service *link_srv) { link_srv->dp_trace_is_initialized = dp_trace_is_initialized; link_srv->dp_trace_set_is_logged_flag = dp_trace_set_is_logged_flag; link_srv->dp_trace_is_logged = dp_trace_is_logged; link_srv->dp_trace_get_lt_end_timestamp = dp_trace_get_lt_end_timestamp; link_srv->dp_trace_get_lt_counts = dp_trace_get_lt_counts; link_srv->dp_trace_get_link_loss_count = dp_trace_get_link_loss_count; link_srv->dp_trace_set_edp_power_timestamp = dp_trace_set_edp_power_timestamp; link_srv->dp_trace_get_edp_poweron_timestamp = dp_trace_get_edp_poweron_timestamp; link_srv->dp_trace_get_edp_poweroff_timestamp = dp_trace_get_edp_poweroff_timestamp; link_srv->dp_trace_source_sequence = dp_trace_source_sequence; } static void construct_link_service(struct link_service *link_srv) { /* All link service functions should fall under some sub categories. * If a new function doesn't perfectly fall under an existing sub * category, it must be that you are either adding a whole new aspect of * responsibility to link service or something doesn't belong to link * service. In that case please contact the arch owner to arrange a * design review meeting.
 * (Reviewer note: keep this call list in sync with the helpers above.)
*/ construct_link_service_factory(link_srv); construct_link_service_detection(link_srv); construct_link_service_resource(link_srv); construct_link_service_validation(link_srv); construct_link_service_dpms(link_srv); construct_link_service_ddc(link_srv); construct_link_service_dp_capability(link_srv); construct_link_service_dp_phy_or_dpia(link_srv); construct_link_service_dp_irq_handler(link_srv); construct_link_service_edp_panel_control(link_srv); construct_link_service_dp_cts(link_srv); construct_link_service_dp_trace(link_srv); } struct link_service *link_create_link_service(void) { struct link_service *link_srv = kzalloc(sizeof(*link_srv), GFP_KERNEL); if (link_srv == NULL) goto fail; construct_link_service(link_srv); return link_srv; fail: return NULL; } void link_destroy_link_service(struct link_service **link_srv) { kfree(*link_srv); *link_srv = NULL; } static enum transmitter translate_encoder_to_transmitter( struct graphics_object_id encoder) { switch (encoder.id) { case ENCODER_ID_INTERNAL_UNIPHY: switch (encoder.enum_id) { case ENUM_ID_1: return TRANSMITTER_UNIPHY_A; case ENUM_ID_2: return TRANSMITTER_UNIPHY_B; default: return TRANSMITTER_UNKNOWN; } break; case ENCODER_ID_INTERNAL_UNIPHY1: switch (encoder.enum_id) { case ENUM_ID_1: return TRANSMITTER_UNIPHY_C; case ENUM_ID_2: return TRANSMITTER_UNIPHY_D; default: return TRANSMITTER_UNKNOWN; } break; case ENCODER_ID_INTERNAL_UNIPHY2: switch (encoder.enum_id) { case ENUM_ID_1: return TRANSMITTER_UNIPHY_E; case ENUM_ID_2: return TRANSMITTER_UNIPHY_F; default: return TRANSMITTER_UNKNOWN; } break; case ENCODER_ID_INTERNAL_UNIPHY3: switch (encoder.enum_id) { case ENUM_ID_1: return TRANSMITTER_UNIPHY_G; default: return TRANSMITTER_UNKNOWN; } break; case ENCODER_ID_EXTERNAL_NUTMEG: switch (encoder.enum_id) { case ENUM_ID_1: return TRANSMITTER_NUTMEG_CRT; default: return TRANSMITTER_UNKNOWN; } break; case ENCODER_ID_EXTERNAL_TRAVIS: switch (encoder.enum_id) { case ENUM_ID_1: return TRANSMITTER_TRAVIS_CRT; case 
/* TRAVIS: second enum instance maps to the LCD transmitter */
ENUM_ID_2: return TRANSMITTER_TRAVIS_LCD; default: return TRANSMITTER_UNKNOWN; } break; default: return TRANSMITTER_UNKNOWN; } } static void link_destruct(struct dc_link *link) { int i; if (link->hpd_gpio) { dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } if (link->ddc) link_destroy_ddc_service(&link->ddc); if (link->panel_cntl) link->panel_cntl->funcs->destroy(&link->panel_cntl); if (link->link_enc) { /* Update link encoder resource tracking variables. These are used for * the dynamic assignment of link encoders to streams. Virtual links * are not assigned encoder resources on creation. */ if (link->link_id.id != CONNECTOR_ID_VIRTUAL) { link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL; link->dc->res_pool->dig_link_enc_count--; } link->link_enc->funcs->destroy(&link->link_enc); } if (link->local_sink) dc_sink_release(link->local_sink); for (i = 0; i < link->sink_count; ++i) dc_sink_release(link->remote_sinks[i]); } static enum channel_id get_ddc_line(struct dc_link *link) { struct ddc *ddc; enum channel_id channel; channel = CHANNEL_ID_UNKNOWN; ddc = get_ddc_pin(link->ddc); if (ddc) { switch (dal_ddc_get_line(ddc)) { case GPIO_DDC_LINE_DDC1: channel = CHANNEL_ID_DDC1; break; case GPIO_DDC_LINE_DDC2: channel = CHANNEL_ID_DDC2; break; case GPIO_DDC_LINE_DDC3: channel = CHANNEL_ID_DDC3; break; case GPIO_DDC_LINE_DDC4: channel = CHANNEL_ID_DDC4; break; case GPIO_DDC_LINE_DDC5: channel = CHANNEL_ID_DDC5; break; case GPIO_DDC_LINE_DDC6: channel = CHANNEL_ID_DDC6; break; case GPIO_DDC_LINE_DDC_VGA: channel = CHANNEL_ID_DDC_VGA; break; case GPIO_DDC_LINE_I2C_PAD: channel = CHANNEL_ID_I2C_PAD; break; default: BREAK_TO_DEBUGGER(); break; } } return channel; } static bool construct_phy(struct dc_link *link, const struct link_init_data *init_params) { uint8_t i; struct ddc_service_init_data ddc_service_init_data = { 0 }; struct dc_context *dc_ctx = init_params->ctx; struct encoder_init_data enc_init_data = { 0 }; struct 
/* panel-control and BIOS connector info gathered during phy construction */
panel_cntl_init_data panel_cntl_init_data = { 0 }; struct integrated_info info = { 0 }; struct dc_bios *bios = init_params->dc->ctx->dc_bios; const struct dc_vbios_funcs *bp_funcs = bios->funcs; struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 }; DC_LOGGER_INIT(dc_ctx->logger); link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; link->link_status.dpcd_caps = &link->dpcd_caps; link->dc = init_params->dc; link->ctx = dc_ctx; link->link_index = init_params->link_index; memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides)); memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings)); link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); link->ep_type = DISPLAY_ENDPOINT_PHY; DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); if (bios->funcs->get_disp_connector_caps_info) { bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display); } if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! 
type %d expected %d\n", __func__, init_params->connector_index, link->link_id.type, OBJECT_TYPE_CONNECTOR); goto create_fail; } if (link->dc->res_pool->funcs->link_init) link->dc->res_pool->funcs->link_init(link); link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); if (link->hpd_gpio) { dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); dal_gpio_unlock_pin(link->hpd_gpio); link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id); DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en); } switch (link->link_id.id) { case CONNECTOR_ID_HDMI_TYPE_A: link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A; break; case CONNECTOR_ID_SINGLE_LINK_DVID: case CONNECTOR_ID_SINGLE_LINK_DVII: link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; case CONNECTOR_ID_DUAL_LINK_DVID: case CONNECTOR_ID_DUAL_LINK_DVII: link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; case CONNECTOR_ID_DISPLAY_PORT: case CONNECTOR_ID_USBC: link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; if (link->hpd_gpio) link->irq_source_hpd_rx = dal_irq_get_rx_source(link->hpd_gpio); break; case CONNECTOR_ID_EDP: link->connector_signal = SIGNAL_TYPE_EDP; if (link->hpd_gpio) { if (!link->dc->config.allow_edp_hotplug_detection) link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; switch (link->dc->config.allow_edp_hotplug_detection) { case HPD_EN_FOR_ALL_EDP: link->irq_source_hpd_rx = dal_irq_get_rx_source(link->hpd_gpio); break; case HPD_EN_FOR_PRIMARY_EDP_ONLY: if (link->link_index == 0) link->irq_source_hpd_rx = dal_irq_get_rx_source(link->hpd_gpio); else link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; break; case HPD_EN_FOR_SECONDARY_EDP_ONLY: if (link->link_index == 1) link->irq_source_hpd_rx = dal_irq_get_rx_source(link->hpd_gpio); else link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; break; default: link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; break; } } break; case 
/* NOTE(review): further below, link->link_enc is dereferenced by the two
 * DC_LOG_DC calls before the NULL check on link->link_enc — the check
 * should precede the logging; flagging only, since this function is
 * truncated in this view.
 */
CONNECTOR_ID_LVDS: link->connector_signal = SIGNAL_TYPE_LVDS; break; default: DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id); goto create_fail; } LINK_INFO("Connector[%d] description: signal: %s\n", init_params->connector_index, signal_type_to_string(link->connector_signal)); ddc_service_init_data.ctx = link->ctx; ddc_service_init_data.id = link->link_id; ddc_service_init_data.link = link; link->ddc = link_create_ddc_service(&ddc_service_init_data); if (!link->ddc) { DC_ERROR("Failed to create ddc_service!\n"); goto ddc_create_fail; } if (!link->ddc->ddc_pin) { DC_ERROR("Failed to get I2C info for connector!\n"); goto ddc_create_fail; } link->ddc_hw_inst = dal_ddc_get_line(get_ddc_pin(link->ddc)); if (link->dc->res_pool->funcs->panel_cntl_create && (link->link_id.id == CONNECTOR_ID_EDP || link->link_id.id == CONNECTOR_ID_LVDS)) { panel_cntl_init_data.ctx = dc_ctx; panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count; link->panel_cntl = link->dc->res_pool->funcs->panel_cntl_create( &panel_cntl_init_data); panel_cntl_init_data.ctx->dc_edp_id_count++; if (link->panel_cntl == NULL) { DC_ERROR("Failed to create link panel_cntl!\n"); goto panel_cntl_create_fail; } } enc_init_data.ctx = dc_ctx; bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder); enc_init_data.connector = link->link_id; enc_init_data.channel = get_ddc_line(link); enc_init_data.hpd_source = get_hpd_line(link); link->hpd_src = enc_init_data.hpd_source; enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder); link->link_enc = link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); if (!link->link_enc) { DC_ERROR("Failed to create link encoder!\n"); goto link_enc_create_fail; } /* Update link encoder 
tracking variables. These are used for the dynamic * assignment of link encoders to streams. */ link->eng_id = link->link_enc->preferred_engine; link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc; link->dc->res_pool->dig_link_enc_count++; link->link_enc_hw_inst = link->link_enc->transmitter; for (i = 0; i < 4; i++) { if (bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag) != BP_RESULT_OK) { DC_ERROR("Failed to find device tag!\n"); goto device_tag_fail; } /* Look for device tag that matches connector signal, * CRT for rgb, LCD for other supported signal tyes */ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id)) continue; if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT && link->connector_signal != SIGNAL_TYPE_RGB) continue; if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD && link->connector_signal == SIGNAL_TYPE_RGB) continue; DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device); DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type); DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id); break; } if (bios->integrated_info) info = *bios->integrated_info; /* Look for channel mapping corresponding to connector and device tag */ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) { struct external_display_path *path = &info.ext_disp_conn_info.path[i]; if (path->device_connector_id.enum_id == link->link_id.enum_id && path->device_connector_id.id == link->link_id.id && path->device_connector_id.type == link->link_id.type) { if (link->device_tag.acpi_device != 0 && path->device_acpi_enum == link->device_tag.acpi_device) { link->ddi_channel_mapping = path->channel_mapping; link->chip_caps = path->caps; DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); DC_LOG_DC("BIOS object table - chip_caps: %d", 
link->chip_caps); } else if (path->device_tag == link->device_tag.dev_id.raw_device_tag) { link->ddi_channel_mapping = path->channel_mapping; link->chip_caps = path->caps; DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); } if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) { link->bios_forced_drive_settings.VOLTAGE_SWING = (info.ext_disp_conn_info.fixdpvoltageswing & 0x3); link->bios_forced_drive_settings.PRE_EMPHASIS = ((info.ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3); } break; } } if (bios->funcs->get_atom_dc_golden_table) bios->funcs->get_atom_dc_golden_table(bios); /* * TODO check if GPIO programmed correctly * * If GPIO isn't programmed correctly HPD might not rise or drain * fast enough, leading to bounces. */ program_hpd_filter(link); link->psr_settings.psr_vtotal_control_support = false; link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); return true; device_tag_fail: link->link_enc->funcs->destroy(&link->link_enc); link_enc_create_fail: if (link->panel_cntl != NULL) link->panel_cntl->funcs->destroy(&link->panel_cntl); panel_cntl_create_fail: link_destroy_ddc_service(&link->ddc); ddc_create_fail: create_fail: if (link->hpd_gpio) { dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } DC_LOG_DC("BIOS object table - %s failed.\n", __func__); return false; } static bool construct_dpia(struct dc_link *link, const struct link_init_data *init_params) { struct ddc_service_init_data ddc_service_init_data = { 0 }; struct dc_context *dc_ctx = init_params->ctx; DC_LOGGER_INIT(dc_ctx->logger); /* Initialized irq source for hpd and hpd rx */ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; link->link_status.dpcd_caps = &link->dpcd_caps; link->dc = init_params->dc; link->ctx = dc_ctx; link->link_index = 
init_params->link_index; memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides)); memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings)); /* Dummy Init for linkid */ link->link_id.type = OBJECT_TYPE_CONNECTOR; link->link_id.id = CONNECTOR_ID_DISPLAY_PORT; link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index; link->is_internal_display = false; link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; LINK_INFO("Connector[%d] description:signal %d\n", init_params->connector_index, link->connector_signal); link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA; link->is_dig_mapping_flexible = true; /* TODO: Initialize link : funcs->link_init */ ddc_service_init_data.ctx = link->ctx; ddc_service_init_data.id = link->link_id; ddc_service_init_data.link = link; /* Set indicator for dpia link so that ddc wont be created */ ddc_service_init_data.is_dpia_link = true; link->ddc = link_create_ddc_service(&ddc_service_init_data); if (!link->ddc) { DC_ERROR("Failed to create ddc_service!\n"); goto ddc_create_fail; } /* Set dpia port index : 0 to number of dpia ports */ link->ddc_hw_inst = init_params->connector_index; // Assign Dpia preferred eng_id if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia) link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst); /* TODO: Create link encoder */ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; /* Some docks seem to NAK I2C writes to segment pointer with mot=0. 
 */
	link->wa_flags.dp_mot_reset_segment = true;

	return true;

ddc_create_fail:
	return false;
}

/* Dispatch to the appropriate constructor: USB4 DPIA tunneled links have
 * no physical connector and use construct_dpia(); all other links go
 * through the full BIOS-object-table path in construct_phy().
 */
static bool link_construct(struct dc_link *link,
			   const struct link_init_data *init_params)
{
	/* Handle dpia case */
	if (init_params->is_dpia_link == true)
		return construct_dpia(link, init_params);
	else
		return construct_phy(link, init_params);
}

/* Allocate and construct a dc_link.
 * Returns NULL on allocation or construction failure; on success the
 * caller owns the link and must release it with link_destroy().
 */
struct dc_link *link_create(const struct link_init_data *init_params)
{
	struct dc_link *link =
			kzalloc(sizeof(*link), GFP_KERNEL);

	if (NULL == link)
		goto alloc_fail;

	if (false == link_construct(link, init_params))
		goto construct_fail;

	return link;

construct_fail:
	kfree(link);

alloc_fail:
	return NULL;
}

/* Destruct and free a link created by link_create(); the caller's pointer
 * is cleared to guard against use-after-free.
 */
void link_destroy(struct dc_link **link)
{
	link_destruct(*link);
	kfree(*link);
	*link = NULL;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/link_factory.c
/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements accessors to link resource. 
 */
#include "link_resource.h"
#include "protocols/link_dp_capability.h"

/* Look up the link resource currently assigned to @link by scanning the
 * current state for the head (top) pipe driving this link. @link_res is
 * zeroed first, so it remains all-zero when the link drives no pipe.
 */
void link_get_cur_link_res(const struct dc_link *link,
		struct link_resource *link_res)
{
	int i;
	struct pipe_ctx *pipe = NULL;

	memset(link_res, 0, sizeof(*link_res));

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
			if (pipe->stream->link == link) {
				*link_res = pipe->link_res;
				break;
			}
		}
	}
}

/* Build a bitmap in @map of "recycled" HPO DP link encoders: per-link bits
 * (packed at LINK_RES_HPO_DP_REC_MAP__SHIFT) for active links whose RX
 * reports 128b/132b capability but whose current settings do not use
 * 128b/132b encoding. @map is cleared first; it stays zero when the ASIC
 * has no HPO DP support.
 */
void link_get_cur_res_map(const struct dc *dc, uint32_t *map)
{
	struct dc_link *link;
	uint32_t i;
	uint32_t hpo_dp_recycle_map = 0;

	*map = 0;

	if (dc->caps.dp_hpo) {
		for (i = 0; i < dc->caps.max_links; i++) {
			link = dc->links[i];

			if (link->link_status.link_active &&
					link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
					link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
				/* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability
				 * but current link doesn't use it.
				 */
				hpo_dp_recycle_map |= (1 << i);
		}

		*map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
	}
}

/* Re-apply a recycle map produced by link_get_cur_res_map(): budget the
 * pool's HPO DP link encoders, giving priority to links NOT marked as
 * recycled (first pass) before recycled ones (second pass). Any
 * 128b/132b-capable connected link left without budget has its verified
 * link rate capped at HBR3, which removes its 128b/132b capability.
 */
void link_restore_res_map(const struct dc *dc, uint32_t *map)
{
	struct dc_link *link;
	uint32_t i;
	unsigned int available_hpo_dp_count;
	uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
			>> LINK_RES_HPO_DP_REC_MAP__SHIFT;

	if (dc->caps.dp_hpo) {
		available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;

		/* remove excess 128b/132b encoding support for not recycled links */
		for (i = 0; i < dc->caps.max_links; i++) {
			if ((hpo_dp_recycle_map & (1 << i)) == 0) {
				link = dc->links[i];
				if (link->type != dc_connection_none &&
						link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
					if (available_hpo_dp_count > 0)
						available_hpo_dp_count--;
					else
						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
						link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
				}
			}
		}

		/* remove excess 128b/132b encoding support for recycled links */
		for (i = 0; i < dc->caps.max_links; i++) {
			if ((hpo_dp_recycle_map & (1 << i)) != 0) {
				link = dc->links[i];
				if (link->type != dc_connection_none &&
						link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
					if (available_hpo_dp_count > 0)
						available_hpo_dp_count--;
					else
						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
						link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
				}
			}
		}
	}
}
linux-master
drivers/gpu/drm/amd/display/dc/link/link_resource.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file manages link detection states and receiver states by using various * link protocols. It also provides helper functions to interpret certain * capabilities or status based on the states it manages or retrieve them * directly from connected receivers. */ #include "link_dpms.h" #include "link_detection.h" #include "link_hwss.h" #include "protocols/link_edp_panel_control.h" #include "protocols/link_ddc.h" #include "protocols/link_hpd.h" #include "protocols/link_dpcd.h" #include "protocols/link_dp_capability.h" #include "protocols/link_dp_dpia.h" #include "protocols/link_dp_phy.h" #include "protocols/link_dp_training.h" #include "accessories/link_dp_trace.h" #include "link_enc_cfg.h" #include "dm_helpers.h" #include "clk_mgr.h" #define DC_LOGGER_INIT(logger) #define LINK_INFO(...) 
\ DC_LOG_HW_HOTPLUG( \ __VA_ARGS__) /* * Some receivers fail to train on first try and are good * on subsequent tries. 2 retries should be plenty. If we * don't have a successful training then we don't expect to * ever get one. */ #define LINK_TRAINING_MAX_VERIFY_RETRY 2 static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal) { enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE; switch (sink_signal) { case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_HDMI_TYPE_A: case SIGNAL_TYPE_LVDS: case SIGNAL_TYPE_RGB: transaction_type = DDC_TRANSACTION_TYPE_I2C; break; case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_EDP: transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; break; case SIGNAL_TYPE_DISPLAY_PORT_MST: /* MST does not use I2COverAux, but there is the * SPECIAL use case for "immediate dwnstrm device * access" (EPR#370830). 
*/ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; break; default: break; } return transaction_type; } static enum signal_type get_basic_signal_type(struct graphics_object_id encoder, struct graphics_object_id downstream) { if (downstream.type == OBJECT_TYPE_CONNECTOR) { switch (downstream.id) { case CONNECTOR_ID_SINGLE_LINK_DVII: switch (encoder.id) { case ENCODER_ID_INTERNAL_DAC1: case ENCODER_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_ID_INTERNAL_DAC2: case ENCODER_ID_INTERNAL_KLDSCP_DAC2: return SIGNAL_TYPE_RGB; default: return SIGNAL_TYPE_DVI_SINGLE_LINK; } break; case CONNECTOR_ID_DUAL_LINK_DVII: { switch (encoder.id) { case ENCODER_ID_INTERNAL_DAC1: case ENCODER_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_ID_INTERNAL_DAC2: case ENCODER_ID_INTERNAL_KLDSCP_DAC2: return SIGNAL_TYPE_RGB; default: return SIGNAL_TYPE_DVI_DUAL_LINK; } } break; case CONNECTOR_ID_SINGLE_LINK_DVID: return SIGNAL_TYPE_DVI_SINGLE_LINK; case CONNECTOR_ID_DUAL_LINK_DVID: return SIGNAL_TYPE_DVI_DUAL_LINK; case CONNECTOR_ID_VGA: return SIGNAL_TYPE_RGB; case CONNECTOR_ID_HDMI_TYPE_A: return SIGNAL_TYPE_HDMI_TYPE_A; case CONNECTOR_ID_LVDS: return SIGNAL_TYPE_LVDS; case CONNECTOR_ID_DISPLAY_PORT: case CONNECTOR_ID_USBC: return SIGNAL_TYPE_DISPLAY_PORT; case CONNECTOR_ID_EDP: return SIGNAL_TYPE_EDP; default: return SIGNAL_TYPE_NONE; } } else if (downstream.type == OBJECT_TYPE_ENCODER) { switch (downstream.id) { case ENCODER_ID_EXTERNAL_NUTMEG: case ENCODER_ID_EXTERNAL_TRAVIS: return SIGNAL_TYPE_DISPLAY_PORT; default: return SIGNAL_TYPE_NONE; } } return SIGNAL_TYPE_NONE; } /* * @brief * Detect output sink type */ static enum signal_type link_detect_sink_signal_type(struct dc_link *link, enum dc_detect_reason reason) { enum signal_type result; struct graphics_object_id enc_id; if (link->is_dig_mapping_flexible) enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN}; else enc_id = link->link_enc->id; result = get_basic_signal_type(enc_id, link->link_id); /* Use basic signal type for link 
without physical connector. */ if (link->ep_type != DISPLAY_ENDPOINT_PHY) return result; /* Internal digital encoder will detect only dongles * that require digital signal */ /* Detection mechanism is different * for different native connectors. * LVDS connector supports only LVDS signal; * PCIE is a bus slot, the actual connector needs to be detected first; * eDP connector supports only eDP signal; * HDMI should check straps for audio */ /* PCIE detects the actual connector on add-on board */ if (link->link_id.id == CONNECTOR_ID_PCIE) { /* ZAZTODO implement PCIE add-on card detection */ } switch (link->link_id.id) { case CONNECTOR_ID_HDMI_TYPE_A: { /* check audio support: * if native HDMI is not supported, switch to DVI */ struct audio_support *aud_support = &link->dc->res_pool->audio_support; if (!aud_support->hdmi_audio_native) if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A) result = SIGNAL_TYPE_DVI_SINGLE_LINK; } break; case CONNECTOR_ID_DISPLAY_PORT: case CONNECTOR_ID_USBC: { /* DP HPD short pulse. 
Passive DP dongle will not * have short pulse */ if (reason != DETECT_REASON_HPDRX) { /* Check whether DP signal detected: if not - * we assume signal is DVI; it could be corrected * to HDMI after dongle detection */ if (!dm_helpers_is_dp_sink_present(link)) result = SIGNAL_TYPE_DVI_SINGLE_LINK; } } break; default: break; } return result; } static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type, struct audio_support *audio_support) { enum signal_type signal = SIGNAL_TYPE_NONE; switch (dongle_type) { case DISPLAY_DONGLE_DP_HDMI_DONGLE: if (audio_support->hdmi_audio_on_dongle) signal = SIGNAL_TYPE_HDMI_TYPE_A; else signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; case DISPLAY_DONGLE_DP_DVI_DONGLE: signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: if (audio_support->hdmi_audio_native) signal = SIGNAL_TYPE_HDMI_TYPE_A; else signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; default: signal = SIGNAL_TYPE_NONE; break; } return signal; } static void read_scdc_caps(struct ddc_service *ddc_service, struct dc_sink *sink) { uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI; link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte, sizeof(sink->scdc_caps.manufacturer_OUI.byte)); offset = HDMI_SCDC_DEVICE_ID; link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &(sink->scdc_caps.device_id.byte), sizeof(sink->scdc_caps.device_id.byte)); } static bool i2c_read( struct ddc_service *ddc, uint32_t address, uint8_t *buffer, uint32_t len) { uint8_t offs_data = 0; struct i2c_payload payloads[2] = { { .write = true, .address = address, .length = 1, .data = &offs_data }, { .write = false, .address = address, .length = len, .data = buffer } }; struct i2c_command command = { .payloads = payloads, .number_of_payloads = 2, .engine = DDC_I2C_COMMAND_ENGINE, .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; 
return dm_helpers_submit_i2c( ddc->ctx, ddc->link, &command); } enum { DP_SINK_CAP_SIZE = DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 }; static void query_dp_dual_mode_adaptor( struct ddc_service *ddc, struct display_sink_capability *sink_cap) { uint8_t i; bool is_valid_hdmi_signature; enum display_dongle_type *dongle = &sink_cap->dongle_type; uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; bool is_type2_dongle = false; int retry_count = 2; struct dp_hdmi_dongle_signature_data *dongle_signature; /* Assume we have no valid DP passive dongle connected */ *dongle = DISPLAY_DONGLE_NONE; sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/ if (!i2c_read( ddc, DP_HDMI_DONGLE_ADDRESS, type2_dongle_buf, sizeof(type2_dongle_buf))) { /* Passive HDMI dongles can sometimes fail here without retrying*/ while (retry_count > 0) { if (i2c_read(ddc, DP_HDMI_DONGLE_ADDRESS, type2_dongle_buf, sizeof(type2_dongle_buf))) break; retry_count--; } if (retry_count == 0) { *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), "DP-DVI passive dongle %dMhz: ", DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); return; } } /* Check if Type 2 dongle.*/ if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) is_type2_dongle = true; dongle_signature = (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; is_valid_hdmi_signature = true; /* Check EOT */ if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { is_valid_hdmi_signature = false; } /* Check signature */ for (i = 0; i < sizeof(dongle_signature->id); ++i) { /* If its not the right signature, * skip mismatch in subversion byte.*/ if (dongle_signature->id[i] != dp_hdmi_dongle_signature_str[i] && i != 3) { if (is_type2_dongle) { is_valid_hdmi_signature = false; break; } } } if (is_type2_dongle) { uint32_t max_tmds_clk = 
type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; if (0 == max_tmds_clk || max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), "DP-DVI passive dongle %dMhz: ", DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); } else { if (is_valid_hdmi_signature == true) { *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), "Type 2 DP-HDMI passive dongle %dMhz: ", max_tmds_clk); } else { *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", max_tmds_clk); } /* Multiply by 1000 to convert to kHz. */ sink_cap->max_hdmi_pixel_clock = max_tmds_clk * 1000; } sink_cap->is_dongle_type_one = false; } else { if (is_valid_hdmi_signature == true) { *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), "Type 1 DP-HDMI passive dongle %dMhz: ", sink_cap->max_hdmi_pixel_clock / 1000); } else { *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", sink_cap->max_hdmi_pixel_clock / 1000); } sink_cap->is_dongle_type_one = true; } return; } static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, struct display_sink_capability *sink_cap, struct audio_support *audio_support) { query_dp_dual_mode_adaptor(ddc, sink_cap); return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, audio_support); } static void link_disconnect_sink(struct dc_link *link) { if (link->local_sink) { dc_sink_release(link->local_sink); link->local_sink = NULL; } link->dpcd_sink_count = 0; //link->dpcd_caps.dpcd_rev.raw = 0; } static void 
link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) { dc_sink_release(link->local_sink); link->local_sink = prev_sink; } static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) { struct hdcp_protection_message msg22; struct hdcp_protection_message msg14; memset(&msg22, 0, sizeof(struct hdcp_protection_message)); memset(&msg14, 0, sizeof(struct hdcp_protection_message)); memset(link->hdcp_caps.rx_caps.raw, 0, sizeof(link->hdcp_caps.rx_caps.raw)); if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && link->ddc->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) || link->connector_signal == SIGNAL_TYPE_EDP) { msg22.data = link->hdcp_caps.rx_caps.raw; msg22.length = sizeof(link->hdcp_caps.rx_caps.raw); msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS; } else { msg22.data = &link->hdcp_caps.rx_caps.fields.version; msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version); msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION; } msg22.version = HDCP_VERSION_22; msg22.link = HDCP_LINK_PRIMARY; msg22.max_retries = 5; dc_process_hdcp_msg(signal, link, &msg22); if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { msg14.data = &link->hdcp_caps.bcaps.raw; msg14.length = sizeof(link->hdcp_caps.bcaps.raw); msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS; msg14.version = HDCP_VERSION_14; msg14.link = HDCP_LINK_PRIMARY; msg14.max_retries = 5; dc_process_hdcp_msg(signal, link, &msg14); } } static void read_current_link_settings_on_detect(struct dc_link *link) { union lane_count_set lane_count_set = {0}; uint8_t link_bw_set; uint8_t link_rate_set; uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; union max_down_spread max_down_spread = {0}; // Read DPCD 00101h to find out the number of lanes currently set for (i = 0; i < read_dpcd_retry_cnt; i++) { status = core_link_read_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, sizeof(lane_count_set)); /* First DPCD read after VDD ON can 
fail if the particular board * does not have HPD pin wired correctly. So if DPCD read fails, * which it should never happen, retry a few times. Target worst * case scenario of 80 ms. */ if (status == DC_OK) { link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET; break; } msleep(8); } // Read DPCD 00100h to find if standard link rates are set core_link_read_dpcd(link, DP_LINK_BW_SET, &link_bw_set, sizeof(link_bw_set)); if (link_bw_set == 0) { if (link->connector_signal == SIGNAL_TYPE_EDP) { /* If standard link rates are not being used, * Read DPCD 00115h to find the edp link rate set used */ core_link_read_dpcd(link, DP_LINK_RATE_SET, &link_rate_set, sizeof(link_rate_set)); // edp_supported_link_rates_count = 0 for DP if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { link->cur_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[link_rate_set]; link->cur_link_settings.link_rate_set = link_rate_set; link->cur_link_settings.use_link_rate_set = true; } } else { // Link Rate not found. Seamless boot may not work. ASSERT(false); } } else { link->cur_link_settings.link_rate = link_bw_set; link->cur_link_settings.use_link_rate_set = false; } // Read DPCD 00003h to find the max down spread. core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, &max_down_spread.raw, sizeof(max_down_spread)); link->cur_link_settings.link_spread = max_down_spread.bits.MAX_DOWN_SPREAD ? 
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } static bool detect_dp(struct dc_link *link, struct display_sink_capability *sink_caps, enum dc_detect_reason reason) { struct audio_support *audio_support = &link->dc->res_pool->audio_support; sink_caps->signal = link_detect_sink_signal_type(link, reason); sink_caps->transaction_type = get_ddc_transaction_type(sink_caps->signal); if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; if (!detect_dp_sink_caps(link)) return false; if (is_dp_branch_device(link)) /* DP SST branch */ link->type = dc_connection_sst_branch; } else { if (link->dc->debug.disable_dp_plus_plus_wa && link->link_enc->features.flags.bits.IS_UHBR20_CAPABLE) return false; /* DP passive dongles */ sink_caps->signal = dp_passive_dongle_detection(link->ddc, sink_caps, audio_support); link->dpcd_caps.dongle_type = sink_caps->dongle_type; link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; link->dpcd_caps.dpcd_rev.raw = 0; } return true; } static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) { if (old_edid->length != new_edid->length) return false; if (new_edid->length == 0) return false; return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); } static bool wait_for_entering_dp_alt_mode(struct dc_link *link) { /** * something is terribly wrong if time out is > 200ms. 
(5Hz) * 500 microseconds * 400 tries us 200 ms **/ unsigned int sleep_time_in_microseconds = 500; unsigned int tries_allowed = 400; bool is_in_alt_mode; unsigned long long enter_timestamp; unsigned long long finish_timestamp; unsigned long long time_taken_in_ns; int tries_taken; DC_LOGGER_INIT(link->ctx->logger); /** * this function will only exist if we are on dcn21 (is_in_alt_mode is a * function pointer, so checking to see if it is equal to 0 is the same * as checking to see if it is null **/ if (!link->link_enc->funcs->is_in_alt_mode) return true; is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode); if (is_in_alt_mode) return true; enter_timestamp = dm_get_timestamp(link->ctx); for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) { udelay(sleep_time_in_microseconds); /* ask the link if alt mode is enabled, if so return ok */ if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) { finish_timestamp = dm_get_timestamp(link->ctx); time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); DC_LOG_WARNING("Alt mode entered finished after %llu ms\n", div_u64(time_taken_in_ns, 1000000)); return true; } } finish_timestamp = dm_get_timestamp(link->ctx); time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); DC_LOG_WARNING("Alt mode has timed out after %llu ms\n", div_u64(time_taken_in_ns, 1000000)); return false; } static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) { /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock * reports DSC support. 
*/ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->type == dc_connection_mst_branch && link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) link->wa_flags.dpia_mst_dsc_always_on = true; } static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) { /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) link->wa_flags.dpia_mst_dsc_always_on = false; } static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) { DC_LOGGER_INIT(link->ctx->logger); LINK_INFO("link=%d, mst branch is now Connected\n", link->link_index); link->type = dc_connection_mst_branch; apply_dpia_mst_dsc_always_on_wa(link); dm_helpers_dp_update_branch_info(link->ctx, link); if (dm_helpers_dp_mst_start_top_mgr(link->ctx, link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) { link_disconnect_sink(link); } else { link->type = dc_connection_sst_branch; } return link->type == dc_connection_mst_branch; } bool link_reset_cur_dp_mst_topology(struct dc_link *link) { DC_LOGGER_INIT(link->ctx->logger); LINK_INFO("link=%d, mst branch is now Disconnected\n", link->link_index); revert_dpia_mst_dsc_always_on_wa(link); return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); } static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, enum dc_detect_reason reason) { int i; bool can_apply_seamless_boot = false; for (i = 0; i < dc->current_state->stream_count; i++) { if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { can_apply_seamless_boot = true; break; } } return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; } static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) { 
dc_z10_restore(dc); clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); } static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) { clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); } static void verify_link_capability_destructive(struct dc_link *link, struct dc_sink *sink, enum dc_detect_reason reason) { bool should_prepare_phy_clocks = should_prepare_phy_clocks_for_link_verification(link->dc, reason); if (should_prepare_phy_clocks) prepare_phy_clocks_for_destructive_link_verification(link->dc); if (dc_is_dp_signal(link->local_sink->sink_signal)) { struct dc_link_settings known_limit_link_setting = dp_get_max_link_cap(link); link_set_all_streams_dpms_off_for_link(link); dp_verify_link_cap_with_retries( link, &known_limit_link_setting, LINK_TRAINING_MAX_VERIFY_RETRY); } else { ASSERT(0); } if (should_prepare_phy_clocks) restore_phy_clocks_for_destructive_link_verification(link->dc); } static void verify_link_capability_non_destructive(struct dc_link *link) { if (dc_is_dp_signal(link->local_sink->sink_signal)) { if (dc_is_embedded_signal(link->local_sink->sink_signal) || link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) /* TODO - should we check link encoder's max link caps here? * How do we know which link encoder to check from? 
*/ link->verified_link_cap = link->reported_link_cap; else link->verified_link_cap = dp_get_max_link_cap(link); } } static bool should_verify_link_capability_destructively(struct dc_link *link, enum dc_detect_reason reason) { bool destrictive = false; struct dc_link_settings max_link_cap; bool is_link_enc_unavailable = link->link_enc && link->dc->res_pool->funcs->link_encs_assign && !link_enc_cfg_is_link_enc_avail( link->ctx->dc, link->link_enc->preferred_engine, link); if (dc_is_dp_signal(link->local_sink->sink_signal)) { max_link_cap = dp_get_max_link_cap(link); destrictive = true; if (link->dc->debug.skip_detection_link_training || dc_is_embedded_signal(link->local_sink->sink_signal) || link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { destrictive = false; } else if (link_dp_get_encoding_format(&max_link_cap) == DP_8b_10b_ENCODING) { if (link->dpcd_caps.is_mst_capable || is_link_enc_unavailable) { destrictive = false; } } } return destrictive; } static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, enum dc_detect_reason reason) { if (should_verify_link_capability_destructively(link, reason)) verify_link_capability_destructive(link, sink, reason); else verify_link_capability_non_destructive(link); } /* * detect_link_and_local_sink() - Detect if a sink is attached to a given link * * link->local_sink is created or destroyed as needed. * * This does not create remote sinks. 
*/ static bool detect_link_and_local_sink(struct dc_link *link, enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; struct display_sink_capability sink_caps = { 0 }; uint32_t i; bool converter_disable_audio = false; struct audio_support *aud_support = &link->dc->res_pool->audio_support; bool same_edid = false; enum dc_edid_status edid_status; struct dc_context *dc_ctx = link->ctx; struct dc *dc = dc_ctx->dc; struct dc_sink *sink = NULL; struct dc_sink *prev_sink = NULL; struct dpcd_caps prev_dpcd_caps; enum dc_connection_type new_connection_type = dc_connection_none; enum dc_connection_type pre_connection_type = link->type; const uint32_t post_oui_delay = 30; // 30ms DC_LOGGER_INIT(link->ctx->logger); if (dc_is_virtual_signal(link->connector_signal)) return false; if (((link->connector_signal == SIGNAL_TYPE_LVDS || link->connector_signal == SIGNAL_TYPE_EDP) && (!link->dc->config.allow_edp_hotplug_detection)) && link->local_sink) { // need to re-write OUI and brightness in resume case if (link->connector_signal == SIGNAL_TYPE_EDP && (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); set_cached_brightness_aux(link); } return true; } if (!link_detect_connection_type(link, &new_connection_type)) { BREAK_TO_DEBUGGER(); return false; } prev_sink = link->local_sink; if (prev_sink) { dc_sink_retain(prev_sink); memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); } link_disconnect_sink(link); if (new_connection_type != dc_connection_none) { link->type = new_connection_type; link->link_state_valid = false; /* From Disconnected-to-Connected. 
*/ switch (link->connector_signal) { case SIGNAL_TYPE_HDMI_TYPE_A: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; if (aud_support->hdmi_audio_native) sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; else sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; } case SIGNAL_TYPE_DVI_SINGLE_LINK: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; } case SIGNAL_TYPE_DVI_DUAL_LINK: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; } case SIGNAL_TYPE_LVDS: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_LVDS; break; } case SIGNAL_TYPE_EDP: { detect_edp_sink_caps(link); read_current_link_settings_on_detect(link); /* Disable power sequence on MIPI panel + converter */ if (dc->config.enable_mipi_converter_optimization && dc_ctx->dce_version == DCN_VERSION_3_01 && link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 && memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580, sizeof(link->dpcd_caps.branch_dev_name)) == 0) { dc->config.edp_no_power_sequencing = true; if (!link->dpcd_caps.set_power_state_capable_edp) link->wa_flags.dp_keep_receiver_powered = true; } sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; } case SIGNAL_TYPE_DISPLAY_PORT: { /* wa HPD high coming too early*/ if (link->ep_type == DISPLAY_ENDPOINT_PHY && link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { /* if alt mode times out, return false */ if (!wait_for_entering_dp_alt_mode(link)) return false; } if (!detect_dp(link, &sink_caps, reason)) { link->type = pre_connection_type; if (prev_sink) dc_sink_release(prev_sink); return false; } /* Active SST downstream branch device unplug*/ if (link->type == dc_connection_sst_branch && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { if (prev_sink) /* Downstream unplug */ dc_sink_release(prev_sink); return true; } /* 
disable audio for non DP to HDMI active sst converter */ if (link->type == dc_connection_sst_branch && is_dp_active_dongle(link) && (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)) converter_disable_audio = true; /* limited link rate to HBR3 for DPIA until we implement USB4 V2 */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->reported_link_cap.link_rate > LINK_RATE_HIGH3) link->reported_link_cap.link_rate = LINK_RATE_HIGH3; break; } default: DC_ERROR("Invalid connector type! signal:%d\n", link->connector_signal); if (prev_sink) dc_sink_release(prev_sink); return false; } /* switch() */ if (link->dpcd_caps.sink_count.bits.SINK_COUNT) link->dpcd_sink_count = link->dpcd_caps.sink_count.bits.SINK_COUNT; else link->dpcd_sink_count = 1; set_ddc_transaction_type(link->ddc, sink_caps.transaction_type); link->aux_mode = link_is_in_aux_transaction_mode(link->ddc); sink_init_data.link = link; sink_init_data.sink_signal = sink_caps.signal; sink = dc_sink_create(&sink_init_data); if (!sink) { DC_ERROR("Failed to create sink!\n"); if (prev_sink) dc_sink_release(prev_sink); return false; } sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; sink->converter_disable_audio = converter_disable_audio; /* dc_sink_create returns a new reference */ link->local_sink = sink; edid_status = dm_helpers_read_local_edid(link->ctx, link, sink); switch (edid_status) { case EDID_BAD_CHECKSUM: DC_LOG_ERROR("EDID checksum invalid.\n"); break; case EDID_PARTIAL_VALID: DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); break; case EDID_NO_RESPONSE: DC_LOG_ERROR("No EDID read.\n"); /* * Abort detection for non-DP connectors if we have * no EDID * * DP needs to report as connected if HDP is high * even if we have no EDID in order to go to * fail-safe mode */ if (dc_is_hdmi_signal(link->connector_signal) || dc_is_dvi_signal(link->connector_signal)) { if (prev_sink) dc_sink_release(prev_sink); return false; } if (link->type == dc_connection_sst_branch 
&& link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER && reason == DETECT_REASON_HPDRX) { /* Abort detection for DP-VGA adapters when EDID * can't be read and detection reason is VGA-side * hotplug */ if (prev_sink) dc_sink_release(prev_sink); link_disconnect_sink(link); return true; } break; default: break; } // Check if edid is the same if ((prev_sink) && (edid_status == EDID_THE_SAME || edid_status == EDID_OK)) same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); if (sink->edid_caps.panel_patch.skip_scdc_overwrite) link->ctx->dc->debug.hdmi20_disable = true; if (dc_is_hdmi_signal(link->connector_signal)) read_scdc_caps(link->ddc, link->local_sink); if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* * TODO debug why certain monitors don't like * two link trainings */ query_hdcp_capability(sink->sink_signal, link); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { link_disconnect_remap(prev_sink, link); sink = prev_sink; prev_sink = NULL; } query_hdcp_capability(sink->sink_signal, link); } /* HDMI-DVI Dongle */ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && !sink->edid_caps.edid_hdmi) sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) dp_trace_init(link); /* Connectivity log: detection */ for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) { CONN_DATA_DETECT(link, &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE], DC_EDID_BLOCK_SIZE, "%s: [Block %d] ", sink->edid_caps.display_name, i); } DC_LOG_DETECTION_EDID_PARSER("%s: " "manufacturer_id = %X, " "product_id = %X, " "serial_number = %X, " "manufacture_week = %d, " "manufacture_year = %d, " "display_name = %s, " "speaker_flag = %d, " "audio_mode_count = %d\n", __func__, sink->edid_caps.manufacturer_id, sink->edid_caps.product_id, sink->edid_caps.serial_number, 
sink->edid_caps.manufacture_week, sink->edid_caps.manufacture_year, sink->edid_caps.display_name, sink->edid_caps.speaker_flags, sink->edid_caps.audio_mode_count); for (i = 0; i < sink->edid_caps.audio_mode_count; i++) { DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, " "format_code = %d, " "channel_count = %d, " "sample_rate = %d, " "sample_size = %d\n", __func__, i, sink->edid_caps.audio_modes[i].format_code, sink->edid_caps.audio_modes[i].channel_count, sink->edid_caps.audio_modes[i].sample_rate, sink->edid_caps.audio_modes[i].sample_size); } if (link->connector_signal == SIGNAL_TYPE_EDP) { // Init dc_panel_config by HW config if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults) dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config); // Pickup base DM settings dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink); // Override dc_panel_config if system has specific settings dm_helpers_override_panel_settings(dc_ctx, &link->panel_config); } } else { /* From Connected-to-Disconnected. */ link->type = dc_connection_none; sink_caps.signal = SIGNAL_TYPE_NONE; memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps)); /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk * is not cleared. If we emulate a DP signal on this connection, it thinks * the dongle is still there and limits the number of modes we can emulate. * Clear dongle_max_pix_clk on disconnect to fix this */ link->dongle_max_pix_clk = 0; dc_link_clear_dprx_states(link); dp_trace_reset(link); } LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", link->link_index, sink, (sink_caps.signal == SIGNAL_TYPE_NONE ? 
"Disconnected" : "Connected"), prev_sink, same_edid); if (prev_sink) dc_sink_release(prev_sink); return true; } /* * link_detect_connection_type() - Determine if there is a sink connected * * @type: Returned connection type * Does not detect downstream devices, such as MST sinks * or display connected through active dongles */ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type) { uint32_t is_hpd_high = 0; if (link->connector_signal == SIGNAL_TYPE_LVDS) { *type = dc_connection_single; return true; } if (link->connector_signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ if (!link->dc->config.edp_no_power_sequencing) link->dc->hwss.edp_power_control(link, true); link->dc->hwss.edp_wait_for_hpd_ready(link, true); } /* Link may not have physical HPD pin. */ if (link->ep_type != DISPLAY_ENDPOINT_PHY) { if (link->is_hpd_pending || !dpia_query_hpd_status(link)) *type = dc_connection_none; else *type = dc_connection_single; return true; } if (!query_hpd_status(link, &is_hpd_high)) goto hpd_gpio_failure; if (is_hpd_high) { *type = dc_connection_single; /* TODO: need to do the actual detection */ } else { *type = dc_connection_none; if (link->connector_signal == SIGNAL_TYPE_EDP) { /* eDP is not connected, power down it */ if (!link->dc->config.edp_no_power_sequencing) link->dc->hwss.edp_power_control(link, false); } } return true; hpd_gpio_failure: return false; } bool link_detect(struct dc_link *link, enum dc_detect_reason reason) { bool is_local_sink_detect_success; bool is_delegated_to_mst_top_mgr = false; enum dc_connection_type pre_link_type = link->type; DC_LOGGER_INIT(link->ctx->logger); is_local_sink_detect_success = detect_link_and_local_sink(link, reason); if (is_local_sink_detect_success && link->local_sink) verify_link_capability(link, link->local_sink, reason); DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__, link->link_index, is_local_sink_detect_success, pre_link_type, 
link->type); if (is_local_sink_detect_success && link->local_sink && dc_is_dp_signal(link->local_sink->sink_signal) && link->dpcd_caps.is_mst_capable) is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); if (is_local_sink_detect_success && pre_link_type == dc_connection_mst_branch && link->type != dc_connection_mst_branch) is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link); return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; } void link_clear_dprx_states(struct dc_link *link) { memset(&link->dprx_states, 0, sizeof(link->dprx_states)); } bool link_is_hdcp14(struct dc_link *link, enum signal_type signal) { bool ret = false; switch (signal) { case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE; break; case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_HDMI_TYPE_A: /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable, * we can poll for bksv but some displays have an issue with this. Since its so rare * for a display to not be 1.4 capable, this assumtion is ok */ ret = true; break; default: break; } return ret; } bool link_is_hdcp22(struct dc_link *link, enum signal_type signal) { bool ret = false; switch (signal) { case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE && link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable && (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0; break; case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_HDMI_TYPE_A: ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 
1:0; break; default: break; } return ret; } const struct dc_link_status *link_get_status(const struct dc_link *link) { return &link->link_status; } static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) { if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { BREAK_TO_DEBUGGER(); return false; } dc_sink_retain(sink); dc_link->remote_sinks[dc_link->sink_count] = sink; dc_link->sink_count++; return true; } struct dc_sink *link_add_remote_sink( struct dc_link *link, const uint8_t *edid, int len, struct dc_sink_init_data *init_data) { struct dc_sink *dc_sink; enum dc_edid_status edid_status; if (len > DC_MAX_EDID_BUFFER_SIZE) { dm_error("Max EDID buffer size breached!\n"); return NULL; } if (!init_data) { BREAK_TO_DEBUGGER(); return NULL; } if (!init_data->link) { BREAK_TO_DEBUGGER(); return NULL; } dc_sink = dc_sink_create(init_data); if (!dc_sink) return NULL; memmove(dc_sink->dc_edid.raw_edid, edid, len); dc_sink->dc_edid.length = len; if (!link_add_remote_sink_helper( link, dc_sink)) goto fail_add_sink; edid_status = dm_helpers_parse_edid_caps( link, &dc_sink->dc_edid, &dc_sink->edid_caps); /* * Treat device as no EDID device if EDID * parsing fails */ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) { dc_sink->dc_edid.length = 0; dm_error("Bad EDID, status%d!\n", edid_status); } return dc_sink; fail_add_sink: dc_sink_release(dc_sink); return NULL; } void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) { int i; if (!link->sink_count) { BREAK_TO_DEBUGGER(); return; } for (i = 0; i < link->sink_count; i++) { if (link->remote_sinks[i] == sink) { dc_sink_release(sink); link->remote_sinks[i] = NULL; /* shrink array to remove empty place */ while (i < link->sink_count - 1) { link->remote_sinks[i] = link->remote_sinks[i+1]; i++; } link->remote_sinks[i] = NULL; link->sink_count--; return; } } }
linux-master
drivers/gpu/drm/amd/display/dc/link/link_detection.c
/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file owns timing validation against various link limitations. (ex. * link bandwidth, receiver capability or our hardware capability) It also * provides helper functions exposing bandwidth formulas used in validation. 
*/ #include "link_validation.h" #include "protocols/link_dp_capability.h" #include "protocols/link_dp_dpia_bw.h" #include "resource.h" #define DC_LOGGER_INIT(logger) static uint32_t get_tmds_output_pixel_clock_100hz(const struct dc_crtc_timing *timing) { uint32_t pxl_clk = timing->pix_clk_100hz; if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) pxl_clk /= 2; else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) pxl_clk = pxl_clk * 2 / 3; if (timing->display_color_depth == COLOR_DEPTH_101010) pxl_clk = pxl_clk * 10 / 8; else if (timing->display_color_depth == COLOR_DEPTH_121212) pxl_clk = pxl_clk * 12 / 8; return pxl_clk; } static bool dp_active_dongle_validate_timing( const struct dc_crtc_timing *timing, const struct dpcd_caps *dpcd_caps) { const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; switch (dpcd_caps->dongle_type) { case DISPLAY_DONGLE_DP_VGA_CONVERTER: case DISPLAY_DONGLE_DP_DVI_CONVERTER: case DISPLAY_DONGLE_DP_DVI_DONGLE: if (timing->pixel_encoding == PIXEL_ENCODING_RGB) return true; else return false; default: break; } if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && dongle_caps->extendedCapValid == true) { /* Check Pixel Encoding */ switch (timing->pixel_encoding) { case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: break; case PIXEL_ENCODING_YCBCR422: if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) return false; break; case PIXEL_ENCODING_YCBCR420: if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) return false; break; default: /* Invalid Pixel Encoding*/ return false; } switch (timing->display_color_depth) { case COLOR_DEPTH_666: case COLOR_DEPTH_888: /*888 and 666 should always be supported*/ break; case COLOR_DEPTH_101010: if (dongle_caps->dp_hdmi_max_bpc < 10) return false; break; case COLOR_DEPTH_121212: if (dongle_caps->dp_hdmi_max_bpc < 12) return false; break; case COLOR_DEPTH_141414: case COLOR_DEPTH_161616: default: /* These color depths are currently not supported */ return false; } /* 
Check 3D format */ switch (timing->timing_3d_format) { case TIMING_3D_FORMAT_NONE: case TIMING_3D_FORMAT_FRAME_ALTERNATE: /*Only frame alternate 3D is supported on active dongle*/ break; default: /*other 3D formats are not supported due to bad infoframe translation */ return false; } if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter struct dc_crtc_timing outputTiming = *timing; #if defined(CONFIG_DRM_AMD_DC_FP) if (timing->flags.DSC && !timing->dsc_cfg.is_frl) /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ outputTiming.flags.DSC = 0; #endif if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) return false; } else { // DP to HDMI TMDS converter if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) return false; } } if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 && dongle_caps->dfp_cap_ext.supported) { if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000)) return false; if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable) return false; if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable) return false; if (timing->pixel_encoding == PIXEL_ENCODING_RGB) { if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) return false; if (timing->display_color_depth == COLOR_DEPTH_666 && !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_888 && !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_101010 && !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_121212 && 
!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_161616 && !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc) return false; } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) { if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) return false; if (timing->display_color_depth == COLOR_DEPTH_888 && !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_101010 && !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_121212 && !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_161616 && !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc) return false; } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) return false; if (timing->display_color_depth == COLOR_DEPTH_888 && !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_101010 && !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_121212 && !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_161616 && !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc) return false; } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) return false; if (timing->display_color_depth == COLOR_DEPTH_888 && !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_101010 && 
!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_121212 && !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc) return false; else if (timing->display_color_depth == COLOR_DEPTH_161616 && !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc) return false; } } return true; } uint32_t dp_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_settings) { uint32_t total_data_bw_efficiency_x10000 = 0; uint32_t link_rate_per_lane_kbps = 0; switch (link_dp_get_encoding_format(link_settings)) { case DP_8b_10b_ENCODING: /* For 8b/10b encoding: * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane. * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported. */ link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; if (dp_should_enable_fec(link)) { total_data_bw_efficiency_x10000 /= 100; total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; } break; case DP_128b_132b_ENCODING: /* For 128b/132b encoding: * link rate is defined in the unit of 10mbps per lane. * total data bandwidth efficiency is always 96.71%. 
*/ link_rate_per_lane_kbps = link_settings->link_rate * 10000; total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; break; default: break; } /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000; } static bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) { uint32_t req_bw; uint32_t max_bw; const struct dc_link_settings *link_setting; /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL) return false; /*always DP fail safe mode*/ if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 && timing->h_addressable == (uint32_t) 640 && timing->v_addressable == (uint32_t) 480) return true; link_setting = dp_get_verified_link_cap(link); /* TODO: DYNAMIC_VALIDATION needs to be implemented */ /*if (flags.DYNAMIC_VALIDATION == 1 && link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN) link_setting = &link->verified_link_cap; */ req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); max_bw = dp_link_bandwidth_kbps(link, link_setting); if (req_bw <= max_bw) { /* remember the biggest mode here, during * initial link training (to get * verified_link_cap), LS sends event about * cannot train at reported cap to upper * layer and upper layer will re-enumerate modes. 
* this is not necessary if the lower * verified_link_cap is enough to drive * all the modes */ /* TODO: DYNAMIC_VALIDATION needs to be implemented */ /* if (flags.DYNAMIC_VALIDATION == 1) dpsst->max_req_bw_for_verified_linkcap = dal_max( dpsst->max_req_bw_for_verified_linkcap, req_bw); */ return true; } else return false; } enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, const struct dc_crtc_timing *timing) { uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10; struct dpcd_caps *dpcd_caps = &link->dpcd_caps; /* A hack to avoid failing any modes for EDID override feature on * topology change such as lower quality cable for DP or different dongle */ if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL) return DC_OK; /* Passive Dongle */ if (max_pix_clk != 0 && get_tmds_output_pixel_clock_100hz(timing) > max_pix_clk) return DC_EXCEED_DONGLE_CAP; /* Active Dongle*/ if (!dp_active_dongle_validate_timing(timing, dpcd_caps)) return DC_EXCEED_DONGLE_CAP; switch (stream->signal) { case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: if (!dp_validate_mode_timing( link, timing)) return DC_NO_DP_LINK_BANDWIDTH; break; default: break; } return DC_OK; } bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams) { bool ret = true; int bw_needed[MAX_DPIA_NUM]; struct dc_link *link[MAX_DPIA_NUM]; if (!num_streams || num_streams > MAX_DPIA_NUM) return ret; for (uint8_t i = 0; i < num_streams; ++i) { link[i] = stream[i].link; bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, dc_link_get_highest_encoding_format(link[i])); } ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); return ret; }
linux-master
drivers/gpu/drm/amd/display/dc/link/link_validation.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include "link_hwss_dpia.h"
#include "core_types.h"
#include "link_hwss_dio.h"
#include "link_enc_cfg.h"

#define DC_LOGGER_INIT(logger)

/*
 * Program the MST stream allocation table for a DPIA (USB4 DP tunnel) link.
 *
 * Sums the slot counts of all allocated streams, reports the total to DMUB
 * via dc_process_dmub_set_mst_slots(), then programs the table into the
 * assigned link encoder.
 */
static void update_dpia_stream_allocation_table(struct dc_link *link,
		const struct link_resource *link_res,
		const struct link_mst_stream_allocation_table *table)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
	/* Fix: was "static enum dc_status status" — a per-call scratch value
	 * must not have static storage duration (non-reentrant, shared across
	 * links for no benefit). */
	enum dc_status status;
	uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
	int i;
	DC_LOGGER_INIT(link->ctx->logger);

	for (i = 0; i < table->stream_count; i++)
		mst_alloc_slots += table->stream_allocations[i].slot_count;

	status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
			mst_alloc_slots, &prev_mst_slots_in_use);
	ASSERT(status == DC_OK);
	DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
			status, mst_alloc_slots, prev_mst_slots_in_use);

	ASSERT(link_enc);
	link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
}

/* DPIA reuses the DIO hwss for everything except MST slot allocation,
 * which must additionally notify DMUB. */
static const struct link_hwss dpia_link_hwss = {
	.setup_stream_encoder = setup_dio_stream_encoder,
	.reset_stream_encoder = reset_dio_stream_encoder,
	.setup_stream_attribute = setup_dio_stream_attribute,
	.disable_link_output = disable_dio_link_output,
	.setup_audio_output = setup_dio_audio_output,
	.enable_audio_packet = enable_dio_audio_packet,
	.disable_audio_packet = disable_dio_audio_packet,
	.ext = {
		.set_throttled_vcp_size = set_dio_throttled_vcp_size,
		.enable_dp_link_output = enable_dio_dp_link_output,
		.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
		.set_dp_lane_settings = set_dio_dp_lane_settings,
		.update_stream_allocation_table = update_dpia_stream_allocation_table,
	},
};

/*
 * A link can use the DPIA hwss when its DIG mapping is flexible and the
 * resource pool supports dynamic link-encoder assignment.
 */
bool can_use_dpia_link_hwss(const struct dc_link *link,
		const struct link_resource *link_res)
{
	return link->is_dig_mapping_flexible &&
			link->dc->res_pool->funcs->link_encs_assign;
}

const struct link_hwss *get_dpia_link_hwss(void)
{
	return &dpia_link_hwss;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include "link_hwss_hpo_dp.h"
#include "link_hwss_hpo_fixed_vs_pe_retimer_dp.h"
#include "link_hwss_dio_fixed_vs_pe_retimer.h"

/*
 * Program per-lane TX FFE (feed-forward equalization) presets into the
 * fixed VS/PE retimer over I2C.
 *
 * Each 4-byte write is an opcode/address/lane-config/value tuple consumed
 * by configure_fixed_vs_pe_retimer(); the exact values and write order are
 * a vendor-defined sequence and must not be reordered.
 */
static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
		const struct dc_lane_settings *hw_lane_settings)
{
	/* Vendor mapping from DP FFE preset level (0..15) to retimer cfg. */
	const uint8_t vendor_ffe_preset_table[16] = {
			0x01, 0x41, 0x61, 0x81,
			0xB1, 0x05, 0x35, 0x65,
			0x85, 0xA5, 0x09, 0x39,
			0x59, 0x89, 0x0F, 0x24};

	/* Per-lane mask: strip de-emphasis (upper nibble) and/or preshoot
	 * bits when the lane settings request "no deemphasis"/"no preshoot". */
	const uint8_t ffe_mask[4] = {
			(hw_lane_settings[0].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
				& (hw_lane_settings[0].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
			(hw_lane_settings[1].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
				& (hw_lane_settings[1].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
			(hw_lane_settings[2].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
				& (hw_lane_settings[2].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
			(hw_lane_settings[3].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
				& (hw_lane_settings[3].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF)};

	const uint8_t ffe_cfg[4] = {
			vendor_ffe_preset_table[hw_lane_settings[0].FFE_PRESET.settings.level] & ffe_mask[0],
			vendor_ffe_preset_table[hw_lane_settings[1].FFE_PRESET.settings.level] & ffe_mask[1],
			vendor_ffe_preset_table[hw_lane_settings[2].FFE_PRESET.settings.level] & ffe_mask[2],
			vendor_ffe_preset_table[hw_lane_settings[3].FFE_PRESET.settings.level] & ffe_mask[3]};

	/* Lane-count dependent hardware config byte (0xF2 for 4-lane). */
	const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link);

	const uint8_t vendor_lttpr_write_data_ffe1[4] = {0x01, 0x50, dp_type, 0x0F};
	const uint8_t vendor_lttpr_write_data_ffe2[4] = {0x01, 0x55, dp_type, ffe_cfg[0]};
	const uint8_t vendor_lttpr_write_data_ffe3[4] = {0x01, 0x56, dp_type, ffe_cfg[1]};
	const uint8_t vendor_lttpr_write_data_ffe4[4] = {0x01, 0x57, dp_type, ffe_cfg[2]};
	const uint8_t vendor_lttpr_write_data_ffe5[4] = {0x01, 0x58, dp_type, ffe_cfg[3]};

	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_ffe1[0], sizeof(vendor_lttpr_write_data_ffe1));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_ffe2[0], sizeof(vendor_lttpr_write_data_ffe2));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_ffe3[0], sizeof(vendor_lttpr_write_data_ffe3));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_ffe4[0], sizeof(vendor_lttpr_write_data_ffe4));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_ffe5[0], sizeof(vendor_lttpr_write_data_ffe5));
}

/*
 * Program the retimer override registers for a square-wave (SQ128) test
 * pattern. Vendor-defined write sequence; pg7/pg9 are only written on
 * 4-lane configurations while pg8/pg10 are always written — this gating
 * is intentional per the sequence, not a missing brace.
 */
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
		struct encoder_set_dp_phy_pattern_param *tp_params)
{
	const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
	const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0};
	const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0};
	const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21};
	const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21};
	const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F};
	const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F};
	const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20};
	const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20};
	const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20};
	const uint8_t vendor_lttpr_write_data_pg10[4] = {0x1, 0x30, 0x55, 0x20};

	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg1[0], sizeof(vendor_lttpr_write_data_pg1));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg2[0], sizeof(vendor_lttpr_write_data_pg2));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg3[0], sizeof(vendor_lttpr_write_data_pg3));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg4[0], sizeof(vendor_lttpr_write_data_pg4));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg5[0], sizeof(vendor_lttpr_write_data_pg5));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg6[0], sizeof(vendor_lttpr_write_data_pg6));
	if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
		link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
				&vendor_lttpr_write_data_pg7[0], sizeof(vendor_lttpr_write_data_pg7));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg8[0], sizeof(vendor_lttpr_write_data_pg8));
	if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
		link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
				&vendor_lttpr_write_data_pg9[0], sizeof(vendor_lttpr_write_data_pg9));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg10[0], sizeof(vendor_lttpr_write_data_pg10));
}

/*
 * Intercept test-pattern requests that need retimer overrides.
 *
 * Returns true when the requested pattern is a square-wave pattern handled
 * via retimer overrides (in which case the link encoder is put into PRBS31
 * and the overrides are programmed); false when the caller should program
 * the pattern through the normal HPO path. Also deprograms stale overrides
 * left by a previous square-wave/custom pattern.
 */
static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link *link,
		const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params,
		const struct link_hwss *link_hwss)
{
	struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
	const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};

	if (tp_params == NULL)
		return false;

	if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN ||
			tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) {
		// Deprogram overrides from previously set square wave override
		if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
				link->current_test_pattern == DP_TEST_PATTERN_D102)
			link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
					&vendor_lttpr_exit_manual_automation_0[0],
					sizeof(vendor_lttpr_exit_manual_automation_0));
		else
			dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);

		return false;
	}

	/* Source side transmits PRBS31; the retimer overrides turn it into
	 * the requested square wave on the wire. */
	hw_tp_params.dp_phy_pattern = DP_TEST_PATTERN_PRBS31;
	hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode;

	if (link_hwss->ext.set_dp_link_test_pattern)
		link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params);

	dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params);

	dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]);

	return true;
}

/* HPO test-pattern entry point: try the retimer override path first, fall
 * back to the HPO link encoder, then record the DPCD trace event. */
static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link,
		const struct link_resource *link_res,
		struct encoder_set_dp_phy_pattern_param *tp_params)
{
	if (!dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(
			link, link_res, tp_params, get_hpo_dp_link_hwss())) {
		link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
				link_res->hpo_dp_link_enc, tp_params);
	}
	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}

/* Program FFE in the HPO link encoder; also push it to the retimer when a
 * square-wave pattern is active (FFE-only updates bypass the retimer
 * programming done at pattern-set time). */
static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
		const struct link_resource *link_res,
		const struct dc_link_settings *link_settings,
		const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
	link_res->hpo_dp_link_enc->funcs->set_ffe(
			link_res->hpo_dp_link_enc,
			link_settings,
			lane_settings[0].FFE_PRESET.raw);

	// FFE is programmed when retimer is programmed for SQ128, but explicit
	// programming needed here as well in case FFE-only update is requested
	if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
			link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END)
		dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
}

/* Enable HPO link output; 4-lane links need the vendor 4-lane retimer
 * programming first. */
static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal,
		enum clock_source_id clock_source,
		const struct dc_link_settings *link_settings)
{
	if (link_settings->lane_count == LANE_COUNT_FOUR)
		enable_dio_fixed_vs_pe_retimer_program_4lane_output(link);

	enable_hpo_dp_link_output(link, link_res, signal, clock_source, link_settings);
}

/* Standard HPO hwss with the link-output / test-pattern / lane-settings
 * hooks replaced by retimer-aware variants. */
static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {
	.setup_stream_encoder = setup_hpo_dp_stream_encoder,
	.reset_stream_encoder = reset_hpo_dp_stream_encoder,
	.setup_stream_attribute = setup_hpo_dp_stream_attribute,
	.disable_link_output = disable_hpo_dp_link_output,
	.setup_audio_output = setup_hpo_dp_audio_output,
	.enable_audio_packet = enable_hpo_dp_audio_packet,
	.disable_audio_packet = disable_hpo_dp_audio_packet,
	.ext = {
		.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
		.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
		.enable_dp_link_output = enable_hpo_fixed_vs_pe_retimer_dp_link_output,
		.set_dp_link_test_pattern = set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern,
		.set_dp_lane_settings = set_hpo_fixed_vs_pe_retimer_dp_lane_settings,
		.update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
	},
};

/* Retimer HPO hwss applies only to fixed-VS links that support 128b/132b. */
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)
{
	if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
		return false;

	if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
		return false;

	return true;
}

const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void)
{
	return &hpo_fixed_vs_pe_retimer_dp_link_hwss;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include "link_hwss_dio.h"
#include "link_hwss_dio_fixed_vs_pe_retimer.h"
#include "link_enc_cfg.h"

/* Map the current lane configuration to the retimer's lane-config byte:
 * 0xF2 for 4-lane, 0x12 otherwise. */
uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link)
{
	// TODO: Get USB-C cable orientation
	if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
		return 0xF2;
	else
		return 0x12;
}

/*
 * Take the fixed VS/PE retimer out of manual/automation override mode.
 * Vendor-defined sequence of ten 4-byte writes; order is significant.
 */
void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link)
{
	const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link);
	const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
	const uint8_t vendor_lttpr_exit_manual_automation_1[4] = {0x1, 0x50, dp_type, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_2[4] = {0x1, 0x50, 0x50, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_3[4] = {0x1, 0x51, 0x50, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_4[4] = {0x1, 0x10, 0x58, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_5[4] = {0x1, 0x10, 0x59, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_6[4] = {0x1, 0x30, 0x51, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_7[4] = {0x1, 0x30, 0x52, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_8[4] = {0x1, 0x30, 0x54, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_9[4] = {0x1, 0x30, 0x55, 0x0};

	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_1[0], sizeof(vendor_lttpr_exit_manual_automation_1));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_2[0], sizeof(vendor_lttpr_exit_manual_automation_2));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_3[0], sizeof(vendor_lttpr_exit_manual_automation_3));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_4[0], sizeof(vendor_lttpr_exit_manual_automation_4));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_5[0], sizeof(vendor_lttpr_exit_manual_automation_5));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_6[0], sizeof(vendor_lttpr_exit_manual_automation_6));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_7[0], sizeof(vendor_lttpr_exit_manual_automation_7));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_8[0], sizeof(vendor_lttpr_exit_manual_automation_8));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_exit_manual_automation_9[0], sizeof(vendor_lttpr_exit_manual_automation_9));
}

/*
 * Handle test patterns that require retimer overrides (80-bit custom with
 * the PLTPAT payload, or D10.2). Returns true if the pattern was handled
 * here; false if the caller should use the normal DIO path. Stale
 * overrides from a previous pattern are deprogrammed on the way.
 */
static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_link *link,
		const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params,
		const struct link_hwss *link_hwss)
{
	struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
	/* PLTPAT: the only 80-bit custom payload this override supports. */
	const uint8_t pltpat_custom[10] = {0x1F, 0x7C, 0xF0, 0xC1, 0x07, 0x1F, 0x7C, 0xF0, 0xC1, 0x07};
	const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
	const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};

	if (tp_params == NULL)
		return false;

	if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
			link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) {
		// Deprogram overrides from previous test pattern
		dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
	}

	switch (tp_params->dp_phy_pattern) {
	case DP_TEST_PATTERN_80BIT_CUSTOM:
		if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,
				pltpat_custom, tp_params->custom_pattern_size) != 0)
			return false;
		break;
	case DP_TEST_PATTERN_D102:
		break;
	default:
		if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
				link->current_test_pattern == DP_TEST_PATTERN_D102)
			// Deprogram overrides from previous test pattern
			link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
					&vendor_lttpr_exit_manual_automation_0[0],
					sizeof(vendor_lttpr_exit_manual_automation_0));

		return false;
	}

	hw_tp_params.dp_phy_pattern = tp_params->dp_phy_pattern;
	hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode;

	if (link_hwss->ext.set_dp_link_test_pattern)
		link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params);

	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0));

	return true;
}

/* DIO test-pattern entry point: try the retimer override path first, fall
 * back to the link encoder, then record the DPCD trace event. */
static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link,
		const struct link_resource *link_res,
		struct encoder_set_dp_phy_pattern_param *tp_params)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);

	if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(
			link, link_res, tp_params, get_dio_link_hwss())) {
		link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
	}
	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}

/* Vendor-defined 4-lane output programming for the retimer; must run
 * before enabling 4-lane link output. */
void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link)
{
	const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
	const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
	const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
	const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
	const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};

	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			&vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
}

/* Enable DIO link output; 4-lane links need the vendor 4-lane retimer
 * programming first. */
static void enable_dio_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal,
		enum clock_source_id clock_source,
		const struct dc_link_settings *link_settings)
{
	if (link_settings->lane_count == LANE_COUNT_FOUR)
		enable_dio_fixed_vs_pe_retimer_program_4lane_output(link);

	enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings);
}

/* Standard DIO hwss with retimer-aware link-output and test-pattern hooks. */
static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
	.setup_stream_encoder = setup_dio_stream_encoder,
	.reset_stream_encoder = reset_dio_stream_encoder,
	.setup_stream_attribute = setup_dio_stream_attribute,
	.disable_link_output = disable_dio_link_output,
	.setup_audio_output = setup_dio_audio_output,
	.enable_audio_packet = enable_dio_audio_packet,
	.disable_audio_packet = disable_dio_audio_packet,
	.ext = {
		.set_throttled_vcp_size = set_dio_throttled_vcp_size,
		.enable_dp_link_output = enable_dio_fixed_vs_pe_retimer_dp_link_output,
		.set_dp_link_test_pattern = set_dio_fixed_vs_pe_retimer_dp_link_test_pattern,
		.set_dp_lane_settings = set_dio_dp_lane_settings,
		.update_stream_allocation_table = update_dio_stream_allocation_table,
	},
};

/* Retimer DIO hwss applies only to fixed-VS links that support 128b/132b. */
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
	if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
		return false;

	if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
		return false;

	return true;
}

const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
{
	return &dio_fixed_vs_pe_retimer_link_hwss;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include "link_hwss_dio.h"
#include "core_types.h"
#include "link_enc_cfg.h"

/* Program the throttled VCP size (MST payload bandwidth) into the DIO
 * stream encoder. */
void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
		struct fixed31_32 throttled_vcp_size)
{
	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;

	stream_encoder->funcs->set_throttled_vcp_size(
				stream_encoder,
				throttled_vcp_size);
}

/* Connect the DIG backend to the stream encoder frontend and enable the
 * output FIFO; order (connect -> map -> FIFO) is the required sequence. */
void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	link_enc->funcs->connect_dig_be_to_fe(link_enc,
			pipe_ctx->stream_res.stream_enc->id, true);
	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
				DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
	if (stream_enc->funcs->map_stream_to_link)
		stream_enc->funcs->map_stream_to_link(stream_enc,
				stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
	if (stream_enc->funcs->enable_fifo)
		stream_enc->funcs->enable_fifo(stream_enc);
}

/* Reverse of setup_dio_stream_encoder: disable FIFO first, then break the
 * DIG BE/FE connection. */
void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	if (stream_enc && stream_enc->funcs->disable_fifo)
		stream_enc->funcs->disable_fifo(stream_enc);

	link_enc->funcs->connect_dig_be_to_fe(
			link_enc,
			pipe_ctx->stream_res.stream_enc->id,
			false);
	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
				pipe_ctx->stream->link,
				DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
}

/* Program the stream encoder's timing/format attributes according to the
 * signal type (DP, HDMI TMDS, DVI, or LVDS). */
void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
{
	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;

	if (!dc_is_virtual_signal(stream->signal))
		stream_encoder->funcs->setup_stereo_sync(
				stream_encoder,
				pipe_ctx->stream_res.tg->inst,
				stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);

	if (dc_is_dp_signal(stream->signal))
		stream_encoder->funcs->dp_set_stream_attribute(
				stream_encoder,
				&stream->timing,
				stream->output_color_space,
				stream->use_vsc_sdp_for_colorimetry,
				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
	else if (dc_is_hdmi_tmds_signal(stream->signal))
		stream_encoder->funcs->hdmi_set_stream_attribute(
				stream_encoder,
				&stream->timing,
				stream->phy_pix_clk,
				pipe_ctx->stream_res.audio != NULL);
	else if (dc_is_dvi_signal(stream->signal))
		stream_encoder->funcs->dvi_set_stream_attribute(
				stream_encoder,
				&stream->timing,
				(stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
						true : false);
	else if (dc_is_lvds_signal(stream->signal))
		stream_encoder->funcs->lvds_set_stream_attribute(
				stream_encoder,
				&stream->timing);

	if (dc_is_dp_signal(stream->signal))
		link->dc->link_srv->dp_trace_source_sequence(link,
				DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}

/* Enable the DP PHY output, choosing the SST or MST encoder path by
 * signal type. */
void enable_dio_dp_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal,
		enum clock_source_id clock_source,
		const struct dc_link_settings *link_settings)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);

	if (dc_is_dp_sst_signal(signal))
		link_enc->funcs->enable_dp_output(
				link_enc,
				link_settings,
				clock_source);
	else
		link_enc->funcs->enable_dp_mst_output(
				link_enc,
				link_settings,
				clock_source);
	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}

/* Disable the link encoder output for the given signal type. */
void disable_dio_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);

	link_enc->funcs->disable_output(link_enc, signal);
	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}

/* Program a DP PHY test pattern through the link encoder. */
void set_dio_dp_link_test_pattern(struct dc_link *link,
		const struct link_resource *link_res,
		struct encoder_set_dp_phy_pattern_param *tp_params)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);

	link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}

/* Program per-lane voltage swing / pre-emphasis settings. */
void set_dio_dp_lane_settings(struct dc_link *link,
		const struct link_resource *link_res,
		const struct dc_link_settings *link_settings,
		const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);

	link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}

/* Program the MST stream allocation table into the link encoder. */
void update_dio_stream_allocation_table(struct dc_link *link,
		const struct link_resource *link_res,
		const struct link_mst_stream_allocation_table *table)
{
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);

	ASSERT(link_enc);
	link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
}

/* Route the audio instance to the stream encoder (DP or HDMI path). */
void setup_dio_audio_output(struct pipe_ctx *pipe_ctx,
		struct audio_output *audio_output, uint32_t audio_inst)
{
	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
				pipe_ctx->stream_res.stream_enc,
				audio_inst,
				&pipe_ctx->stream->audio_info);
	else
		pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
				pipe_ctx->stream_res.stream_enc,
				audio_inst,
				&pipe_ctx->stream->audio_info,
				&audio_output->crtc_info);
}

/* Enable audio packet transmission and unmute. */
void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
{
	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(
				pipe_ctx->stream_res.stream_enc);

	pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
			pipe_ctx->stream_res.stream_enc, false);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
				pipe_ctx->stream->link,
				DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}

/* Mute audio, then disable packet transmission if an audio resource is
 * attached (DP or HDMI path). */
void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
{
	pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
			pipe_ctx->stream_res.stream_enc, true);

	if (pipe_ctx->stream_res.audio) {
		if (dc_is_dp_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
					pipe_ctx->stream_res.stream_enc);
		else
			pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
					pipe_ctx->stream_res.stream_enc);
	}

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
				pipe_ctx->stream->link,
				DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
}

/* Baseline hwss for legacy DIO (8b/10b) links. */
static const struct link_hwss dio_link_hwss = {
	.setup_stream_encoder = setup_dio_stream_encoder,
	.reset_stream_encoder = reset_dio_stream_encoder,
	.setup_stream_attribute = setup_dio_stream_attribute,
	.disable_link_output = disable_dio_link_output,
	.setup_audio_output = setup_dio_audio_output,
	.enable_audio_packet = enable_dio_audio_packet,
	.disable_audio_packet = disable_dio_audio_packet,
	.ext = {
		.set_throttled_vcp_size = set_dio_throttled_vcp_size,
		.enable_dp_link_output = enable_dio_dp_link_output,
		.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
		.set_dp_lane_settings = set_dio_dp_lane_settings,
		.update_stream_allocation_table = update_dio_stream_allocation_table,
	},
};

/* DIO hwss is usable whenever the link has a statically-assigned
 * link encoder. */
bool can_use_dio_link_hwss(const struct dc_link *link,
		const struct link_resource *link_res)
{
	return link->link_enc != NULL;
}

const struct link_hwss *get_dio_link_hwss(void)
{
	return &dio_link_hwss;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */
#include "link_hwss_hpo_dp.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "dccg.h"
#include "clk_mgr.h"

/* Program the throttled VCP size for this pipe's stream on its HPO DP link
 * encoder (128b/132b time-slot allocation for the stream).
 */
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
		struct fixed31_32 throttled_vcp_size)
{
	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder =
			pipe_ctx->stream_res.hpo_dp_stream_enc;
	struct hpo_dp_link_encoder *hpo_dp_link_encoder =
			pipe_ctx->link_res.hpo_dp_link_enc;

	hpo_dp_link_encoder->funcs->set_throttled_vcp_size(hpo_dp_link_encoder,
			hpo_dp_stream_encoder->inst,
			throttled_vcp_size);
}

/* Derive and program the minimum symbol width to use during horizontal
 * blanking: scale the number of MTPs that fit in hblank by the stream's
 * throttled VCP size. Falls back to 0 when link bandwidth is unknown.
 * NOTE(review): the fixed-point math treats pix_clk_100hz/10 and
 * link_bw_in_kbps as commensurable time bases per the DP spec derivation --
 * confirm units against the 128b/132b MTP definition before modifying.
 */
void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
		const struct dc_link_settings *link_settings,
		struct fixed31_32 throttled_vcp_size)
{
	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder =
			pipe_ctx->stream_res.hpo_dp_stream_enc;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank;
	uint32_t link_bw_in_kbps =
			hpo_dp_stream_encoder->ctx->dc->link_srv->dp_link_bandwidth_kbps(
					pipe_ctx->stream->link, link_settings);
	uint16_t hblank_min_symbol_width = 0;

	if (link_bw_in_kbps > 0) {
		h_blank_in_ms = dc_fixpt_div(dc_fixpt_from_int(
				timing->h_total - timing->h_addressable),
				dc_fixpt_from_fraction(timing->pix_clk_100hz, 10));
		/* one time slot carries 32 * 4 bits on the link */
		time_slot_in_ms = dc_fixpt_from_fraction(32 * 4, link_bw_in_kbps);
		/* 64 time slots per MTP */
		mtp_cnt_per_h_blank = dc_fixpt_div(h_blank_in_ms,
				dc_fixpt_mul_int(time_slot_in_ms, 64));
		hblank_min_symbol_width = dc_fixpt_floor(
				dc_fixpt_mul(mtp_cnt_per_h_blank, throttled_vcp_size));
	}

	hpo_dp_stream_encoder->funcs->set_hblank_min_symbol_width(hpo_dp_stream_encoder,
			hblank_min_symbol_width);
}

/* Enable the HPO stream encoder and map it onto its HPO link encoder. */
void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
	struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
	struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;

	stream_enc->funcs->enable_stream(stream_enc);
	stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
}

/* Disable this pipe's HPO stream encoder. */
void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
	struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;

	stream_enc->funcs->disable(stream_enc);
}

/* Program stream attributes (timing, color space, VSC SDP usage, DSC flag)
 * into the HPO stream encoder, then record the source sequence for tracing.
 * The trailing 'false' argument's meaning is defined by the encoder vtable --
 * see set_stream_attribute's declaration.
 */
void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
{
	struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;

	stream_enc->funcs->set_stream_attribute(
			stream_enc,
			&stream->timing,
			stream->output_color_space,
			stream->use_vsc_sdp_for_colorimetry,
			stream->timing.flags.DSC,
			false);
	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}

/* Enable the HPO DP link PHY: first ungate the SYMCLK32 LE root clock (if
 * the DCCG provides that hook), then bring up the link encoder PHY with the
 * requested link settings.
 */
void enable_hpo_dp_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal,
		enum clock_source_id clock_source,
		const struct dc_link_settings *link_settings)
{
	if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
		link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
				link->dc->res_pool->dccg,
				link_res->hpo_dp_link_enc->inst,
				true);
	link_res->hpo_dp_link_enc->funcs->enable_link_phy(
			link_res->hpo_dp_link_enc,
			link_settings,
			link->link_enc->transmitter,
			link->link_enc->hpd_source);
}

/* Disable the HPO DP link: stop the link, power down the PHY, then gate the
 * SYMCLK32 LE root clock (mirror of enable_hpo_dp_link_output's ordering).
 */
void disable_hpo_dp_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
	link_res->hpo_dp_link_enc->funcs->disable_link_phy(
			link_res->hpo_dp_link_enc, signal);
	if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
		link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
				link->dc->res_pool->dccg,
				link_res->hpo_dp_link_enc->inst,
				false);
}

/* Forward a PHY test-pattern request to the HPO link encoder and trace it. */
static void set_hpo_dp_link_test_pattern(struct dc_link *link,
		const struct link_resource *link_res,
		struct encoder_set_dp_phy_pattern_param *tp_params)
{
	link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
			link_res->hpo_dp_link_enc, tp_params);
	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}

/* Program lane FFE presets. Only lane 0's preset is passed down; the
 * encoder applies it per its set_ffe contract.
 */
static void set_hpo_dp_lane_settings(struct dc_link *link,
		const struct link_resource *link_res,
		const struct dc_link_settings *link_settings,
		const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
	link_res->hpo_dp_link_enc->funcs->set_ffe(
			link_res->hpo_dp_link_enc,
			link_settings,
			lane_settings[0].FFE_PRESET.raw);
}

/* Push a new MST stream allocation table into the HPO link encoder. */
void update_hpo_dp_stream_allocation_table(struct dc_link *link,
		const struct link_resource *link_res,
		const struct link_mst_stream_allocation_table *table)
{
	link_res->hpo_dp_link_enc->funcs->update_stream_allocation_table(
			link_res->hpo_dp_link_enc,
			table);
}

/* Configure DP audio on the HPO stream encoder for the given audio instance.
 * The audio_output parameter is part of the link_hwss interface but is not
 * consumed here.
 */
void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
		struct audio_output *audio_output, uint32_t audio_inst)
{
	pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
			pipe_ctx->stream_res.hpo_dp_stream_enc,
			audio_inst,
			&pipe_ctx->stream->audio_info);
}

/* Start audio packet transmission on the HPO stream encoder. */
void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
{
	pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(
			pipe_ctx->stream_res.hpo_dp_stream_enc);
}

/* Stop audio packet transmission, but only if an audio resource is attached
 * to this pipe.
 */
void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->stream_res.audio)
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable(
				pipe_ctx->stream_res.hpo_dp_stream_enc);
}

/* link_hwss vtable for HPO (128b/132b) DP links; returned by
 * get_hpo_dp_link_hwss() below.
 */
static const struct link_hwss hpo_dp_link_hwss = {
	.setup_stream_encoder = setup_hpo_dp_stream_encoder,
	.reset_stream_encoder = reset_hpo_dp_stream_encoder,
	.setup_stream_attribute = setup_hpo_dp_stream_attribute,
	.disable_link_output = disable_hpo_dp_link_output,
	.setup_audio_output = setup_hpo_dp_audio_output,
	.enable_audio_packet = enable_hpo_dp_audio_packet,
	.disable_audio_packet = disable_hpo_dp_audio_packet,
	.ext = {
		.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
		.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
		.enable_dp_link_output = enable_hpo_dp_link_output,
		.set_dp_link_test_pattern = set_hpo_dp_link_test_pattern,
		.set_dp_lane_settings = set_hpo_dp_lane_settings,
		.update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
	},
};

/* An HPO link hwss is usable iff an HPO DP link encoder was assigned to the
 * link resource.
 */
bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
		const struct link_resource *link_res)
{
	return link_res->hpo_dp_link_enc != NULL;
}

/* Accessor for the HPO DP link_hwss vtable. */
const struct link_hwss *get_hpo_dp_link_hwss(void)
{
	return &hpo_dp_link_hwss;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */
#include "link_fpga.h"
#include "link/link_dpms.h"
#include "dm_helpers.h"
#include "link_hwss.h"
#include "dccg.h"
#include "resource.h"

#define DC_LOGGER_INIT(logger)

/* FPGA-only bring-up path for an HPO DP link and its stream:
 * enable the link PHY, enable the stream encoder, set DSC PPS SDP if DSC is
 * on, build and program the payload (VCP) allocation table (MST: one entry
 * per stream; SST: a single entry with VC ID 1), set the throttled VCP size,
 * then unblank the stream and enable audio. Ordering of these steps follows
 * the normal DPMS-on sequence and must be preserved.
 */
void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct link_mst_stream_allocation_table proposed_table = {0};
	struct fixed31_32 avg_time_slots_per_mtp;
	uint8_t req_slot_count = 0;
	uint8_t vc_id = 1; /// VC ID always 1 for SST
	struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings;
	const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res);
	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

	stream->link->cur_link_settings = link_settings;

	if (link_hwss->ext.enable_dp_link_output)
		link_hwss->ext.enable_dp_link_output(stream->link,
				&pipe_ctx->link_res, stream->signal,
				pipe_ctx->clock_source->id,
				&link_settings);

	/* Enable DP_STREAM_ENC */
	dc->hwss.enable_stream(pipe_ctx);

	/* Set DPS PPS SDP (AKA "info frames") */
	if (pipe_ctx->stream->timing.flags.DSC) {
		link_set_dsc_pps_packet(pipe_ctx, true, true);
	}

	/* Allocate Payload */
	if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
		// MST case
		uint8_t i;

		proposed_table.stream_count = state->stream_count;
		for (i = 0; i < state->stream_count; i++) {
			avg_time_slots_per_mtp =
					link_calculate_sst_avg_time_slots_per_mtp(
							state->streams[i],
							state->streams[i]->link);
			req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
			proposed_table.stream_allocations[i].slot_count = req_slot_count;
			proposed_table.stream_allocations[i].vcp_id = i+1;
			/* NOTE: This makes assumption that pipe_ctx index is same as stream index */
			proposed_table.stream_allocations[i].hpo_dp_stream_enc =
					state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
		}
	} else {
		// SST case
		avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream,
				stream->link);
		req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
		proposed_table.stream_count = 1; /// Always 1 stream for SST
		proposed_table.stream_allocations[0].slot_count = req_slot_count;
		proposed_table.stream_allocations[0].vcp_id = vc_id;
		proposed_table.stream_allocations[0].hpo_dp_stream_enc =
				pipe_ctx->stream_res.hpo_dp_stream_enc;
	}

	link_hwss->ext.update_stream_allocation_table(stream->link,
			&pipe_ctx->link_res,
			&proposed_table);

	/* In the MST branch, avg_time_slots_per_mtp below holds the value
	 * computed for the LAST stream in the loop -- presumably intentional
	 * for this FPGA harness, but verify against the non-FPGA path.
	 */
	if (link_hwss->ext.set_throttled_vcp_size)
		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);

	dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
	dc->hwss.enable_audio_stream(pipe_ctx);
}
linux-master
drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */
#include "link_dp_cts.h"
#include "link/link_resource.h"
#include "link/protocols/link_dpcd.h"
#include "link/protocols/link_dp_training.h"
#include "link/protocols/link_dp_phy.h"
#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h"
#include "link/protocols/link_dp_capability.h"
#include "link/link_dpms.h"
#include "resource.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"

#define DC_LOGGER \
	link->ctx->logger

/* Translate a DPCD TEST_LINK_RATE value into the driver's link-rate enum. */
static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
{
	switch (test_rate) {
	case DP_TEST_LINK_RATE_RBR:
		return LINK_RATE_LOW;
	case DP_TEST_LINK_RATE_HBR:
		return LINK_RATE_HIGH;
	case DP_TEST_LINK_RATE_HBR2:
		return LINK_RATE_HIGH2;
	case DP_TEST_LINK_RATE_HBR3:
		return LINK_RATE_HIGH3;
	case DP_TEST_LINK_RATE_UHBR10:
		return LINK_RATE_UHBR10;
	case DP_TEST_LINK_RATE_UHBR20:
		return LINK_RATE_UHBR20;
	case DP_TEST_LINK_RATE_UHBR13_5:
		return LINK_RATE_UHBR13_5;
	default:
		return LINK_RATE_UNKNOWN;
	}
}

/* True for the square-wave PHY pattern range.
 * (Name keeps the historical "sqaure" spelling used by its callers.)
 */
static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
{
	return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
			test_pattern <= DP_TEST_PATTERN_SQUARE_END);
}

/* True for any PHY-layer pattern, including "video mode" (pattern off). */
static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
{
	if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
			test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
			test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
		return true;
	else
		return false;
}

/* Retrain the link for a DP test request: turn DPMS off on every master pipe
 * driven by this link, apply the requested link settings, then turn DPMS back
 * on in reverse order.
 */
static void dp_retrain_link_dp_test(struct dc_link *link,
		struct dc_link_settings *link_setting,
		bool skip_video_pattern)
{
	struct pipe_ctx *pipes[MAX_PIPES];
	struct dc_state *state = link->dc->current_state;
	uint8_t count;
	int i;

	udelay(100);

	link_get_master_pipes_with_dpms_on(link, state, &count, pipes);

	for (i = 0; i < count; i++) {
		link_set_dpms_off(pipes[i]);
		pipes[i]->link_config.dp_link_settings = *link_setting;
		update_dp_encoder_resources_for_test_harness(
				link->dc,
				state,
				pipes[i]);
	}

	for (i = count-1; i >= 0; i--)
		link_set_dpms_on(state, pipes[i]);
}

/* Handle a TEST_LINK_TRAINING request: read the requested lane count and
 * link rate from DPCD, store them as the verified capability, and retrain.
 */
static void dp_test_send_link_training(struct dc_link *link)
{
	struct dc_link_settings link_settings = {0};
	uint8_t test_rate = 0;

	core_link_read_dpcd(
			link,
			DP_TEST_LANE_COUNT,
			(unsigned char *)(&link_settings.lane_count),
			1);
	core_link_read_dpcd(
			link,
			DP_TEST_LINK_RATE,
			&test_rate,
			1);
	link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);

	/* Set preferred link settings */
	link->verified_link_cap.lane_count = link_settings.lane_count;
	link->verified_link_cap.link_rate = link_settings.link_rate;

	dp_retrain_link_dp_test(link, &link_settings, false);
}

/* Read the audio test request (mode, pattern type, per-channel periods,
 * sampling rate) from DPCD and stash it in link->audio_test_data for the
 * audio path to consume.
 */
static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
{
	union audio_test_mode dpcd_test_mode = {0};
	struct audio_test_pattern_type dpcd_pattern_type = {0};
	union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;

	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
	struct pipe_ctx *pipe_ctx = &pipes[0];
	unsigned int channel_count;
	unsigned int channel = 0;
	unsigned int modes = 0;
	unsigned int sampling_rate_in_hz = 0;

	// get audio test mode and test pattern parameters
	core_link_read_dpcd(
		link,
		DP_TEST_AUDIO_MODE,
		&dpcd_test_mode.raw,
		sizeof(dpcd_test_mode));

	core_link_read_dpcd(
		link,
		DP_TEST_AUDIO_PATTERN_TYPE,
		&dpcd_pattern_type.value,
		sizeof(dpcd_pattern_type));

	channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);

	// read pattern periods for requested channels when sawTooth pattern is requested
	if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
			dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {

		test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
				DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
		// read period for each channel
		for (channel = 0; channel < channel_count; channel++) {
			core_link_read_dpcd(
							link,
							DP_TEST_AUDIO_PERIOD_CH1 + channel,
							&dpcd_pattern_period[channel].raw,
							sizeof(dpcd_pattern_period[channel]));
		}
	}

	// translate sampling rate
	switch (dpcd_test_mode.bits.sampling_rate) {
	case AUDIO_SAMPLING_RATE_32KHZ:
		sampling_rate_in_hz = 32000;
		break;
	case AUDIO_SAMPLING_RATE_44_1KHZ:
		sampling_rate_in_hz = 44100;
		break;
	case AUDIO_SAMPLING_RATE_48KHZ:
		sampling_rate_in_hz = 48000;
		break;
	case AUDIO_SAMPLING_RATE_88_2KHZ:
		sampling_rate_in_hz = 88200;
		break;
	case AUDIO_SAMPLING_RATE_96KHZ:
		sampling_rate_in_hz = 96000;
		break;
	case AUDIO_SAMPLING_RATE_176_4KHZ:
		sampling_rate_in_hz = 176400;
		break;
	case AUDIO_SAMPLING_RATE_192KHZ:
		sampling_rate_in_hz = 192000;
		break;
	default:
		sampling_rate_in_hz = 0;
		break;
	}

	link->audio_test_data.flags.test_requested = 1;
	link->audio_test_data.flags.disable_video = disable_video;
	link->audio_test_data.sampling_rate = sampling_rate_in_hz;
	link->audio_test_data.channel_count = channel_count;
	link->audio_test_data.pattern_type = test_pattern;

	if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
		/* NOTE(review): this loop is bounded by mode_count, but
		 * dpcd_pattern_period[] only holds AUDIO_CHANNELS_COUNT
		 * entries (filled per channel above) -- verify mode_count
		 * cannot exceed AUDIO_CHANNELS_COUNT here.
		 */
		for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
			link->audio_test_data.pattern_period[modes] =
					dpcd_pattern_period[modes].bits.pattern_period;
		}
	}
}

/* TODO Raven hbr2 compliance eye output is unstable
 * (toggling on and off) with debugger break
 * This causes intermittent PHY automation failure
 * Need to look into the root cause
 */
/* Handle a PHY_TEST_PATTERN automation request: read the requested pattern,
 * lane adjust values and (for custom patterns) the pattern payload from
 * DPCD, translate them into driver settings, and apply via
 * dp_set_test_pattern().
 */
static void dp_test_send_phy_test_pattern(struct dc_link *link)
{
	union phy_test_pattern dpcd_test_pattern;
	union lane_adjust dpcd_lane_adjustment[2];
	unsigned char dpcd_post_cursor_2_adjustment = 0;
	unsigned char test_pattern_buffer[
			(DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
			DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
	unsigned int test_pattern_size = 0;
	enum dp_test_pattern test_pattern;
	union lane_adjust dpcd_lane_adjust;
	unsigned int lane;
	struct link_training_settings link_training_settings;
	unsigned char no_preshoot = 0;
	unsigned char no_deemphasis = 0;

	dpcd_test_pattern.raw = 0;
	memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
	memset(&link_training_settings, 0, sizeof(link_training_settings));

	/* get phy test pattern and pattern parameters from DP receiver */
	core_link_read_dpcd(
			link,
			DP_PHY_TEST_PATTERN,
			&dpcd_test_pattern.raw,
			sizeof(dpcd_test_pattern));
	core_link_read_dpcd(
			link,
			DP_ADJUST_REQUEST_LANE0_1,
			&dpcd_lane_adjustment[0].raw,
			sizeof(dpcd_lane_adjustment));

	/* prepare link training settings */
	link_training_settings.link_settings = link->cur_link_settings;

	link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);

	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
			link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
		dp_fixed_vs_pe_read_lane_adjust(
				link,
				link_training_settings.dpcd_lane_settings);

	/* get post cursor 2 parameters
	 * For DP 1.1a or earlier, this DPCD register's value is 0
	 * For DP 1.2 or later:
	 * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
	 * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
	 */
	core_link_read_dpcd(
			link,
			DP_ADJUST_REQUEST_POST_CURSOR2,
			&dpcd_post_cursor_2_adjustment,
			sizeof(dpcd_post_cursor_2_adjustment));

	/* translate request */
	switch (dpcd_test_pattern.bits.PATTERN) {
	case PHY_TEST_PATTERN_D10_2:
		test_pattern = DP_TEST_PATTERN_D102;
		break;
	case PHY_TEST_PATTERN_SYMBOL_ERROR:
		test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
		break;
	case PHY_TEST_PATTERN_PRBS7:
		test_pattern = DP_TEST_PATTERN_PRBS7;
		break;
	case PHY_TEST_PATTERN_80BIT_CUSTOM:
		test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
		break;
	case PHY_TEST_PATTERN_CP2520_1:
		/* CP2520 pattern is unstable, temporarily use TPS4 instead */
		test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
				DP_TEST_PATTERN_TRAINING_PATTERN4 :
				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
		break;
	case PHY_TEST_PATTERN_CP2520_2:
		/* CP2520 pattern is unstable, temporarily use TPS4 instead */
		test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
				DP_TEST_PATTERN_TRAINING_PATTERN4 :
				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
		break;
	case PHY_TEST_PATTERN_CP2520_3:
		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
		break;
	case PHY_TEST_PATTERN_128b_132b_TPS1:
		test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
		break;
	case PHY_TEST_PATTERN_128b_132b_TPS2:
		test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
		break;
	case PHY_TEST_PATTERN_PRBS9:
		test_pattern = DP_TEST_PATTERN_PRBS9;
		break;
	case PHY_TEST_PATTERN_PRBS11:
		test_pattern = DP_TEST_PATTERN_PRBS11;
		break;
	case PHY_TEST_PATTERN_PRBS15:
		test_pattern = DP_TEST_PATTERN_PRBS15;
		break;
	case PHY_TEST_PATTERN_PRBS23:
		test_pattern = DP_TEST_PATTERN_PRBS23;
		break;
	case PHY_TEST_PATTERN_PRBS31:
		test_pattern = DP_TEST_PATTERN_PRBS31;
		break;
	case PHY_TEST_PATTERN_264BIT_CUSTOM:
		test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
		break;
	case PHY_TEST_PATTERN_SQUARE:
		test_pattern = DP_TEST_PATTERN_SQUARE;
		break;
	case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
		test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
		no_preshoot = 1;
		break;
	case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
		test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
		no_deemphasis = 1;
		break;
	case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
		test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
		no_preshoot = 1;
		no_deemphasis = 1;
		break;
	default:
		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
		break;
	}

	if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
		test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
				DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1;
		core_link_read_dpcd(
				link,
				DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
				test_pattern_buffer,
				test_pattern_size);
	}

	if (is_dp_phy_sqaure_pattern(test_pattern)) {
		test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
		core_link_read_dpcd(
				link,
				DP_PHY_SQUARE_PATTERN,
				test_pattern_buffer,
				test_pattern_size);
	}

	if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
		test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
				DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
		core_link_read_dpcd(
				link,
				DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
				test_pattern_buffer,
				test_pattern_size);
	}

	/* per-lane adjust: 8b/10b uses swing/pre-emphasis/post-cursor2;
	 * 128b/132b uses FFE presets
	 */
	for (lane = 0; lane <
		(unsigned int)(link->cur_link_settings.lane_count);
		lane++) {
		dpcd_lane_adjust.raw =
			dp_get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
		if (link_dp_get_encoding_format(&link->cur_link_settings) ==
				DP_8b_10b_ENCODING) {
			link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
				(enum dc_voltage_swing)
				(dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
			link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
				(enum dc_pre_emphasis)
				(dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
			link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
				(enum dc_post_cursor2)
				((dpcd_post_cursor_2_adjustment >>
				(lane * 2)) & 0x03);
		} else if (link_dp_get_encoding_format(&link->cur_link_settings) ==
				DP_128b_132b_ENCODING) {
			link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level =
					dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
			link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = no_preshoot;
			link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis = no_deemphasis;
		}
	}

	dp_hw_to_dpcd_lane_settings(&link_training_settings,
			link_training_settings.hw_lane_settings,
			link_training_settings.dpcd_lane_settings);
	/* Usage: Measure DP physical lane signal
	 * by DP SI test equipment automatically.
	 * PHY test pattern request is generated by equipment via HPD interrupt.
	 * HPD needs to be active all the time. HPD should be active
	 * all the time. Do not touch it.
	 * forward request to DS
	 */
	dp_set_test_pattern(
		link,
		test_pattern,
		DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
		&link_training_settings,
		test_pattern_buffer,
		test_pattern_size);
}

/* Program (or clear) a CRTC-generated test pattern, handling both the legacy
 * TG set_test_pattern hook and the per-ODM-slice display pattern generator
 * path. VIDEO_MODE restores normal output and bit-depth reduction.
 */
static void set_crtc_test_pattern(struct dc_link *link,
		struct pipe_ctx *pipe_ctx,
		enum dp_test_pattern test_pattern,
		enum dp_test_pattern_color_space test_pattern_color_space)
{
	enum controller_dp_test_pattern controller_test_pattern;
	enum dc_color_depth color_depth = pipe_ctx->
			stream->timing.display_color_depth;
	struct bit_depth_reduction_params params;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
	struct pipe_ctx *odm_pipe;
	int odm_cnt = 1;
	int h_active = pipe_ctx->stream->timing.h_addressable +
			pipe_ctx->stream->timing.h_border_left +
			pipe_ctx->stream->timing.h_border_right;
	int v_active = pipe_ctx->stream->timing.v_addressable +
			pipe_ctx->stream->timing.v_border_bottom +
			pipe_ctx->stream->timing.v_border_top;
	int odm_slice_width, last_odm_slice_width, offset = 0;

	memset(&params, 0, sizeof(params));

	/* count ODM slices; the last slice absorbs any remainder */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		odm_cnt++;

	odm_slice_width = h_active / odm_cnt;
	last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);

	switch (test_pattern) {
	case DP_TEST_PATTERN_COLOR_SQUARES:
		controller_test_pattern =
				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
		break;
	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
		controller_test_pattern =
				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
		break;
	case DP_TEST_PATTERN_VERTICAL_BARS:
		controller_test_pattern =
				CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
		break;
	case DP_TEST_PATTERN_HORIZONTAL_BARS:
		controller_test_pattern =
				CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
		break;
	case DP_TEST_PATTERN_COLOR_RAMP:
		controller_test_pattern =
				CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
		break;
	default:
		controller_test_pattern =
				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
		break;
	}

	switch (test_pattern) {
	case DP_TEST_PATTERN_COLOR_SQUARES:
	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
	case DP_TEST_PATTERN_VERTICAL_BARS:
	case DP_TEST_PATTERN_HORIZONTAL_BARS:
	case DP_TEST_PATTERN_COLOR_RAMP:
	{
		/* disable bit depth reduction */
		pipe_ctx->stream->bit_depth_params = params;
		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
			opp->funcs->opp_program_bit_depth_reduction(opp, &params);
			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
					controller_test_pattern, color_depth);
		} else if (link->dc->hwss.set_disp_pattern_generator) {
			enum controller_dp_color_space controller_color_space;
			struct output_pixel_processor *odm_opp;

			switch (test_pattern_color_space) {
			case DP_TEST_PATTERN_COLOR_SPACE_RGB:
				controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
				break;
			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
				break;
			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
				break;
			case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
			default:
				controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
				DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
				ASSERT(0);
				break;
			}

			/* program every ODM slice at its horizontal offset */
			odm_pipe = pipe_ctx;
			while (odm_pipe->next_odm_pipe) {
				odm_opp = odm_pipe->stream_res.opp;
				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
				link->dc->hwss.set_disp_pattern_generator(link->dc,
						odm_pipe,
						controller_test_pattern,
						controller_color_space,
						color_depth,
						NULL,
						odm_slice_width,
						v_active,
						offset);
				offset += odm_slice_width;
				odm_pipe = odm_pipe->next_odm_pipe;
			}
			odm_opp = odm_pipe->stream_res.opp;
			odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
			link->dc->hwss.set_disp_pattern_generator(link->dc,
					odm_pipe,
					controller_test_pattern,
					controller_color_space,
					color_depth,
					NULL,
					last_odm_slice_width,
					v_active,
					offset);
		}
	}
	break;
	case DP_TEST_PATTERN_VIDEO_MODE:
	{
		/* restore bitdepth reduction */
		resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
		pipe_ctx->stream->bit_depth_params = params;
		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
			opp->funcs->opp_program_bit_depth_reduction(opp, &params);
			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
					color_depth);
		} else if (link->dc->hwss.set_disp_pattern_generator) {
			struct output_pixel_processor *odm_opp;

			odm_pipe = pipe_ctx;
			while (odm_pipe->next_odm_pipe) {
				odm_opp = odm_pipe->stream_res.opp;
				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
				link->dc->hwss.set_disp_pattern_generator(link->dc,
						odm_pipe,
						CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
						CONTROLLER_DP_COLOR_SPACE_UDEFINED,
						color_depth,
						NULL,
						odm_slice_width,
						v_active,
						offset);
				offset += odm_slice_width;
				odm_pipe = odm_pipe->next_odm_pipe;
			}
			odm_opp = odm_pipe->stream_res.opp;
			odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
			link->dc->hwss.set_disp_pattern_generator(link->dc,
					odm_pipe,
					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
					CONTROLLER_DP_COLOR_SPACE_UDEFINED,
					color_depth,
					NULL,
					last_odm_slice_width,
					v_active,
					offset);
		}
	}
	break;
	default:
		break;
	}
}

/* Dispatch a DPCD TEST_REQUEST (link training, link/audio/PHY test pattern)
 * and write the ACK/NAK response back to DP_TEST_RESPONSE.
 */
void dp_handle_automated_test(struct dc_link *link)
{
	union test_request test_request;
	union test_response test_response;

	memset(&test_request, 0, sizeof(test_request));
	memset(&test_response, 0, sizeof(test_response));

	core_link_read_dpcd(
		link,
		DP_TEST_REQUEST,
		&test_request.raw,
		sizeof(union test_request));
	if (test_request.bits.LINK_TRAINING) {
		/* ACK first to let DP RX test box monitor LT sequence */
		test_response.bits.ACK = 1;
		core_link_write_dpcd(
			link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
		dp_test_send_link_training(link);
		/* no acknowledge request is needed again */
		test_response.bits.ACK = 0;
	}
	if (test_request.bits.LINK_TEST_PATTRN) {
		union test_misc dpcd_test_params;
		union link_test_pattern dpcd_test_pattern;

		memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
		memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));

		/* get link test pattern and pattern parameters */
		core_link_read_dpcd(
				link,
				DP_TEST_PATTERN,
				&dpcd_test_pattern.raw,
				sizeof(dpcd_test_pattern));
		core_link_read_dpcd(
				link,
				DP_TEST_MISC0,
				&dpcd_test_params.raw,
				sizeof(dpcd_test_params));
		test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link,
				dpcd_test_pattern, dpcd_test_params) ? 1 : 0;
	}

	if (test_request.bits.AUDIO_TEST_PATTERN) {
		dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
		test_response.bits.ACK = 1;
	}

	if (test_request.bits.PHY_TEST_PATTERN) {
		dp_test_send_phy_test_pattern(link);
		test_response.bits.ACK = 1;
	}

	/* send request acknowledgment */
	if (test_response.bits.ACK)
		core_link_write_dpcd(
			link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
}

/* Apply a DP test pattern on the first OTG-master pipe driven by this link.
 * PHY patterns go through the HW pattern generator and are advertised to the
 * sink via DPCD (LINK_QUAL_LANEx_SET on DP 1.2+, TRAINING_PATTERN_SET
 * otherwise); CRTC patterns go through set_crtc_test_pattern() under a TG
 * lock. Returns false if no suitable pipe is found or the requested PHY
 * pattern is unknown.
 */
bool dp_set_test_pattern(
	struct dc_link *link,
	enum dp_test_pattern test_pattern,
	enum dp_test_pattern_color_space test_pattern_color_space,
	const struct link_training_settings *p_link_settings,
	const unsigned char *p_custom_pattern,
	unsigned int cust_pattern_size)
{
	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
	struct pipe_ctx *pipe_ctx = NULL;
	unsigned int lane;
	unsigned int i;
	unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
	union dpcd_training_pattern training_pattern;
	enum dpcd_phy_test_patterns pattern;

	memset(&training_pattern, 0, sizeof(training_pattern));

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream == NULL)
			continue;

		if (resource_is_pipe_type(&pipes[i], OTG_MASTER) &&
				pipes[i].stream->link == link) {
			pipe_ctx = &pipes[i];
			break;
		}
	}

	if (pipe_ctx == NULL)
		return false;

	/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
	if (link->test_pattern_enabled && test_pattern ==
			DP_TEST_PATTERN_VIDEO_MODE) {
		/* Set CRTC Test Pattern */
		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
				(uint8_t *)p_custom_pattern,
				(uint32_t)cust_pattern_size);

		/* Unblank Stream */
		link->dc->hwss.unblank_stream(
			pipe_ctx,
			&link->verified_link_cap);
		/* TODO:m_pHwss->MuteAudioEndpoint
		 * (pPathMode->pDisplayPath, false);
		 */

		/* Reset Test Pattern state */
		link->test_pattern_enabled = false;
		link->current_test_pattern = test_pattern;

		return true;
	}

	/* Check for PHY Test Patterns */
	if (is_dp_phy_pattern(test_pattern)) {
		/* Set DPCD Lane Settings before running test pattern */
		if (p_link_settings != NULL) {
			if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
					p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
				dp_fixed_vs_pe_set_retimer_lane_settings(
						link,
						p_link_settings->dpcd_lane_settings,
						p_link_settings->link_settings.lane_count);
			} else {
				dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
			}
			dpcd_set_lane_settings(link, p_link_settings, DPRX);
		}

		/* Blank stream if running test pattern */
		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
			/*TODO:
			 * m_pHwss->
			 * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
			 */
			/* Blank stream */
			link->dc->hwss.blank_stream(pipe_ctx);
		}

		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
				(uint8_t *)p_custom_pattern,
				(uint32_t)cust_pattern_size);

		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
			/* Set Test Pattern state */
			link->test_pattern_enabled = true;
			link->current_test_pattern = test_pattern;
			if (p_link_settings != NULL)
				dpcd_set_link_settings(link,
						p_link_settings);
		}

		switch (test_pattern) {
		case DP_TEST_PATTERN_VIDEO_MODE:
			pattern = PHY_TEST_PATTERN_NONE;
			break;
		case DP_TEST_PATTERN_D102:
			pattern = PHY_TEST_PATTERN_D10_2;
			break;
		case DP_TEST_PATTERN_SYMBOL_ERROR:
			pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
			break;
		case DP_TEST_PATTERN_PRBS7:
			pattern = PHY_TEST_PATTERN_PRBS7;
			break;
		case DP_TEST_PATTERN_80BIT_CUSTOM:
			pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
			break;
		case DP_TEST_PATTERN_CP2520_1:
			pattern = PHY_TEST_PATTERN_CP2520_1;
			break;
		case DP_TEST_PATTERN_CP2520_2:
			pattern = PHY_TEST_PATTERN_CP2520_2;
			break;
		case DP_TEST_PATTERN_CP2520_3:
			pattern = PHY_TEST_PATTERN_CP2520_3;
			break;
		case DP_TEST_PATTERN_128b_132b_TPS1:
			pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
			break;
		case DP_TEST_PATTERN_128b_132b_TPS2:
			pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
			break;
		case DP_TEST_PATTERN_PRBS9:
			pattern = PHY_TEST_PATTERN_PRBS9;
			break;
		case DP_TEST_PATTERN_PRBS11:
			pattern = PHY_TEST_PATTERN_PRBS11;
			break;
		case DP_TEST_PATTERN_PRBS15:
			pattern = PHY_TEST_PATTERN_PRBS15;
			break;
		case DP_TEST_PATTERN_PRBS23:
			pattern = PHY_TEST_PATTERN_PRBS23;
			break;
		case DP_TEST_PATTERN_PRBS31:
			pattern = PHY_TEST_PATTERN_PRBS31;
			break;
		case DP_TEST_PATTERN_264BIT_CUSTOM:
			pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
			break;
		case DP_TEST_PATTERN_SQUARE:
			pattern = PHY_TEST_PATTERN_SQUARE;
			break;
		case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
			pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
			break;
		case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
			pattern = PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
			break;
		case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
			pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
			break;
		default:
			return false;
		}

		if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
			/*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
			return false;

		if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
			if (is_dp_phy_sqaure_pattern(test_pattern))
				core_link_write_dpcd(link,
						DP_LINK_SQUARE_PATTERN,
						p_custom_pattern,
						1);

			/* tell receiver that we are sending qualification
			 * pattern DP 1.2 or later - DP receiver's link quality
			 * pattern is set using DPCD LINK_QUAL_LANEx_SET
			 * register (0x10B~0x10E)\
			 */
			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
				link_qual_pattern[lane] =
						(unsigned char)(pattern);

			core_link_write_dpcd(link,
					DP_LINK_QUAL_LANE0_SET,
					link_qual_pattern,
					sizeof(link_qual_pattern));
		} else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
				link->dpcd_caps.dpcd_rev.raw == 0) {
			/* tell receiver that we are sending qualification
			 * pattern DP 1.1a or earlier - DP receiver's link
			 * quality pattern is set using
			 * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
			 * register (0x102). We will use v_1.3 when we are
			 * setting test pattern for DP 1.1.
			 */
			core_link_read_dpcd(link,
					DP_TRAINING_PATTERN_SET,
					&training_pattern.raw,
					sizeof(training_pattern));
			training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
			core_link_write_dpcd(link,
					DP_TRAINING_PATTERN_SET,
					&training_pattern.raw,
					sizeof(training_pattern));
		}
	} else {
		enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;

		switch (test_pattern_color_space) {
		case DP_TEST_PATTERN_COLOR_SPACE_RGB:
			color_space = COLOR_SPACE_SRGB;
			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
				color_space = COLOR_SPACE_SRGB_LIMITED;
			break;
		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
			color_space = COLOR_SPACE_YCBCR601;
			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
				color_space = COLOR_SPACE_YCBCR601_LIMITED;
			break;
		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
			color_space = COLOR_SPACE_YCBCR709;
			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
				color_space = COLOR_SPACE_YCBCR709_LIMITED;
			break;
		default:
			break;
		}

		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
			if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
							true,
							&hw_locks,
							&inst_flags);
			} else
				pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
						pipe_ctx->stream_res.tg);
		}

		pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
		/* update MSA to requested color space */
		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
				&pipe_ctx->stream->timing,
				color_space,
				pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);

		if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
				pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
			else
				pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
			resource_build_info_frame(pipe_ctx);
			link->dc->hwss.update_info_frame(pipe_ctx);
		}

		/* CRTC Patterns */
		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
		/* settle on VACTIVE before releasing the doublebuffer lock */
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
				CRTC_STATE_VACTIVE);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
				CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
				CRTC_STATE_VACTIVE);

		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
			if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
							false,
							&hw_locks,
							&inst_flags);
			} else
				pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
						pipe_ctx->stream_res.tg);
		}

		/* Set Test Pattern state */
		link->test_pattern_enabled = true;
		link->current_test_pattern = test_pattern;
	}

	return true;
}

/* Store preferred link settings and retrain DP links that use them.
 * (Definition continues past the end of this view.)
 */
void dp_set_preferred_link_settings(struct dc *dc,
		struct dc_link_settings *link_setting,
		struct dc_link *link)
{
	int i;
	struct pipe_ctx *pipe;
	struct dc_stream_state *link_stream;
	struct dc_link_settings store_settings = *link_setting;

	link->preferred_link_setting = store_settings;

	/* Retrain with preferred link settings only relevant for
	 * DP signal type
	 * Check for non-DP signal or if passive dongle present
	 */
	if (!dc_is_dp_signal(link->connector_signal) ||
			link->dongle_max_pix_clk > 0)
		return;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream && pipe->stream->link) {
			if (pipe->stream->link == link) {
				link_stream = pipe->stream;
				break;
			}
		}
}

	/* No pipe in the current state drives this link; nothing to retrain. */
	if (i == MAX_PIPES)
		return;

	/* Cannot retrain link if backend is off */
	if (link_stream->dpms_off)
		return;

	/* Re-run link setting decision with the new preference and retrain. */
	if (link_decide_link_settings(link_stream, &store_settings))
		dp_retrain_link_dp_test(link, &store_settings, false);
}

/*
 * Store caller-provided link training overrides and preferred link settings
 * on the link. NULL lt_overrides clears any stored training overrides; NULL
 * link_setting resets the preferred link settings to "unknown" so normal
 * policy applies. Optionally retrains immediately via
 * dp_set_preferred_link_settings().
 */
void dp_set_preferred_training_settings(struct dc *dc,
		struct dc_link_settings *link_setting,
		struct dc_link_training_overrides *lt_overrides,
		struct dc_link *link,
		bool skip_immediate_retrain)
{
	if (lt_overrides != NULL)
		link->preferred_training_settings = *lt_overrides;
	else
		memset(&link->preferred_training_settings, 0,
				sizeof(link->preferred_training_settings));

	if (link_setting != NULL) {
		link->preferred_link_setting = *link_setting;
	} else {
		/* Reset to "no preference" sentinels. */
		link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
		link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
	}

	/* Keep DM's view of MST branch bandwidth in sync with the new settings. */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
			link->type == dc_connection_mst_branch)
		dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link);

	/* Retrain now, or wait until next stream update to apply */
	if (skip_immediate_retrain == false)
		dp_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
}
linux-master
drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*
 * Authors: AMD
 *
 */
#include "link_dp_trace.h"
#include "link/protocols/link_dpcd.h"
#include "link.h"

/* Helpers that record link-training / detection / eDP power events into
 * link->dp_trace. "in_detection" selects between the detection-time trace
 * (detect_lt_trace) and the commit/enable-time trace (commit_lt_trace).
 */

/* Zero the whole trace block and mark it initialized. */
void dp_trace_init(struct dc_link *link)
{
	memset(&link->dp_trace, 0, sizeof(link->dp_trace));
	link->dp_trace.is_initialized = true;
}

/* Zero the whole trace block (also clears is_initialized). */
void dp_trace_reset(struct dc_link *link)
{
	memset(&link->dp_trace, 0, sizeof(link->dp_trace));
}

bool dp_trace_is_initialized(struct dc_link *link)
{
	return link->dp_trace.is_initialized;
}

/* Clear only the detection-time link training trace. */
void dp_trace_detect_lt_init(struct dc_link *link)
{
	memset(&link->dp_trace.detect_lt_trace, 0,
			sizeof(link->dp_trace.detect_lt_trace));
}

/* Clear only the commit-time link training trace. */
void dp_trace_commit_lt_init(struct dc_link *link)
{
	memset(&link->dp_trace.commit_lt_trace, 0,
			sizeof(link->dp_trace.commit_lt_trace));
}

void dp_trace_link_loss_increment(struct dc_link *link)
{
	link->dp_trace.link_loss_count++;
}

/* Record the latest link-training failure count for the selected trace. */
void dp_trace_lt_fail_count_update(struct dc_link *link,
		unsigned int fail_count,
		bool in_detection)
{
	if (in_detection)
		link->dp_trace.detect_lt_trace.counts.fail = fail_count;
	else
		link->dp_trace.commit_lt_trace.counts.fail = fail_count;
}

/* Count one more link-training attempt for the selected trace. */
void dp_trace_lt_total_count_increment(struct dc_link *link,
		bool in_detection)
{
	if (in_detection)
		link->dp_trace.detect_lt_trace.counts.total++;
	else
		link->dp_trace.commit_lt_trace.counts.total++;
}

/* Mark whether the selected trace has already been emitted to the log. */
void dp_trace_set_is_logged_flag(struct dc_link *link,
		bool in_detection,
		bool is_logged)
{
	if (in_detection)
		link->dp_trace.detect_lt_trace.is_logged = is_logged;
	else
		link->dp_trace.commit_lt_trace.is_logged = is_logged;
}

bool dp_trace_is_logged(struct dc_link *link, bool in_detection)
{
	if (in_detection)
		return link->dp_trace.detect_lt_trace.is_logged;
	else
		return link->dp_trace.commit_lt_trace.is_logged;
}

/* Store the final link-training result for the selected trace. */
void dp_trace_lt_result_update(struct dc_link *link,
		enum link_training_result result,
		bool in_detection)
{
	if (in_detection)
		link->dp_trace.detect_lt_trace.result = result;
	else
		link->dp_trace.commit_lt_trace.result = result;
}

/* Timestamp the start of link training (DM-provided clock). */
void dp_trace_set_lt_start_timestamp(struct dc_link *link,
		bool in_detection)
{
	if (in_detection)
		link->dp_trace.detect_lt_trace.timestamps.start =
				dm_get_timestamp(link->dc->ctx);
	else
		link->dp_trace.commit_lt_trace.timestamps.start =
				dm_get_timestamp(link->dc->ctx);
}

/* Timestamp the end of link training (DM-provided clock). */
void dp_trace_set_lt_end_timestamp(struct dc_link *link,
		bool in_detection)
{
	if (in_detection)
		link->dp_trace.detect_lt_trace.timestamps.end =
				dm_get_timestamp(link->dc->ctx);
	else
		link->dp_trace.commit_lt_trace.timestamps.end =
				dm_get_timestamp(link->dc->ctx);
}

unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link,
		bool in_detection)
{
	if (in_detection)
		return link->dp_trace.detect_lt_trace.timestamps.end;
	else
		return link->dp_trace.commit_lt_trace.timestamps.end;
}

const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link,
		bool in_detection)
{
	if (in_detection)
		return &link->dp_trace.detect_lt_trace.counts;
	else
		return &link->dp_trace.commit_lt_trace.counts;
}

unsigned int dp_trace_get_link_loss_count(struct dc_link *link)
{
	return link->dp_trace.link_loss_count;
}

/* Record eDP panel power on/off timestamps for power-sequencing checks. */
void dp_trace_set_edp_power_timestamp(struct dc_link *link,
		bool power_up)
{
	if (!power_up)
		/*save driver power off time stamp*/
		link->dp_trace.edp_trace_power_timestamps.poweroff =
				dm_get_timestamp(link->dc->ctx);
	else
		link->dp_trace.edp_trace_power_timestamps.poweron =
				dm_get_timestamp(link->dc->ctx);
}

uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
{
	return link->dp_trace.edp_trace_power_timestamps.poweron;
}

uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
{
	return link->dp_trace.edp_trace_power_timestamps.poweroff;
}

/* When driver-sequence debug is enabled, write the source-sequence marker
 * to the sink's DP_SOURCE_SEQUENCE DPCD register for external tracing.
 */
void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode)
{
	if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
		core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
				&dp_test_mode, sizeof(dp_test_mode));
}
linux-master
drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
/* FILE POLICY AND INTENDED USAGE:
 *
 */
#include "link_dp_training_auxless.h"
#include "link_dp_phy.h"
#define DC_LOGGER \
	link->ctx->logger

/*
 * Perform DP link training without any AUX-channel handshake: drive the
 * clock-recovery and channel-equalization patterns for fixed wait intervals
 * instead of polling the receiver over AUX. Always reports success since
 * there is no receiver feedback to check. The three steps below are
 * order-sensitive and mirror the normal AUX-based training sequence.
 */
bool dp_perform_link_training_skip_aux(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct dc_link_settings *link_setting)
{
	struct link_training_settings lt_settings = {0};

	/* Derive training settings from the requested link settings, then
	 * apply any caller-stored preferred overrides on top.
	 */
	dp_decide_training_settings(
			link,
			link_setting,
			&lt_settings);
	override_training_settings(
			link,
			&link->preferred_training_settings,
			&lt_settings);

	/* 1. Perform_clock_recovery_sequence. */

	/* transmit training pattern for clock recovery */
	dp_set_hw_training_pattern(link, link_res,
			lt_settings.pattern_for_cr, DPRX);

	/* call HWSS to set lane settings*/
	dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);

	/* wait receiver to lock-on (fixed interval, no AUX status polling) */
	dp_wait_for_training_aux_rd_interval(link,
			lt_settings.cr_pattern_time);

	/* 2. Perform_channel_equalization_sequence. */

	/* transmit training pattern for channel equalization. */
	dp_set_hw_training_pattern(link, link_res,
			lt_settings.pattern_for_eq, DPRX);

	/* call HWSS to set lane settings*/
	dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);

	/* wait receiver to lock-on. */
	dp_wait_for_training_aux_rd_interval(link,
			lt_settings.eq_pattern_time);

	/* 3. Perform_link_training_int. */

	/* Mainlink output idle pattern. */
	dp_set_hw_test_pattern(link, link_res,
			DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);

	dp_log_training_result(link, &lt_settings, LINK_TRAINING_SUCCESS);

	return true;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
// SPDX-License-Identifier: MIT /* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dc.h" #include "inc/core_status.h" #include "dpcd_defs.h" #include "link_dp_dpia.h" #include "link_hwss.h" #include "dm_helpers.h" #include "dmub/inc/dmub_cmd.h" #include "link_dpcd.h" #include "link_dp_training.h" #include "dc_dmub_srv.h" #define DC_LOGGER \ link->ctx->logger /** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ /* DPCD DP Tunneling over USB4 */ #define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d #define DP_IN_ADAPTER_INFO 0xe000e #define DP_USB4_DRIVER_ID 0xe000f #define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) { enum dc_status status = DC_OK; uint8_t dpcd_dp_tun_data[3] = {0}; uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; uint8_t i = 0; status = core_link_read_dpcd( link, DP_TUNNELING_CAPABILITIES_SUPPORT, dpcd_dp_tun_data, sizeof(dpcd_dp_tun_data)); status = core_link_read_dpcd( link, DP_USB4_ROUTER_TOPOLOGY_ID, dpcd_topology_data, sizeof(dpcd_topology_data)); link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; return status; } bool dpia_query_hpd_status(struct dc_link *link) { union dmub_rb_cmd cmd = {0}; struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; bool is_hpd_high = false; /* prepare QUERY_HPD command */ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; /* Return HPD status reported by DMUB if query successfully executed. 
*/ if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) is_hpd_high = cmd.query_hpd.data.result; DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", __func__, link->link_index, link->link_id.enum_id - ENUM_ID_1, cmd.query_hpd.data.status, cmd.query_hpd.data.result); return is_hpd_high; }
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements retrieval and configuration of eDP panel features such * as PSR and ABM and it also manages specs defined eDP panel power sequences. 
*/
#include "link_edp_panel_control.h"
#include "link_dpcd.h"
#include "link_dp_capability.h"
#include "dm_helpers.h"
#include "dal_asic_id.h"
#include "dce/dmub_psr.h"
#include "dc/dc_dmub_srv.h"
#include "dce/dmub_replay.h"
#include "abm.h"
#define DC_LOGGER_INIT(logger)

#define DP_SINK_PR_ENABLE_AND_CONFIGURATION 0x37B

/* Branch-device name strings are stored reversed in DPCD. */
/* Travis */
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
/* Nutmeg */
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";

/*
 * Program the eDP panel mode bit in the sink's DP_EDP_CONFIGURATION_SET
 * DPCD register (read-modify-write, only written on change) and cache the
 * mode on the link.
 */
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
{
	union dpcd_edp_config edp_config_set;
	bool panel_mode_edp = false;
	enum dc_status result;

	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));

	/* Both EDP and SPECIAL modes set the PANEL_MODE_EDP bit. */
	switch (panel_mode) {
	case DP_PANEL_MODE_EDP:
	case DP_PANEL_MODE_SPECIAL:
		panel_mode_edp = true;
		break;

	default:
		break;
	}

	/*set edp panel mode in receiver*/
	result = core_link_read_dpcd(
		link,
		DP_EDP_CONFIGURATION_SET,
		&edp_config_set.raw,
		sizeof(edp_config_set.raw));

	if (result == DC_OK &&
		edp_config_set.bits.PANEL_MODE_EDP != panel_mode_edp) {

		edp_config_set.bits.PANEL_MODE_EDP = panel_mode_edp;
		result = core_link_write_dpcd(
			link,
			DP_EDP_CONFIGURATION_SET,
			&edp_config_set.raw,
			sizeof(edp_config_set.raw));

		ASSERT(result == DC_OK);
	}

	link->panel_mode = panel_mode;
	DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
			"eDP panel mode enabled: %d \n",
			link->link_index,
			link->dpcd_caps.panel_mode_edp,
			panel_mode_edp);
}

/*
 * Decide which panel mode to use for this link: SPECIAL for known
 * Travis/Nutmeg VGA-LVDS converters (matched by reversed branch device
 * name), EDP for eDP connectors or internal DP panels, DEFAULT otherwise.
 */
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
{
	/* We need to explicitly check that connector
	 * is not DP. Some Travis_VGA get reported
	 * by video bios as DP.
	 */
	if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
		switch (link->dpcd_caps.branch_dev_id) {
		case DP_BRANCH_DEVICE_ID_0022B9:
			/* alternate scrambler reset is required for Travis
			 * for the case when external chip does not
			 * provide sink device id, alternate scrambler
			 * scheme will be overriden later by querying
			 * Encoder features
			 */
			if (strncmp(
				link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_2,
				sizeof(
				link->dpcd_caps.
				branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		case DP_BRANCH_DEVICE_ID_00001A:
			/* alternate scrambler reset is required for Travis
			 * for the case when external chip does not provide
			 * sink device id, alternate scrambler scheme will
			 * be overriden later by querying Encoder feature
			 */
			if (strncmp(link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_3,
				sizeof(
				link->dpcd_caps.
				branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		default:
			break;
		}
	}

	if (link->dpcd_caps.panel_mode_edp &&
		(link->connector_signal == SIGNAL_TYPE_EDP ||
		 (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
		  link->is_internal_display))) {
		return DP_PANEL_MODE_EDP;
	}

	return DP_PANEL_MODE_DEFAULT;
}

/*
 * Set panel backlight in millinits over AUX. Two DPCD paths exist:
 * AMD source-specific backlight registers, or (when the panel reports
 * luminance control) the standard eDP target-luminance registers.
 * The requested value is also cached in link->backlight_settings.
 */
bool edp_set_backlight_level_nits(struct dc_link *link,
		bool isHDR,
		uint32_t backlight_millinits,
		uint32_t transition_time_in_ms)
{
	struct dpcd_source_backlight_set dpcd_backlight_set;
	uint8_t backlight_control = isHDR ? 1 : 0;

	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	// OLEDs have no PWM, they can only use AUX
	if (link->dpcd_sink_ext_caps.bits.oled == 1)
		backlight_control = 1;

	/* NOTE(review): raw reinterpret of the struct fields as u32/u16 —
	 * assumes dpcd_source_backlight_set layout matches; confirm
	 * alignment on all targets.
	 */
	*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits =
			backlight_millinits;
	*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms =
			(uint16_t)transition_time_in_ms;

	link->backlight_settings.backlight_millinits = backlight_millinits;

	if (!link->dpcd_caps.panel_luminance_control) {
		/* AMD source-specific backlight registers. */
		if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
			(uint8_t *)(&dpcd_backlight_set),
			sizeof(dpcd_backlight_set)) != DC_OK)
			return false;

		if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
			&backlight_control, 1) != DC_OK)
			return false;
	} else {
		/* Standard eDP panel-luminance control path. */
		const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
		struct target_luminance_value *target_luminance = NULL;

		//if target luminance value is greater than 24 bits, clip the value to 24 bits
		if (backlight_millinits > 0xFFFFFF)
			backlight_millinits = 0xFFFFFF;

		target_luminance = (struct target_luminance_value *)&backlight_millinits;

		if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
			&backlight_enable,
			sizeof(backlight_enable)) != DC_OK)
			return false;

		if (core_link_write_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
			(uint8_t *)(target_luminance),
			sizeof(struct target_luminance_value)) != DC_OK)
			return false;
	}

	return true;
}

/*
 * Read average and peak backlight (millinits) from the AMD source-specific
 * DPCD registers. Returns false on read failure or implausible readback
 * (zero average, or average above peak).
 */
bool edp_get_backlight_level_nits(struct dc_link *link,
		uint32_t *backlight_millinits_avg,
		uint32_t *backlight_millinits_peak)
{
	union dpcd_source_backlight_get dpcd_backlight_get;

	memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));

	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
			dpcd_backlight_get.raw,
			sizeof(union dpcd_source_backlight_get)))
		return false;

	*backlight_millinits_avg =
		dpcd_backlight_get.bytes.backlight_millinits_avg;
	*backlight_millinits_peak =
		dpcd_backlight_get.bytes.backlight_millinits_peak;

	/* On non-supported panels dpcd_read usually succeeds with 0 returned */
	if (*backlight_millinits_avg == 0 ||
			*backlight_millinits_avg > *backlight_millinits_peak)
		return false;

	return true;
}

/* Enable/disable backlight via the AMD source-specific DPCD register. */
bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
{
	uint8_t backlight_enable = enable ? 1 : 0;

	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
		&backlight_enable, 1) != DC_OK)
		return false;

	return true;
}

// we read default from 0x320 because we expect BIOS wrote it there
// regular get_backlight_nit reads from panel set at 0x326
static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
{
	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
		return false;

	if (!link->dpcd_caps.panel_luminance_control) {
		if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
			(uint8_t *)backlight_millinits,
			sizeof(uint32_t)))
			return false;
	} else {
		//setting to 0 as a precaution, since target_luminance_value is 3 bytes
		memset(backlight_millinits, 0, sizeof(uint32_t));

		if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
			(uint8_t *)backlight_millinits,
			sizeof(struct target_luminance_value)))
			return false;
	}

	return true;
}

/*
 * For OLED panels: restore the BIOS-programmed default backlight, falling
 * back to 150000 millinits (150 nits) when the readback is missing or
 * implausible. Returns false for non-OLED panels.
 */
bool set_default_brightness_aux(struct dc_link *link)
{
	uint32_t default_backlight;

	if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
		if (!read_default_bl_aux(link, &default_backlight))
			default_backlight = 150000;
		// if < 5 nits or > 5000, it might be wrong readback
		if (default_backlight < 5000 || default_backlight > 5000000)
			default_backlight = 150000;

		return edp_set_backlight_level_nits(link, true,
						 default_backlight, 0);
	}
	return false;
}

bool
set_cached_brightness_aux(struct dc_link *link) { if (link->backlight_settings.backlight_millinits) return edp_set_backlight_level_nits(link, true, link->backlight_settings.backlight_millinits, 0); else return set_default_brightness_aux(link); return false; } bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) { struct dc_link_settings link_setting; uint8_t link_bw_set; uint8_t link_rate_set; uint32_t req_bw; union lane_count_set lane_count_set = {0}; ASSERT(link || crtc_timing); // invalid input if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate) return false; // Read DPCD 00100h to find if standard link rates are set core_link_read_dpcd(link, DP_LINK_BW_SET, &link_bw_set, sizeof(link_bw_set)); if (link_bw_set) { DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n"); return true; } // Read DPCD 00115h to find the edp link rate set used core_link_read_dpcd(link, DP_LINK_RATE_SET, &link_rate_set, sizeof(link_rate_set)); // Read DPCD 00101h to find out the number of lanes currently set core_link_read_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, sizeof(lane_count_set)); req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link)); if (!crtc_timing->flags.DSC) edp_decide_link_settings(link, &link_setting, req_bw); else decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n"); return true; } DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n"); return false; } void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) { if (link->connector_signal != SIGNAL_TYPE_EDP) 
return; link->dc->hwss.edp_power_control(link, true); if (wait_for_hpd) link->dc->hwss.edp_wait_for_hpd_ready(link, true); if (link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, true); } bool edp_wait_for_t12(struct dc_link *link) { if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) { link->dc->hwss.edp_wait_for_T12(link); return true; } return false; } void edp_add_delay_for_T9(struct dc_link *link) { if (link && link->panel_config.pps.extra_delay_backlight_off > 0) fsleep(link->panel_config.pps.extra_delay_backlight_off * 1000); } bool edp_receiver_ready_T9(struct dc_link *link) { unsigned int tries = 0; unsigned char sinkstatus = 0; unsigned char edpRev = 0; enum dc_status result = DC_OK; result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ if (result == DC_OK && edpRev >= DP_EDP_12) { do { sinkstatus = 1; result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); if (sinkstatus == 0) break; if (result != DC_OK) break; udelay(100); //MAx T9 } while (++tries < 50); } return result; } bool edp_receiver_ready_T7(struct dc_link *link) { unsigned char sinkstatus = 0; unsigned char edpRev = 0; enum dc_status result = DC_OK; /* use absolute time stamp to constrain max T7*/ unsigned long long enter_timestamp = 0; unsigned long long finish_timestamp = 0; unsigned long long time_taken_in_ns = 0; result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); if (result == DC_OK && edpRev >= DP_EDP_12) { /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ enter_timestamp = dm_get_timestamp(link->ctx); do { sinkstatus = 0; result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); if (sinkstatus == 1) break; if (result != DC_OK) break; udelay(25); finish_timestamp = dm_get_timestamp(link->ctx); time_taken_in_ns = 
dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms } if (link && link->panel_config.pps.extra_t7_ms > 0) fsleep(link->panel_config.pps.extra_t7_ms * 1000); return result; } bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable) { bool ret = false; union dpcd_alpm_configuration alpm_config; if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { memset(&alpm_config, 0, sizeof(alpm_config)); alpm_config.bits.ENABLE = (enable ? true : false); ret = dm_helpers_dp_write_dpcd(link->ctx, link, DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, sizeof(alpm_config.raw)); } return ret; } static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) { int i; struct dc *dc = link->ctx->dc; struct pipe_ctx *pipe_ctx = NULL; for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream) { if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) { pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; break; } } } return pipe_ctx; } bool edp_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { struct dc *dc = link->ctx->dc; DC_LOGGER_INIT(link->ctx->logger); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", backlight_pwm_u16_16, backlight_pwm_u16_16); if (dc_is_embedded_signal(link->connector_signal)) { struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); if (pipe_ctx) { /* Disable brightness ramping when the display is blanked * as it can hang the DMCU */ if (pipe_ctx->plane_state == NULL) frame_ramp = 0; } else { return false; } dc->hwss.set_backlight_level( pipe_ctx, backlight_pwm_u16_16, frame_ramp); } return true; } bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active, bool wait, bool force_static, const unsigned int *power_opts) { struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; struct dmub_psr *psr = dc->res_pool->psr; unsigned int panel_inst; if (psr 
== NULL && force_static) return false; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) { // Don't enter PSR if panel is not connected return false; } /* Set power optimization flag */ if (power_opts && link->psr_settings.psr_power_opt != *power_opts) { link->psr_settings.psr_power_opt = *power_opts; if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); } if (psr != NULL && link->psr_settings.psr_feature_enabled && force_static && psr->funcs->psr_force_static) psr->funcs->psr_force_static(psr, panel_inst); /* Enable or Disable PSR */ if (allow_active && link->psr_settings.psr_allow_active != *allow_active) { link->psr_settings.psr_allow_active = *allow_active; if (!link->psr_settings.psr_allow_active) dc_z10_restore(dc); if (psr != NULL && link->psr_settings.psr_feature_enabled) { psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst); } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled) dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait); else return false; } return true; } bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state) { struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; struct dmub_psr *psr = dc->res_pool->psr; unsigned int panel_inst; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; if (psr != NULL && link->psr_settings.psr_feature_enabled) psr->funcs->psr_get_state(psr, state, panel_inst); else if (dmcu != NULL && link->psr_settings.psr_feature_enabled) dmcu->funcs->get_psr_state(dmcu, state); return true; } static inline enum physical_phy_id transmitter_to_phy_id(struct dc_link *link) { struct dc_context *dc_ctx = link->ctx; enum transmitter 
transmitter_value = link->link_enc->transmitter; switch (transmitter_value) { case TRANSMITTER_UNIPHY_A: return PHYLD_0; case TRANSMITTER_UNIPHY_B: return PHYLD_1; case TRANSMITTER_UNIPHY_C: return PHYLD_2; case TRANSMITTER_UNIPHY_D: return PHYLD_3; case TRANSMITTER_UNIPHY_E: return PHYLD_4; case TRANSMITTER_UNIPHY_F: return PHYLD_5; case TRANSMITTER_NUTMEG_CRT: return PHYLD_6; case TRANSMITTER_TRAVIS_CRT: return PHYLD_7; case TRANSMITTER_TRAVIS_LCD: return PHYLD_8; case TRANSMITTER_UNIPHY_G: return PHYLD_9; case TRANSMITTER_COUNT: return PHYLD_COUNT; case TRANSMITTER_UNKNOWN: return PHYLD_UNKNOWN; default: DC_ERROR("Unknown transmitter value %d\n", transmitter_value); return PHYLD_UNKNOWN; } } bool edp_setup_psr(struct dc_link *link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context) { struct dc *dc; struct dmcu *dmcu; struct dmub_psr *psr; int i; unsigned int panel_inst; /* updateSinkPsrDpcdConfig*/ union dpcd_psr_configuration psr_configuration; union dpcd_sink_active_vtotal_control_mode vtotal_control = {0}; psr_context->controllerId = CONTROLLER_ID_UNDEFINED; if (!link) return false; dc = link->ctx->dc; dmcu = dc->res_pool->dmcu; psr = dc->res_pool->psr; if (!dmcu && !psr) return false; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; memset(&psr_configuration, 0, sizeof(psr_configuration)); psr_configuration.bits.ENABLE = 1; psr_configuration.bits.CRC_VERIFICATION = 1; psr_configuration.bits.FRAME_CAPTURE_INDICATION = psr_config->psr_frame_capture_indication_req; /* Check for PSR v2*/ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { /* For PSR v2 selective update. * Indicates whether sink should start capturing * immediately following active scan line, * or starting with the 2nd active scan line. */ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; /*For PSR v2, determines whether Sink should generate * IRQ_HPD when CRC mismatch is detected. 
*/ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1; /* For PSR v2, set the bit when the Source device will * be enabling PSR2 operation. */ psr_configuration.bits.ENABLE_PSR2 = 1; /* For PSR v2, the Sink device must be able to receive * SU region updates early in the frame time. */ psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1; } dm_helpers_dp_write_dpcd( link->ctx, link, 368, &psr_configuration.raw, sizeof(psr_configuration.raw)); if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { edp_power_alpm_dpcd_enable(link, true); psr_context->su_granularity_required = psr_config->su_granularity_required; psr_context->su_y_granularity = psr_config->su_y_granularity; psr_context->line_time_in_us = psr_config->line_time_in_us; /* linux must be able to expose AMD Source DPCD definition * in order to support FreeSync PSR */ if (link->psr_settings.psr_vtotal_control_support) { psr_context->rate_control_caps = psr_config->rate_control_caps; vtotal_control.bits.ENABLE = true; core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE, &vtotal_control.raw, sizeof(vtotal_control.raw)); } } psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; psr_context->transmitterId = link->link_enc->transmitter; psr_context->engineId = link->link_enc->preferred_engine; for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { /* dmcu -1 for all controller id values, * therefore +1 here */ psr_context->controllerId = dc->current_state->res_ctx. pipe_ctx[i].stream_res.tg->inst + 1; break; } } /* Hardcoded for now. 
Can be Pcie or Uniphy (or Unknown)*/ psr_context->phyType = PHY_TYPE_UNIPHY; /*PhyId is associated with the transmitter id*/ psr_context->smuPhyId = transmitter_to_phy_id(link); psr_context->crtcTimingVerticalTotal = stream->timing.v_total; psr_context->vsync_rate_hz = div64_u64(div64_u64((stream-> timing.pix_clk_100hz * 100), stream->timing.v_total), stream->timing.h_total); psr_context->psrSupportedDisplayConfig = true; psr_context->psrExitLinkTrainingRequired = psr_config->psr_exit_link_training_required; psr_context->sdpTransmitLineNumDeadline = psr_config->psr_sdp_transmit_line_num_deadline; psr_context->psrFrameCaptureIndicationReq = psr_config->psr_frame_capture_indication_req; psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ psr_context->numberOfControllers = link->dc->res_pool->timing_generator_count; psr_context->rfb_update_auto_en = true; /* 2 frames before enter PSR. */ psr_context->timehyst_frames = 2; /* half a frame * (units in 100 lines, i.e. a value of 1 represents 100 lines) */ psr_context->hyst_lines = stream->timing.v_total / 2 / 100; psr_context->aux_repeats = 10; psr_context->psr_level.u32all = 0; /*skip power down the single pipe since it blocks the cstate*/ if (link->ctx->asic_id.chip_family >= FAMILY_RV) { switch (link->ctx->asic_id.chip_family) { case FAMILY_YELLOW_CARP: case AMDGPU_FAMILY_GC_10_3_6: case AMDGPU_FAMILY_GC_11_0_1: if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable) psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; break; default: psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; break; } } /* SMU will perform additional powerdown sequence. * For unsupported ASICs, set psr_level flag to skip PSR * static screen notification to SMU. 
* (Always set for DAL2, did not check ASIC) */ psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations; /* Complete PSR entry before aborting to prevent intermittent * freezes on certain eDPs */ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; /* Disable ALPM first for compatible non-ALPM panel now */ psr_context->psr_level.bits.DISABLE_ALPM = 0; psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1; /* Controls additional delay after remote frame capture before * continuing power down, default = 0 */ psr_context->frame_delay = 0; psr_context->dsc_slice_height = psr_config->dsc_slice_height; if (psr) { link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context, panel_inst); link->psr_settings.psr_power_opt = 0; link->psr_settings.psr_allow_active = 0; } else { link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); } /* psr_enabled == 0 indicates setup_psr did not succeed, but this * should not happen since firmware should be running at this point */ if (link->psr_settings.psr_feature_enabled == 0) ASSERT(0); return true; } void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency) { struct dc *dc = link->ctx->dc; struct dmub_psr *psr = dc->res_pool->psr; unsigned int panel_inst; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return; // PSR residency measurements only supported on DMCUB if (psr != NULL && link->psr_settings.psr_feature_enabled) psr->funcs->psr_get_residency(psr, residency, panel_inst); else *residency = 0; } bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su) { struct dc *dc = link->ctx->dc; struct dmub_psr *psr = dc->res_pool->psr; if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support) return false; 
psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su); return true; } bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, bool wait, bool force_static, const unsigned int *power_opts) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; unsigned int panel_inst; if (replay == NULL && force_static) return false; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; /* Set power optimization flag */ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) { replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); link->replay_settings.replay_power_opt_active = *power_opts; } } /* Activate or deactivate Replay */ if (allow_active && link->replay_settings.replay_allow_active != *allow_active) { // TODO: Handle mux change case if force_static is set // If force_static is set, just change the replay_allow_active state directly if (replay != NULL && link->replay_settings.replay_feature_enabled) replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); link->replay_settings.replay_allow_active = *allow_active; } return true; } bool edp_get_replay_state(const struct dc_link *link, uint64_t *state) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; unsigned int panel_inst; enum replay_state pr_state = REPLAY_STATE_0; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; if (replay != NULL && link->replay_settings.replay_feature_enabled) replay->funcs->replay_get_state(replay, &pr_state, panel_inst); *state = pr_state; return true; } bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) { /* To-do: Setup Replay */ struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; int i; unsigned int panel_inst; struct replay_context 
replay_context = { 0 }; unsigned int lineTimeInNs = 0; union replay_enable_and_configuration replay_config; union dpcd_alpm_configuration alpm_config; replay_context.controllerId = CONTROLLER_ID_UNDEFINED; if (!link) return false; if (!replay) return false; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel; replay_context.digbe_inst = link->link_enc->transmitter; replay_context.digfe_inst = link->link_enc->preferred_engine; for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { /* dmcu -1 for all controller id values, * therefore +1 here */ replay_context.controllerId = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; break; } } lineTimeInNs = ((stream->timing.h_total * 1000000) / (stream->timing.pix_clk_100hz / 10)) + 1; replay_context.line_time_in_ns = lineTimeInNs; if (replay) link->replay_settings.replay_feature_enabled = replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); if (link->replay_settings.replay_feature_enabled) { replay_config.bits.FREESYNC_PANEL_REPLAY_MODE = 1; replay_config.bits.TIMING_DESYNC_ERROR_VERIFICATION = link->replay_settings.config.replay_timing_sync_supported; replay_config.bits.STATE_TRANSITION_ERROR_DETECTION = 1; dm_helpers_dp_write_dpcd(link->ctx, link, DP_SINK_PR_ENABLE_AND_CONFIGURATION, (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); memset(&alpm_config, 0, sizeof(alpm_config)); alpm_config.bits.ENABLE = 1; dm_helpers_dp_write_dpcd( link->ctx, link, DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, sizeof(alpm_config.raw)); } return true; } bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; unsigned int panel_inst; if (!replay) return false; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; if (coasting_vtotal && 
link->replay_settings.coasting_vtotal != coasting_vtotal) { replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst); link->replay_settings.coasting_vtotal = coasting_vtotal; } return true; } bool edp_replay_residency(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; unsigned int panel_inst; if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; if (replay != NULL && link->replay_settings.replay_feature_enabled) replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm); else *residency = 0; return true; } static struct abm *get_abm_from_stream_res(const struct dc_link *link) { int i; struct dc *dc = link->ctx->dc; struct abm *abm = NULL; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i]; struct dc_stream_state *stream = pipe_ctx.stream; if (stream && stream->link == link) { abm = pipe_ctx.stream_res.abm; break; } } return abm; } int edp_get_backlight_level(const struct dc_link *link) { struct abm *abm = get_abm_from_stream_res(link); struct panel_cntl *panel_cntl = link->panel_cntl; struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; bool fw_set_brightness = true; if (dmcu) fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) return panel_cntl->funcs->get_current_backlight(panel_cntl); else if (abm != NULL && abm->funcs->get_current_backlight != NULL) return (int) abm->funcs->get_current_backlight(abm); else return DC_ERROR_UNEXPECTED; } int edp_get_target_backlight_pwm(const struct dc_link *link) { struct abm *abm = get_abm_from_stream_res(link); if (abm == NULL || abm->funcs->get_target_backlight == NULL) return DC_ERROR_UNEXPECTED; return (int) abm->funcs->get_target_backlight(abm); }
/* Dataset metadata (not part of the kernel source):
 * repo: linux-master
 * path: drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
 * Wrapped in a comment so the concatenated text remains valid C.
 */
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements all generic dp link training helper functions and top * level generic training sequence. All variations of dp link training sequence * should be called inside the top level training functions in this file to * ensure the integrity of our overall training procedure across different types * of link encoding and back end hardware. 
*/ #include "link_dp_training.h" #include "link_dp_training_8b_10b.h" #include "link_dp_training_128b_132b.h" #include "link_dp_training_auxless.h" #include "link_dp_training_dpia.h" #include "link_dp_training_fixed_vs_pe_retimer.h" #include "link_dpcd.h" #include "link/accessories/link_dp_trace.h" #include "link_dp_phy.h" #include "link_dp_capability.h" #include "link_edp_panel_control.h" #include "link/link_detection.h" #include "link/link_validation.h" #include "atomfirmware.h" #include "link_enc_cfg.h" #include "resource.h" #include "dm_helpers.h" #define DC_LOGGER \ link->ctx->logger #define POST_LT_ADJ_REQ_LIMIT 6 #define POST_LT_ADJ_REQ_TIMEOUT 200 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ void dp_log_training_result( struct dc_link *link, const struct link_training_settings *lt_settings, enum link_training_result status) { char *link_rate = "Unknown"; char *lt_result = "Unknown"; char *lt_spread = "Disabled"; switch (lt_settings->link_settings.link_rate) { case LINK_RATE_LOW: link_rate = "RBR"; break; case LINK_RATE_RATE_2: link_rate = "R2"; break; case LINK_RATE_RATE_3: link_rate = "R3"; break; case LINK_RATE_HIGH: link_rate = "HBR"; break; case LINK_RATE_RBR2: link_rate = "RBR2"; break; case LINK_RATE_RATE_6: link_rate = "R6"; break; case LINK_RATE_HIGH2: link_rate = "HBR2"; break; case LINK_RATE_RATE_8: link_rate = "R8"; break; case LINK_RATE_HIGH3: link_rate = "HBR3"; break; case LINK_RATE_UHBR10: link_rate = "UHBR10"; break; case LINK_RATE_UHBR13_5: link_rate = "UHBR13.5"; break; case LINK_RATE_UHBR20: link_rate = "UHBR20"; break; default: break; } switch (status) { case LINK_TRAINING_SUCCESS: lt_result = "pass"; break; case LINK_TRAINING_CR_FAIL_LANE0: lt_result = "CR failed lane0"; break; case LINK_TRAINING_CR_FAIL_LANE1: lt_result = "CR failed lane1"; break; case LINK_TRAINING_CR_FAIL_LANE23: lt_result = "CR failed lane23"; break; case LINK_TRAINING_EQ_FAIL_CR: lt_result = "CR failed in EQ"; break; case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: 
lt_result = "CR failed in EQ partially"; break; case LINK_TRAINING_EQ_FAIL_EQ: lt_result = "EQ failed"; break; case LINK_TRAINING_LQA_FAIL: lt_result = "LQA failed"; break; case LINK_TRAINING_LINK_LOSS: lt_result = "Link loss"; break; case DP_128b_132b_LT_FAILED: lt_result = "LT_FAILED received"; break; case DP_128b_132b_MAX_LOOP_COUNT_REACHED: lt_result = "max loop count reached"; break; case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: lt_result = "channel EQ timeout"; break; case DP_128b_132b_CDS_DONE_TIMEOUT: lt_result = "CDS timeout"; break; default: break; } switch (lt_settings->link_settings.link_spread) { case LINK_SPREAD_DISABLED: lt_spread = "Disabled"; break; case LINK_SPREAD_05_DOWNSPREAD_30KHZ: lt_spread = "0.5% 30KHz"; break; case LINK_SPREAD_05_DOWNSPREAD_33KHZ: lt_spread = "0.5% 33KHz"; break; default: break; } /* Connectivity log: link training */ /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", link_rate, lt_settings->link_settings.lane_count, lt_result, lt_settings->hw_lane_settings[0].VOLTAGE_SWING, lt_settings->hw_lane_settings[0].PRE_EMPHASIS, lt_spread); } uint8_t dp_initialize_scrambling_data_symbols( struct dc_link *link, enum dc_dp_training_pattern pattern) { uint8_t disable_scrabled_data_symbols = 0; switch (pattern) { case DP_TRAINING_PATTERN_SEQUENCE_1: case DP_TRAINING_PATTERN_SEQUENCE_2: case DP_TRAINING_PATTERN_SEQUENCE_3: disable_scrabled_data_symbols = 1; break; case DP_TRAINING_PATTERN_SEQUENCE_4: case DP_128b_132b_TPS1: case DP_128b_132b_TPS2: disable_scrabled_data_symbols = 0; break; default: ASSERT(0); DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", __func__, pattern); break; } return disable_scrabled_data_symbols; } enum dpcd_training_patterns dp_training_pattern_to_dpcd_training_pattern( struct dc_link *link, enum dc_dp_training_pattern pattern) { enum dpcd_training_patterns dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; switch (pattern) { case 
DP_TRAINING_PATTERN_SEQUENCE_1: DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS1\n", __func__); dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; break; case DP_TRAINING_PATTERN_SEQUENCE_2: DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS2\n", __func__); dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; break; case DP_TRAINING_PATTERN_SEQUENCE_3: DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS3\n", __func__); dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; break; case DP_TRAINING_PATTERN_SEQUENCE_4: DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS4\n", __func__); dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; break; case DP_128b_132b_TPS1: DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS1\n", __func__); dpcd_tr_pattern = DPCD_128b_132b_TPS1; break; case DP_128b_132b_TPS2: DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS2\n", __func__); dpcd_tr_pattern = DPCD_128b_132b_TPS2; break; case DP_128b_132b_TPS2_CDS: DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS2 CDS\n", __func__); dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; break; case DP_TRAINING_PATTERN_VIDEOIDLE: DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern videoidle\n", __func__); dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; break; default: ASSERT(0); DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", __func__, pattern); break; } return dpcd_tr_pattern; } uint8_t dp_get_nibble_at_index(const uint8_t *buf, uint32_t index) { uint8_t nibble; nibble = buf[index / 2]; if (index % 2) nibble >>= 4; else nibble &= 0x0F; return nibble; } void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { fsleep(wait_in_micro_secs); DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, wait_in_micro_secs); } /* maximum pre emphasis level allowed for each voltage swing level*/ static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, PRE_EMPHASIS_LEVEL2, 
PRE_EMPHASIS_LEVEL1, PRE_EMPHASIS_DISABLED }; static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( enum dc_voltage_swing voltage) { enum dc_pre_emphasis pre_emphasis; pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; if (voltage <= VOLTAGE_SWING_MAX_LEVEL) pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; return pre_emphasis; } static void maximize_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { uint32_t lane; struct dc_lane_settings max_requested; max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; /* Determine what the maximum of the requested settings are*/ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; if (lane_settings[lane].FFE_PRESET.settings.level > max_requested.FFE_PRESET.settings.level) max_requested.FFE_PRESET.settings.level = lane_settings[lane].FFE_PRESET.settings.level; } /* make sure the requested settings are * not higher than maximum settings*/ if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; /* make sure the pre-emphasis matches the voltage swing*/ if (max_requested.PRE_EMPHASIS > get_max_pre_emphasis_for_voltage_swing( max_requested.VOLTAGE_SWING)) max_requested.PRE_EMPHASIS = get_max_pre_emphasis_for_voltage_swing( max_requested.VOLTAGE_SWING); for 
(lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; } } void dp_hw_to_dpcd_lane_settings( const struct link_training_settings *lt_settings, const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) { uint8_t lane = 0; for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) { dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = (hw_lane_settings[lane].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = (hw_lane_settings[lane].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 
					1 : 0);
		} else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
				DP_128b_132b_ENCODING) {
			/* 128b/132b lanes carry a single TX FFE preset instead of
			 * separate swing/pre-emphasis fields.
			 */
			dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
					hw_lane_settings[lane].FFE_PRESET.settings.level;
		}
	}
}

/* Translate a link-settings struct into the DPCD LINK_BW_SET / link-rate code:
 * 128b/132b uses the UHBR bit codes (0x1/0x2/0x4), 8b/10b uses the raw
 * link-rate value; anything else maps to 0.
 */
uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
{
	uint8_t link_rate = 0;
	enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings);

	if (encoding == DP_128b_132b_ENCODING)
		switch (link_settings->link_rate) {
		case LINK_RATE_UHBR10:
			link_rate = 0x1;
			break;
		case LINK_RATE_UHBR20:
			link_rate = 0x2;
			break;
		case LINK_RATE_UHBR13_5:
			link_rate = 0x4;
			break;
		default:
			link_rate = 0;
			break;
		}
	else if (encoding == DP_8b_10b_ENCODING)
		link_rate = (uint8_t) link_settings->link_rate;
	else
		link_rate = 0;

	return link_rate;
}

/* Only used for channel equalization */
/* Convert the sink-reported TRAINING_AUX_RD_INTERVAL code into microseconds;
 * code 0 (and unknown codes) means the default 400us.
 */
uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
{
	unsigned int aux_rd_interval_us = 400;

	switch (dpcd_aux_read_interval) {
	case 0x01:
		aux_rd_interval_us = 4000;
		break;
	case 0x02:
		aux_rd_interval_us = 8000;
		break;
	case 0x03:
		aux_rd_interval_us = 12000;
		break;
	case 0x04:
		aux_rd_interval_us = 16000;
		break;
	case 0x05:
		aux_rd_interval_us = 32000;
		break;
	case 0x06:
		aux_rd_interval_us = 64000;
		break;
	default:
		break;
	}

	return aux_rd_interval_us;
}

/* Identify the first lane that failed clock recovery; lanes 2 and 3 share a
 * single LANE23 failure code.
 */
enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
					union lane_status *dpcd_lane_status)
{
	enum link_training_result result = LINK_TRAINING_SUCCESS;

	if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
		result = LINK_TRAINING_CR_FAIL_LANE0;
	else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
		result = LINK_TRAINING_CR_FAIL_LANE1;
	else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
		result = LINK_TRAINING_CR_FAIL_LANE23;
	else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
		result = LINK_TRAINING_CR_FAIL_LANE23;
	return result;
}

bool is_repeater(const struct link_training_settings *lt_settings, uint32_t
		offset)
{
	/* A repeater target exists only in non-transparent LTTPR mode and for
	 * a non-zero repeater offset (offset 0 addresses the sink itself).
	 */
	return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
			(offset != 0);
}

/* True if any active lane has already requested the maximum voltage swing. */
bool dp_is_max_vs_reached(
	const struct link_training_settings *lt_settings)
{
	uint32_t lane;

	for (lane = 0; lane <
		(uint32_t)(lt_settings->link_settings.lane_count);
		lane++) {
		if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
			== VOLTAGE_SWING_MAX_LEVEL)
			return true;
	}
	return false;
}

/* True only when clock recovery is done on every active lane. */
bool dp_is_cr_done(enum dc_lane_count ln_count,
	union lane_status *dpcd_lane_status)
{
	bool done = true;
	uint32_t lane;
	/*LANEx_CR_DONE bits All 1's?*/
	for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
		if (!dpcd_lane_status[lane].bits.CR_DONE_0)
			done = false;
	}
	return done;
}

/* True only when channel equalization is done on every active lane. */
bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
	union lane_status *dpcd_lane_status)
{
	bool done = true;
	uint32_t lane;

	for (lane = 0; lane < (uint32_t)(ln_count); lane++)
		if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
			done = false;
	return done;
}

/* True only when symbol lock is achieved on every active lane. */
bool dp_is_symbol_locked(enum dc_lane_count ln_count,
	union lane_status *dpcd_lane_status)
{
	bool locked = true;
	uint32_t lane;

	for (lane = 0; lane < (uint32_t)(ln_count); lane++)
		if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0)
			locked = false;
	return locked;
}

/* True when the sink reports inter-lane alignment is complete. */
bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
{
	return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}

/* Re-read the sink's lane status registers (starting at DP_SINK_COUNT) and
 * report LINK_TRAINING_LINK_LOSS if any active lane has dropped CR, channel
 * EQ, or symbol lock since training completed.
 */
enum link_training_result dp_check_link_loss_status(
	struct dc_link *link,
	const struct link_training_settings *link_training_setting)
{
	enum link_training_result status = LINK_TRAINING_SUCCESS;
	union lane_status lane_status;
	uint8_t dpcd_buf[6] = {0};
	uint32_t lane;

	core_link_read_dpcd(
			link,
			DP_SINK_COUNT,
			(uint8_t *)(dpcd_buf),
			sizeof(dpcd_buf));

	/*parse lane status*/
	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
		/*
		 * check lanes status
		 */
		lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);

		if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
			!lane_status.bits.CR_DONE_0 ||
			!lane_status.bits.SYMBOL_LOCKED_0) {
			/* if one of the channel
equalization, clock * recovery or symbol lock is dropped * consider it as (link has been * dropped) dp sink status has changed */ status = LINK_TRAINING_LINK_LOSS; break; } } return status; } enum dc_status dp_get_lane_status_and_lane_adjust( struct dc_link *link, const struct link_training_settings *link_training_setting, union lane_status ln_status[LANE_COUNT_DP_MAX], union lane_align_status_updated *ln_align, union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], uint32_t offset) { unsigned int lane01_status_address = DP_LANE0_1_STATUS; uint8_t lane_adjust_offset = 4; unsigned int lane01_adjust_address; uint8_t dpcd_buf[6] = {0}; uint32_t lane; enum dc_status status; if (is_repeater(link_training_setting, offset)) { lane01_status_address = DP_LANE0_1_STATUS_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); lane_adjust_offset = 3; } status = core_link_read_dpcd( link, lane01_status_address, (uint8_t *)(dpcd_buf), sizeof(dpcd_buf)); if (status != DC_OK) { DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," " keep current lane status and lane adjust unchanged", __func__, lane01_status_address); return status; } for (lane = 0; lane < (uint32_t)(link_training_setting->link_settings.lane_count); lane++) { ln_status[lane].raw = dp_get_nibble_at_index(&dpcd_buf[0], lane); ln_adjust[lane].raw = dp_get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); } ln_align->raw = dpcd_buf[2]; if (is_repeater(link_training_setting, offset)) { DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", __func__, offset, lane01_status_address, dpcd_buf[0], lane01_status_address + 1, dpcd_buf[1]); lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", __func__, offset, lane01_adjust_address, 
dpcd_buf[lane_adjust_offset], lane01_adjust_address + 1, dpcd_buf[lane_adjust_offset + 1]); } else { DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", __func__, lane01_status_address, dpcd_buf[0], lane01_status_address + 1, dpcd_buf[1]); lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", __func__, lane01_adjust_address, dpcd_buf[lane_adjust_offset], lane01_adjust_address + 1, dpcd_buf[lane_adjust_offset + 1]); } return status; } static void override_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { uint32_t lane; if (lt_settings->voltage_swing == NULL && lt_settings->pre_emphasis == NULL && lt_settings->ffe_preset == NULL && lt_settings->post_cursor2 == NULL) return; for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { if (lt_settings->voltage_swing) lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; if (lt_settings->pre_emphasis) lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; if (lt_settings->post_cursor2) lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; if (lt_settings->ffe_preset) lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; } } void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) { if (!dp_is_lttpr_present(link)) return; if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { *override = LTTPR_MODE_TRANSPARENT; } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { *override = LTTPR_MODE_NON_TRANSPARENT; } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { *override = LTTPR_MODE_NON_LTTPR; } DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); } void override_training_settings( struct dc_link *link, const struct dc_link_training_overrides *overrides, struct link_training_settings *lt_settings) 
{ uint32_t lane; /* Override link spread */ if (!link->dp_ss_off && overrides->downspread != NULL) lt_settings->link_settings.link_spread = *overrides->downspread ? LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; /* Override lane settings */ if (overrides->voltage_swing != NULL) lt_settings->voltage_swing = overrides->voltage_swing; if (overrides->pre_emphasis != NULL) lt_settings->pre_emphasis = overrides->pre_emphasis; if (overrides->post_cursor2 != NULL) lt_settings->post_cursor2 = overrides->post_cursor2; if (overrides->ffe_preset != NULL) lt_settings->ffe_preset = overrides->ffe_preset; /* Override HW lane settings with BIOS forced values if present */ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; lt_settings->always_match_dpcd_with_hw_lane_settings = false; } for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = lt_settings->voltage_swing != NULL ? *lt_settings->voltage_swing : VOLTAGE_SWING_LEVEL0; lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = lt_settings->pre_emphasis != NULL ? *lt_settings->pre_emphasis : PRE_EMPHASIS_DISABLED; lt_settings->hw_lane_settings[lane].POST_CURSOR2 = lt_settings->post_cursor2 != NULL ? 
*lt_settings->post_cursor2 : POST_CURSOR2_DISABLED; } if (lt_settings->always_match_dpcd_with_hw_lane_settings) dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); /* Override training timings */ if (overrides->cr_pattern_time != NULL) lt_settings->cr_pattern_time = *overrides->cr_pattern_time; if (overrides->eq_pattern_time != NULL) lt_settings->eq_pattern_time = *overrides->eq_pattern_time; if (overrides->pattern_for_cr != NULL) lt_settings->pattern_for_cr = *overrides->pattern_for_cr; if (overrides->pattern_for_eq != NULL) lt_settings->pattern_for_eq = *overrides->pattern_for_eq; if (overrides->enhanced_framing != NULL) lt_settings->enhanced_framing = *overrides->enhanced_framing; if (link->preferred_training_settings.fec_enable != NULL) lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; /* Check DP tunnel LTTPR mode debug option. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode); } enum dc_dp_training_pattern decide_cr_training_pattern( const struct dc_link_settings *link_settings) { switch (link_dp_get_encoding_format(link_settings)) { case DP_8b_10b_ENCODING: default: return DP_TRAINING_PATTERN_SEQUENCE_1; case DP_128b_132b_ENCODING: return DP_128b_132b_TPS1; } } enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, const struct dc_link_settings *link_settings) { struct link_encoder *link_enc; struct encoder_feature_support *enc_caps; struct dpcd_caps *rx_caps = &link->dpcd_caps; enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); enc_caps = &link_enc->features; switch (link_dp_get_encoding_format(link_settings)) { case DP_8b_10b_ENCODING: if (enc_caps->flags.bits.IS_TPS4_CAPABLE && 
rx_caps->max_down_spread.bits.TPS4_SUPPORTED) pattern = DP_TRAINING_PATTERN_SEQUENCE_4; else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && rx_caps->max_ln_count.bits.TPS3_SUPPORTED) pattern = DP_TRAINING_PATTERN_SEQUENCE_3; else pattern = DP_TRAINING_PATTERN_SEQUENCE_2; break; case DP_128b_132b_ENCODING: pattern = DP_128b_132b_TPS2; break; default: pattern = DP_TRAINING_PATTERN_SEQUENCE_2; break; } return pattern; } enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting) { enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting); if (encoding == DP_8b_10b_ENCODING) return dp_decide_8b_10b_lttpr_mode(link); else if (encoding == DP_128b_132b_ENCODING) return dp_decide_128b_132b_lttpr_mode(link); ASSERT(0); return LTTPR_MODE_NON_LTTPR; } void dp_decide_lane_settings( const struct link_training_settings *lt_settings, const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) { uint32_t lane; for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) { hw_lane_settings[lane].VOLTAGE_SWING = (enum dc_voltage_swing)(ln_adjust[lane].bits. VOLTAGE_SWING_LANE); hw_lane_settings[lane].PRE_EMPHASIS = (enum dc_pre_emphasis)(ln_adjust[lane].bits. 
PRE_EMPHASIS_LANE); } else if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) { hw_lane_settings[lane].FFE_PRESET.raw = ln_adjust[lane].tx_ffe.PRESET_VALUE; } } dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); if (lt_settings->disallow_per_lane_settings) { /* we find the maximum of the requested settings across all lanes*/ /* and set this maximum for all lanes*/ maximize_lane_settings(lt_settings, hw_lane_settings); override_lane_settings(lt_settings, hw_lane_settings); if (lt_settings->always_match_dpcd_with_hw_lane_settings) dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); } } void dp_decide_training_settings( struct dc_link *link, const struct dc_link_settings *link_settings, struct link_training_settings *lt_settings) { if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) decide_8b_10b_training_settings(link, link_settings, lt_settings); else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) decide_128b_132b_training_settings(link, link_settings, lt_settings); } enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) { uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); return core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); } static enum dc_status configure_lttpr_mode_non_transparent( struct dc_link *link, const struct link_training_settings *lt_settings) { /* aux timeout is already set to extended */ /* RESET/SET lttpr mode to enable non transparent mode */ uint8_t repeater_cnt; uint32_t aux_interval_address; uint8_t repeater_id; enum dc_status result = DC_ERROR_UNEXPECTED; uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; const struct dc *dc = link->dc; enum dp_link_encoding encoding = dc->link_srv->dp_get_encoding_format(&lt_settings->link_settings); if (encoding == 
DP_8b_10b_ENCODING) { DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); result = core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); } if (result == DC_OK) { link->dpcd_caps.lttpr_caps.mode = repeater_mode; } if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; result = core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); if (result == DC_OK) { link->dpcd_caps.lttpr_caps.mode = repeater_mode; } if (encoding == DP_8b_10b_ENCODING) { repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); /* Driver does not need to train the first hop. Skip DPCD read and clear * AUX_RD_INTERVAL for DPTX-to-DPIA hop. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); core_link_read_dpcd( link, aux_interval_address, (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; } } } return result; } enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings) { enum dc_status status = DC_OK; if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) status = configure_lttpr_mode_transparent(link); else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) status = configure_lttpr_mode_non_transparent(link, lt_settings); return status; } void repeater_training_done(struct dc_link *link, uint32_t offset) { union dpcd_training_pattern 
dpcd_pattern = {0}; const uint32_t dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); /* Set training not in progress*/ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; core_link_write_dpcd( link, dpcd_base_lt_offset, &dpcd_pattern.raw, 1); DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", __func__, offset, dpcd_base_lt_offset, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) { uint8_t sink_status = 0; uint8_t i; /* clear training pattern set */ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); if (encoding == DP_128b_132b_ENCODING) { /* poll for intra-hop disable */ for (i = 0; i < 10; i++) { if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) break; fsleep(1000); } } } enum dc_status dpcd_configure_channel_coding(struct dc_link *link, struct link_training_settings *lt_settings) { enum dp_link_encoding encoding = link_dp_get_encoding_format( &lt_settings->link_settings); enum dc_status status; status = core_link_write_dpcd( link, DP_MAIN_LINK_CHANNEL_CODING_SET, (uint8_t *) &encoding, 1); DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n", __func__, DP_MAIN_LINK_CHANNEL_CODING_SET, encoding); return status; } void dpcd_set_training_pattern( struct dc_link *link, enum dc_dp_training_pattern training_pattern) { union dpcd_training_pattern dpcd_pattern = {0}; dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dp_training_pattern_to_dpcd_training_pattern( link, training_pattern); core_link_write_dpcd( link, DP_TRAINING_PATTERN_SET, &dpcd_pattern.raw, 1); DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", __func__, DP_TRAINING_PATTERN_SET, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } enum dc_status dpcd_set_link_settings( struct dc_link *link, const struct 
link_training_settings *lt_settings) { uint8_t rate; enum dc_status status; union down_spread_ctrl downspread = {0}; union lane_count_set lane_count_set = {0}; downspread.raw = (uint8_t) (lt_settings->link_settings.link_spread); lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; if (link->ep_type == DISPLAY_ENDPOINT_PHY && lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; } status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread)); status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && lt_settings->link_settings.use_link_rate_set == true) { rate = 0; /* WA for some MUX chips that will power down with eDP and lose supported * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure * MUX chip gets link rate set back before link training. 
*/ if (link->connector_signal == SIGNAL_TYPE_EDP) { uint8_t supported_link_rates[16]; core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, supported_link_rates, sizeof(supported_link_rates)); } status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); status = core_link_write_dpcd(link, DP_LINK_RATE_SET, &lt_settings->link_settings.link_rate_set, 1); } else { rate = get_dpcd_link_rate(&lt_settings->link_settings); status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); } if (rate) { DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", __func__, DP_LINK_BW_SET, lt_settings->link_settings.link_rate, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, lt_settings->enhanced_framing, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); } else { DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", __func__, DP_LINK_RATE_SET, lt_settings->link_settings.link_rate_set, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, lt_settings->enhanced_framing, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); } return status; } enum dc_status dpcd_set_lane_settings( struct dc_link *link, const struct link_training_settings *link_training_setting, uint32_t offset) { unsigned int lane0_set_address; enum dc_status status; lane0_set_address = DP_TRAINING_LANE0_SET; if (is_repeater(link_training_setting, offset)) lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); status = core_link_write_dpcd(link, lane0_set_address, (uint8_t *)(link_training_setting->dpcd_lane_settings), link_training_setting->link_settings.lane_count); if (is_repeater(link_training_setting, offset)) { DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, offset, lane0_set_address, 
link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, lane0_set_address, link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } return status; } void dpcd_set_lt_pattern_and_lane_settings( struct dc_link *link, const struct link_training_settings *lt_settings, enum dc_dp_training_pattern pattern, uint32_t offset) { uint32_t dpcd_base_lt_offset; uint8_t dpcd_lt_buffer[5] = {0}; union dpcd_training_pattern dpcd_pattern = {0}; uint32_t size_in_bytes; bool edp_workaround = false; /* TODO link_prop.INTERNAL */ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; if (is_repeater(lt_settings, offset)) dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); /***************************************************************** * DpcdAddress_TrainingPatternSet *****************************************************************/ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dp_training_pattern_to_dpcd_training_pattern(link, pattern); dpcd_pattern.v1_4.SCRAMBLING_DISABLE = dp_initialize_scrambling_data_symbols(link, pattern); dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; if (is_repeater(lt_settings, offset)) { DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", __func__, offset, dpcd_base_lt_offset, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } else { DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = 
%x\n", __func__, dpcd_base_lt_offset, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } /* concatenate everything into one buffer*/ size_in_bytes = lt_settings->link_settings.lane_count * sizeof(lt_settings->dpcd_lane_settings[0]); // 0x00103 - 0x00102 memmove( &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], lt_settings->dpcd_lane_settings, size_in_bytes); if (is_repeater(lt_settings, offset)) { if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" " 0x%X TX_FFE_PRESET_VALUE = %x\n", __func__, offset, dpcd_base_lt_offset, lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, offset, dpcd_base_lt_offset, lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", __func__, dpcd_base_lt_offset, lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, dpcd_base_lt_offset, lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is * causing issues 
on some eDP panels (EPR#366724) */ core_link_write_dpcd( link, DP_TRAINING_PATTERN_SET, &dpcd_pattern.raw, sizeof(dpcd_pattern.raw)); core_link_write_dpcd( link, DP_TRAINING_LANE0_SET, (uint8_t *)(lt_settings->dpcd_lane_settings), size_in_bytes); } else if (link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) { core_link_write_dpcd( link, dpcd_base_lt_offset, dpcd_lt_buffer, sizeof(dpcd_lt_buffer)); } else /* write it all in (1 + number-of-lanes)-byte burst*/ core_link_write_dpcd( link, dpcd_base_lt_offset, dpcd_lt_buffer, size_in_bytes + sizeof(dpcd_pattern.raw)); } void start_clock_recovery_pattern_early(struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t offset) { DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", __func__); dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); dp_set_hw_lane_settings(link, link_res, lt_settings, offset); udelay(400); } void dp_set_hw_test_pattern( struct dc_link *link, const struct link_resource *link_res, enum dp_test_pattern test_pattern, uint8_t *custom_pattern, uint32_t custom_pattern_size) { const struct link_hwss *link_hwss = get_link_hwss(link, link_res); struct encoder_set_dp_phy_pattern_param pattern_param = {0}; pattern_param.dp_phy_pattern = test_pattern; pattern_param.custom_pattern = custom_pattern; pattern_param.custom_pattern_size = custom_pattern_size; pattern_param.dp_panel_mode = dp_get_panel_mode(link); if (link_hwss->ext.set_dp_link_test_pattern) link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); } bool dp_set_hw_training_pattern( struct dc_link *link, const struct link_resource *link_res, enum dc_dp_training_pattern pattern, uint32_t offset) { enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; switch (pattern) { case DP_TRAINING_PATTERN_SEQUENCE_1: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; break; case DP_TRAINING_PATTERN_SEQUENCE_2: 
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; break; case DP_TRAINING_PATTERN_SEQUENCE_3: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; break; case DP_TRAINING_PATTERN_SEQUENCE_4: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; case DP_128b_132b_TPS1: test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; break; case DP_128b_132b_TPS2: test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; break; default: break; } dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); return true; } static bool perform_post_lt_adj_req_sequence( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; uint32_t adj_req_count; uint32_t adj_req_timer; bool req_drv_setting_changed; uint32_t lane; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; req_drv_setting_changed = false; for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; adj_req_count++) { req_drv_setting_changed = false; for (adj_req_timer = 0; adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; adj_req_timer++) { dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); if (dpcd_lane_status_updated.bits. 
POST_LT_ADJ_REQ_IN_PROGRESS == 0) return true; if (!dp_is_cr_done(lane_count, dpcd_lane_status)) return false; if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || !dp_is_symbol_locked(lane_count, dpcd_lane_status) || !dp_is_interlane_aligned(dpcd_lane_status_updated)) return false; for (lane = 0; lane < (uint32_t)(lane_count); lane++) { if (lt_settings-> dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { req_drv_setting_changed = true; break; } } if (req_drv_setting_changed) { dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); dp_set_drive_settings(link, link_res, lt_settings); break; } msleep(1); } if (!req_drv_setting_changed) { DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", __func__); ASSERT(0); return true; } } DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", __func__); ASSERT(0); return true; } static enum link_training_result dp_transition_to_video_idle( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings, enum link_training_result status) { union lane_count_set lane_count_set = {0}; /* 4. mainlink output idle pattern*/ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); /* * 5. post training adjust if required * If the upstream DPTX and downstream DPRX both support TPS4, * TPS4 must be used instead of POST_LT_ADJ_REQ. */ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { /* delay 5ms after Main Link output idle pattern and then check * DPCD 0202h. 
*/ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { msleep(5); status = dp_check_link_loss_status(link, lt_settings); } return status; } if (status == LINK_TRAINING_SUCCESS && perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) status = LINK_TRAINING_LQA_FAIL; lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; core_link_write_dpcd( link, DP_LANE_COUNT_SET, &lane_count_set.raw, sizeof(lane_count_set)); return status; } enum link_training_result dp_perform_link_training( struct dc_link *link, const struct link_resource *link_res, const struct dc_link_settings *link_settings, bool skip_video_pattern) { enum link_training_result status = LINK_TRAINING_SUCCESS; struct link_training_settings lt_settings = {0}; enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings); /* decide training settings */ dp_decide_training_settings( link, link_settings, &lt_settings); override_training_settings( link, &link->preferred_training_settings, &lt_settings); /* reset previous training states */ dpcd_exit_training_mode(link, encoding); /* configure link prior to entering training mode */ dpcd_configure_lttpr_mode(link, &lt_settings); dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); dpcd_configure_channel_coding(link, &lt_settings); /* enter training mode: * Per DP specs starting from here, DPTX device shall not issue * Non-LT AUX transactions inside training mode. 
*/ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) if (link->dc->config.use_old_fixed_vs_sequence) status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings); else status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings); else if (encoding == DP_8b_10b_ENCODING) status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings); else if (encoding == DP_128b_132b_ENCODING) status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings); else ASSERT(0); /* exit training mode */ dpcd_exit_training_mode(link, encoding); /* switch to video idle */ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) status = dp_transition_to_video_idle(link, link_res, &lt_settings, status); /* dump debug data */ dp_log_training_result(link, &lt_settings, status); if (status != LINK_TRAINING_SUCCESS) link->ctx->dc->debug_data.ltFailCount++; return status; } bool perform_link_training_with_retries( const struct dc_link_settings *link_setting, bool skip_video_pattern, int attempts, struct pipe_ctx *pipe_ctx, enum signal_type signal, bool do_fallback) { int j; uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; enum dp_panel_mode panel_mode = dp_get_panel_mode(link); enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; struct dc_link_settings cur_link_settings = *link_setting; struct dc_link_settings max_link_settings = *link_setting; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); int fail_count = 0; bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ bool is_link_bw_min = /* RBR x 1 */ (cur_link_settings.link_rate <= LINK_RATE_LOW) && (cur_link_settings.lane_count <= LANE_COUNT_ONE); dp_trace_commit_lt_init(link); if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) /* We need to do this before 
the link training to ensure the idle * pattern in SST mode will be sent right after the link training */ link_hwss->setup_stream_encoder(pipe_ctx); dp_trace_set_lt_start_timestamp(link, false); j = 0; while (j < attempts && fail_count < (attempts * 10)) { DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d) @ spread = %x\n", __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, cur_link_settings.lane_count, cur_link_settings.link_spread); dp_enable_link_phy( link, &pipe_ctx->link_res, signal, pipe_ctx->clock_source->id, &cur_link_settings); if (stream->sink_patches.dppowerup_delay > 0) { int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; msleep(delay_dp_power_up_in_ms); } if (panel_mode == DP_PANEL_MODE_EDP) { struct cp_psp *cp_psp = &stream->ctx->cp_psp; if (cp_psp && cp_psp->funcs.enable_assr) { /* ASSR is bound to fail with unsigned PSP * verstage used during devlopment phase. * Report and continue with eDP panel mode to * perform eDP link training with right settings */ bool result; result = cp_psp->funcs.enable_assr(cp_psp->handle, link); if (!result && link->panel_mode != DP_PANEL_MODE_EDP) panel_mode = DP_PANEL_MODE_DEFAULT; } } dp_set_panel_mode(link, panel_mode); if (link->aux_access_disabled) { dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings); return true; } else { /** @todo Consolidate USB4 DP and DPx.x training. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { status = dpia_perform_link_training( link, &pipe_ctx->link_res, &cur_link_settings, skip_video_pattern); /* Transmit idle pattern once training successful. */ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); // Update verified link settings to current one // Because DPIA LT might fallback to lower link setting. 
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link); } } } else { status = dp_perform_link_training( link, &pipe_ctx->link_res, &cur_link_settings, skip_video_pattern); } dp_trace_lt_total_count_increment(link, false); dp_trace_lt_result_update(link, status, false); dp_trace_set_lt_end_timestamp(link, false); if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) return true; } fail_count++; dp_trace_lt_fail_count_update(link, fail_count, false); if (link->ep_type == DISPLAY_ENDPOINT_PHY) { /* latest link training still fail or link training is aborted * skip delay and keep PHY on */ if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT)) break; } if (j == (attempts - 1)) { DC_LOG_WARNING( "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, cur_link_settings.lane_count, cur_link_settings.link_spread, status); } else { DC_LOG_HW_LINK_TRAINING( "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, cur_link_settings.lane_count, cur_link_settings.link_spread, status); } dp_disable_link_phy(link, &pipe_ctx->link_res, signal); /* Abort link training if failure due to sink being unplugged. 
*/ if (status == LINK_TRAINING_ABORT) { enum dc_connection_type type = dc_connection_none; link_detect_connection_type(link, &type); if (type == dc_connection_none) { DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); break; } } /* Try to train again at original settings if: * - not falling back between training attempts; * - aborted previous attempt due to reasons other than sink unplug; * - successfully trained but at a link rate lower than that required by stream; * - reached minimum link bandwidth. */ if (!do_fallback || (status == LINK_TRAINING_ABORT) || (status == LINK_TRAINING_SUCCESS && is_link_bw_low) || is_link_bw_min) { j++; cur_link_settings = *link_setting; delay_between_attempts += LINK_TRAINING_RETRY_DELAY; is_link_bw_low = false; is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && (cur_link_settings.lane_count <= LANE_COUNT_ONE); } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ uint32_t req_bw; uint32_t link_bw; enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED; decide_fallback_link_setting(link, &max_link_settings, &cur_link_settings, status); if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) link_encoding = DC_LINK_ENCODING_DP_8b_10b; else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING) link_encoding = DC_LINK_ENCODING_DP_128b_132b; /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to * minimum link bandwidth. 
*/ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding); link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings); is_link_bw_low = (req_bw > link_bw); is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && (cur_link_settings.lane_count <= LANE_COUNT_ONE)); if (is_link_bw_low) DC_LOG_WARNING( "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", __func__, link->link_index, req_bw, link_bw); } msleep(delay_between_attempts); } return false; }
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements 8b/10b link training specially modified to support an * embedded retimer chip. This retimer chip is referred as fixed vs pe retimer. * Unlike native dp connection this chip requires a modified link training * protocol based on 8b/10b link training. Since this is a non standard sequence * and we must support this hardware, we decided to isolate it in its own * training sequence inside its own file. 
*/ #include "link_dp_training_fixed_vs_pe_retimer.h" #include "link_dp_training_8b_10b.h" #include "link_dpcd.h" #include "link_dp_phy.h" #include "link_dp_capability.h" #include "link_ddc.h" #define DC_LOGGER \ link->ctx->logger void dp_fixed_vs_pe_read_lane_adjust( struct dc_link *link, union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) { const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; uint8_t dprx_vs = 0; uint8_t dprx_pe = 0; uint8_t lane; /* W/A to read lane settings requested by DPRX */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_query_fixed_vs_pe_retimer(link->ddc, &dprx_vs, 1); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); link_query_fixed_vs_pe_retimer(link->ddc, &dprx_pe, 1); for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; } } void dp_fixed_vs_pe_set_retimer_lane_settings( struct dc_link *link, const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], uint8_t lane_count) { const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; uint8_t lane = 0; for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); vendor_lttpr_write_data_pe[3] |= dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); } /* Force LTTPR to output desired VS and PE */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); link_configure_fixed_vs_pe_retimer(link->ddc, 
&vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); } static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; uint8_t toggle_rate = 0x6; uint8_t target_rate = 0x6; bool apply_toggle_rate_wa = false; uint8_t repeater_cnt; uint8_t repeater_id; /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ if (lt_settings->cr_pattern_time < 16000) lt_settings->cr_pattern_time = 16000; /* Fixed VS/PE specific: Toggle link rate */ apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); target_rate = get_dpcd_link_rate(&lt_settings->link_settings); toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; if (apply_toggle_rate_wa) lt_settings->link_settings.link_rate = toggle_rate; if (link->ctx->dc->work_arounds.lt_early_cr_pattern) start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); /* 1. set link rate, lane count and spread. */ dpcd_set_link_settings(link, lt_settings); /* Fixed VS/PE specific: Toggle link rate back*/ if (apply_toggle_rate_wa) { core_link_write_dpcd( link, DP_LINK_BW_SET, &target_rate, 1); } link->vendor_specific_lttpr_link_rate_wa = target_rate; if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { /* 2. 
perform link training (set link training done * to false is done as well) */ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); repeater_id--) { status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); if (status != LINK_TRAINING_SUCCESS) { repeater_training_done(link, repeater_id); break; } status = perform_8b_10b_channel_equalization_sequence(link, link_res, lt_settings, repeater_id); repeater_training_done(link, repeater_id); if (status != LINK_TRAINING_SUCCESS) break; for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lt_settings->dpcd_lane_settings[lane].raw = 0; lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; } } } if (status == LINK_TRAINING_SUCCESS) { status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); if (status == LINK_TRAINING_SUCCESS) { status = perform_8b_10b_channel_equalization_sequence(link, link_res, lt_settings, DPRX); } } return status; } enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings) { const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; const uint8_t offset = dp_parse_lttpr_repeater_count( link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; const uint8_t 
vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; union lane_count_set lane_count_set = {0}; uint8_t toggle_rate; uint8_t rate; /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING); if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); return status; } if (offset != 0xFF) { if (offset == 2) { pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ } else if (offset > 2) { pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; } } /* Vendor specific: Reset lane settings */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* Vendor specific: Enable intercept */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); /* 1. set link rate, lane count and spread. 
*/ downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; } core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread)); core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); rate = get_dpcd_link_rate(&lt_settings->link_settings); /* Vendor specific: Toggle link rate */ toggle_rate = (rate == 0x6) ? 0xA : 0x6; if (link->vendor_specific_lttpr_link_rate_wa == rate) { core_link_write_dpcd( link, DP_LINK_BW_SET, &toggle_rate, 1); } link->vendor_specific_lttpr_link_rate_wa = rate; core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", __func__, DP_LINK_BW_SET, lt_settings->link_settings.link_rate, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, lt_settings->enhanced_framing, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); } /* 2. 
Perform link training */ /* Perform Clock Recovery Sequence */ if (status == LINK_TRAINING_SUCCESS) { const uint8_t max_vendor_dpcd_retries = 10; uint32_t retries_cr; uint32_t retry_count; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; uint8_t i = 0; retries_cr = 0; retry_count = 0; memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); memset(&dpcd_lane_status_updated, '\0', sizeof(dpcd_lane_status_updated)); while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { /* 1. call HWSS to set lane settings */ dp_set_hw_lane_settings( link, link_res, lt_settings, 0); /* 2. update DPCD of the receiver */ if (!retry_count) { /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration. */ dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, lt_settings->pattern_for_cr, 0); /* Vendor specific: Disable intercept */ for (i = 0; i < max_vendor_dpcd_retries; i++) { if (pre_disable_intercept_delay_ms != 0) msleep(pre_disable_intercept_delay_ms); if (link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_dis[0], sizeof(vendor_lttpr_write_data_intercept_dis))) break; link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); } } else { vendor_lttpr_write_data_vs[3] = 0; vendor_lttpr_write_data_pe[3] = 0; for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); vendor_lttpr_write_data_pe[3] |= lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); } /* Vendor specific: Update VS and PE to DPRX requested value */ link_configure_fixed_vs_pe_retimer(link->ddc, 
&vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); dpcd_set_lane_settings( link, lt_settings, 0); } /* 3. wait receiver to lock-on*/ wait_time_microsec = lt_settings->cr_pattern_time; dp_wait_for_training_aux_rd_interval( link, wait_time_microsec); /* 4. Read lane status and requested drive * settings as set by the sink */ dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, dpcd_lane_adjust, 0); /* 5. check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) { status = LINK_TRAINING_SUCCESS; break; } /* 6. max VS reached*/ if (dp_is_max_vs_reached(lt_settings)) break; /* 7. same lane settings */ /* Note: settings are the same for all lanes, * so comparing first lane is sufficient */ if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) retries_cr++; else retries_cr = 0; /* 8. update VS/PE/PC2 in lt_settings*/ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); retry_count++; } if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { ASSERT(0); DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", __func__, LINK_TRAINING_MAX_CR_RETRY); } status = dp_get_cr_failure(lane_count, dpcd_lane_status); } /* Perform Channel EQ Sequence */ if (status == LINK_TRAINING_SUCCESS) { enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; /* Note: also check that TPS4 is a supported feature*/ tr_pattern = lt_settings->pattern_for_eq; dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); status = LINK_TRAINING_EQ_FAIL_EQ; for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; retries_ch_eq++) { dp_set_hw_lane_settings(link, link_res, lt_settings, 0); vendor_lttpr_write_data_vs[3] = 0; vendor_lttpr_write_data_pe[3] = 0; for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); vendor_lttpr_write_data_pe[3] |= lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); } /* Vendor specific: Update VS and PE to DPRX requested value */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* 2. update DPCD*/ if (!retries_ch_eq) /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration */ dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, tr_pattern, 0); else dpcd_set_lane_settings(link, lt_settings, 0); /* 3. wait for receiver to lock-on*/ wait_time_microsec = lt_settings->eq_pattern_time; dp_wait_for_training_aux_rd_interval( link, wait_time_microsec); /* 4. 
Read lane status and requested * drive settings as set by the sink */ dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, dpcd_lane_adjust, 0); /* 5. check CR done*/ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { status = LINK_TRAINING_EQ_FAIL_CR; break; } /* 6. check CHEQ done*/ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && dp_is_symbol_locked(lane_count, dpcd_lane_status) && dp_is_interlane_aligned(dpcd_lane_status_updated)) { status = LINK_TRAINING_SUCCESS; break; } /* 7. update VS/PE/PC2 in lt_settings*/ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } } return status; } enum link_training_result dp_perform_fixed_vs_pe_training_sequence( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings) { const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; const uint8_t offset = dp_parse_lttpr_repeater_count( link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x6E}; const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E}; const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01}; const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 
0x03, 0xF3, 0x06}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; union lane_count_set lane_count_set = {0}; uint8_t toggle_rate; uint8_t rate; /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING); if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); return status; } if (offset != 0xFF) { if (offset == 2) { pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ } else if (offset > 2) { pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; } } /* Vendor specific: Reset lane settings */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* Vendor specific: Enable intercept */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); /* 1. set link rate, lane count and spread. 
*/ downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; } core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread)); core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); rate = get_dpcd_link_rate(&lt_settings->link_settings); /* Vendor specific: Toggle link rate */ toggle_rate = (rate == 0x6) ? 0xA : 0x6; if (link->vendor_specific_lttpr_link_rate_wa == rate) { core_link_write_dpcd( link, DP_LINK_BW_SET, &toggle_rate, 1); } link->vendor_specific_lttpr_link_rate_wa = rate; core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", __func__, DP_LINK_BW_SET, lt_settings->link_settings.link_rate, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, lt_settings->enhanced_framing, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); } /* 2. 
Perform link training */ /* Perform Clock Recovery Sequence */ if (status == LINK_TRAINING_SUCCESS) { const uint8_t max_vendor_dpcd_retries = 10; uint32_t retries_cr; uint32_t retry_count; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; uint8_t i = 0; retries_cr = 0; retry_count = 0; memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); memset(&dpcd_lane_status_updated, '\0', sizeof(dpcd_lane_status_updated)); while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { /* 1. call HWSS to set lane settings */ dp_set_hw_lane_settings( link, link_res, lt_settings, 0); /* 2. update DPCD of the receiver */ if (!retry_count) { /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration. */ dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, lt_settings->pattern_for_cr, 0); /* Vendor specific: Disable intercept */ for (i = 0; i < max_vendor_dpcd_retries; i++) { if (pre_disable_intercept_delay_ms != 0) msleep(pre_disable_intercept_delay_ms); if (link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_dis[0], sizeof(vendor_lttpr_write_data_intercept_dis))) break; link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); } } else { vendor_lttpr_write_data_vs[3] = 0; vendor_lttpr_write_data_pe[3] = 0; for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); vendor_lttpr_write_data_pe[3] |= lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); } /* Vendor specific: Update VS and PE to DPRX requested value */ link_configure_fixed_vs_pe_retimer(link->ddc, 
&vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); dpcd_set_lane_settings( link, lt_settings, 0); } /* 3. wait receiver to lock-on*/ wait_time_microsec = lt_settings->cr_pattern_time; dp_wait_for_training_aux_rd_interval( link, wait_time_microsec); /* 4. Read lane status and requested drive * settings as set by the sink */ dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, dpcd_lane_adjust, 0); /* 5. check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) { status = LINK_TRAINING_SUCCESS; break; } /* 6. max VS reached*/ if (dp_is_max_vs_reached(lt_settings)) break; /* 7. same lane settings */ /* Note: settings are the same for all lanes, * so comparing first lane is sufficient */ if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) retries_cr++; else retries_cr = 0; /* 8. update VS/PE/PC2 in lt_settings*/ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); retry_count++; } if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { ASSERT(0); DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", __func__, LINK_TRAINING_MAX_CR_RETRY); } status = dp_get_cr_failure(lane_count, dpcd_lane_status); } /* Perform Channel EQ Sequence */ if (status == LINK_TRAINING_SUCCESS) { enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_adicora_eq1[0], sizeof(vendor_lttpr_write_data_adicora_eq1)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_adicora_eq2[0], sizeof(vendor_lttpr_write_data_adicora_eq2)); /* Note: also check that TPS4 is a supported feature*/ tr_pattern = lt_settings->pattern_for_eq; dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); status = LINK_TRAINING_EQ_FAIL_EQ; for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; retries_ch_eq++) { dp_set_hw_lane_settings(link, link_res, lt_settings, 0); vendor_lttpr_write_data_vs[3] = 0; vendor_lttpr_write_data_pe[3] = 0; for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); vendor_lttpr_write_data_pe[3] |= lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); } /* Vendor specific: Update VS and PE to DPRX requested value */ link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* 2. 
update DPCD*/ if (!retries_ch_eq) { /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration */ dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, tr_pattern, 0); link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_adicora_eq3[0], sizeof(vendor_lttpr_write_data_adicora_eq3)); } else dpcd_set_lane_settings(link, lt_settings, 0); /* 3. wait for receiver to lock-on*/ wait_time_microsec = lt_settings->eq_pattern_time; dp_wait_for_training_aux_rd_interval( link, wait_time_microsec); /* 4. Read lane status and requested * drive settings as set by the sink */ dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, dpcd_lane_adjust, 0); /* 5. check CR done*/ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { status = LINK_TRAINING_EQ_FAIL_CR; break; } /* 6. check CHEQ done*/ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && dp_is_symbol_locked(lane_count, dpcd_lane_status) && dp_is_interlane_aligned(dpcd_lane_status_updated)) { status = LINK_TRAINING_SUCCESS; break; } /* 7. update VS/PE/PC2 in lt_settings*/ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } } return status; }
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This module implements functionality for training DPIA links. */ #include "link_dp_training_dpia.h" #include "dc.h" #include "inc/core_status.h" #include "dpcd_defs.h" #include "link_dp_dpia.h" #include "link_hwss.h" #include "dm_helpers.h" #include "dmub/inc/dmub_cmd.h" #include "link_dpcd.h" #include "link_dp_phy.h" #include "link_dp_training_8b_10b.h" #include "link_dp_capability.h" #include "dc_dmub_srv.h" #define DC_LOGGER \ link->ctx->logger /* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ #define DPIA_CLK_SYNC_DELAY 16000 /* Extend interval between training status checks for manual testing. */ #define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 #define TRAINING_AUX_RD_INTERVAL 100 //us /* SET_CONFIG message types sent by driver. 
*/ enum dpia_set_config_type { DPIA_SET_CFG_SET_LINK = 0x01, DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, DPIA_SET_CFG_SET_TRAINING = 0x18, DPIA_SET_CFG_SET_VSPE = 0x19 }; /* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ enum dpia_set_config_ts { DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ DPIA_TS_TPS1 = 0x01, DPIA_TS_TPS2 = 0x02, DPIA_TS_TPS3 = 0x03, DPIA_TS_TPS4 = 0x07, DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ }; /* SET_CONFIG message data associated with messages sent by driver. */ union dpia_set_config_data { struct { uint8_t mode : 1; uint8_t reserved : 7; } set_link; struct { uint8_t stage; } set_training; struct { uint8_t swing : 2; uint8_t max_swing_reached : 1; uint8_t pre_emph : 2; uint8_t max_pre_emph_reached : 1; uint8_t reserved : 2; } set_vspe; uint8_t raw; }; /* Configure link as prescribed in link_setting; set LTTPR mode; and * Initialize link training settings. * Abort link training if sink unplug detected. * * @param link DPIA link being trained. * @param[in] link_setting Lane count, link rate and downspread control. * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis). 
*/ static enum link_training_result dpia_configure_link( struct dc_link *link, const struct link_resource *link_res, const struct dc_link_settings *link_setting, struct link_training_settings *lt_settings) { enum dc_status status; bool fec_enable; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n", __func__, link->link_id.enum_id - ENUM_ID_1, lt_settings->lttpr_mode); dp_decide_training_settings( link, link_setting, lt_settings); dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode); status = dpcd_configure_channel_coding(link, lt_settings); if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; /* Configure lttpr mode */ status = dpcd_configure_lttpr_mode(link, lt_settings); if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; /* Set link rate, lane count and spread. */ status = dpcd_set_link_settings(link, lt_settings); if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; status = dp_set_fec_ready(link, link_res, fec_enable); if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; return LINK_TRAINING_SUCCESS; } static enum dc_status core_link_send_set_config( struct dc_link *link, uint8_t msg_type, uint8_t msg_data) { struct set_config_cmd_payload payload; enum set_config_status set_config_result = SET_CONFIG_PENDING; /* prepare set_config payload */ payload.msg_type = msg_type; payload.msg_data = msg_data; if (!link->ddc->ddc_pin && !link->aux_access_disabled && (dm_helpers_dmub_set_config_sync(link->ctx, link, &payload, &set_config_result) == -1)) { return DC_ERROR_UNEXPECTED; } /* set_config should return ACK if successful */ return (set_config_result == SET_CONFIG_ACK_RECEIVED) ? DC_OK : DC_ERROR_UNEXPECTED; } /* Build SET_CONFIG message data payload for specified message type. 
*/ static uint8_t dpia_build_set_config_data( enum dpia_set_config_type type, struct dc_link *link, struct link_training_settings *lt_settings) { union dpia_set_config_data data; data.raw = 0; switch (type) { case DPIA_SET_CFG_SET_LINK: data.set_link.mode = lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0; break; case DPIA_SET_CFG_SET_PHY_TEST_MODE: break; case DPIA_SET_CFG_SET_VSPE: /* Assume all lanes have same drive settings. */ data.set_vspe.swing = lt_settings->hw_lane_settings[0].VOLTAGE_SWING; data.set_vspe.pre_emph = lt_settings->hw_lane_settings[0].PRE_EMPHASIS; data.set_vspe.max_swing_reached = lt_settings->hw_lane_settings[0].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 1 : 0; data.set_vspe.max_pre_emph_reached = lt_settings->hw_lane_settings[0].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; break; default: ASSERT(false); /* Message type not supported by helper function. */ break; } return data.raw; } /* Convert DC training pattern to DPIA training stage. */ static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern tps, enum dpia_set_config_ts *ts) { enum dc_status status = DC_OK; switch (tps) { case DP_TRAINING_PATTERN_SEQUENCE_1: *ts = DPIA_TS_TPS1; break; case DP_TRAINING_PATTERN_SEQUENCE_2: *ts = DPIA_TS_TPS2; break; case DP_TRAINING_PATTERN_SEQUENCE_3: *ts = DPIA_TS_TPS3; break; case DP_TRAINING_PATTERN_SEQUENCE_4: *ts = DPIA_TS_TPS4; break; case DP_TRAINING_PATTERN_VIDEOIDLE: *ts = DPIA_TS_DPRX_DONE; break; default: /* TPS not supported by helper function. */ ASSERT(false); *ts = DPIA_TS_DPRX_DONE; status = DC_UNSUPPORTED_VALUE; break; } return status; } /* Write training pattern to DPCD. 
 */
static enum dc_status dpcd_set_lt_pattern(
	struct dc_link *link,
	enum dc_dp_training_pattern pattern,
	uint32_t hop)
{
	union dpcd_training_pattern dpcd_pattern = {0};
	uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
	enum dc_status status;

	/* Repeater hops use the per-repeater TRAINING_PATTERN_SET register
	 * block; hop numbering starts at 1 for the repeater closest to DPRX.
	 */
	if (hop != DPRX)
		dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));

	/* DpcdAddress_TrainingPatternSet */
	dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
		dp_training_pattern_to_dpcd_training_pattern(link, pattern);

	dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
		dp_initialize_scrambling_data_symbols(link, pattern);

	if (hop != DPRX) {
		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
			__func__,
			hop,
			dpcd_tps_offset,
			dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
	} else {
		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
			__func__,
			dpcd_tps_offset,
			dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
	}

	status = core_link_write_dpcd(
			link,
			dpcd_tps_offset,
			&dpcd_pattern.raw,
			sizeof(dpcd_pattern.raw));

	return status;
}

/* Execute clock recovery phase of link training for specified hop in display
 * path in non-transparent mode:
 * - Driver issues both DPCD and SET_CONFIG transactions.
 * - TPS1 is transmitted for any hops downstream of DPOA.
 * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA.
 * - CR for the first hop (DPTX-to-DPIA) is assumed to be successful.
 *
 * @param link DPIA link being trained.
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 * @param hop Hop in display path. DPRX = 0.
 */
static enum link_training_result dpia_training_cr_non_transparent(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct link_training_settings *lt_settings,
	uint32_t hop)
{
	enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
	uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
	enum dc_status status;
	uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE.
	 */
	uint32_t retry_count = 0;
	uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */
	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
	union lane_align_status_updated dpcd_lane_status_updated = {0};
	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
	uint8_t set_cfg_data;
	enum dpia_set_config_ts ts;

	repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);

	/* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
	 * Fix inherited from perform_clock_recovery_sequence() -
	 * the DP equivalent of this function:
	 * Required for Synaptics MST hub which can put the LT in
	 * infinite loop by switching the VS between level 0 and level 1
	 * continuously.
	 */
	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {

		/* DPTX-to-DPIA */
		if (hop == repeater_cnt) {
			/* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that
			 * non-transparent link training has started.
			 * This also enables the transmission of clk_sync packets.
			 */
			set_cfg_data = dpia_build_set_config_data(
					DPIA_SET_CFG_SET_LINK,
					link,
					lt_settings);
			status = core_link_send_set_config(
					link,
					DPIA_SET_CFG_SET_LINK,
					set_cfg_data);
			/* CR for this hop is considered successful as long as
			 * SET_CONFIG message is acknowledged by DPOA.
			 */
			if (status == DC_OK)
				result = LINK_TRAINING_SUCCESS;
			else
				result = LINK_TRAINING_ABORT;
			break;
		}

		/* DPOA-to-x */
		/* Instruct DPOA to transmit TPS1 then update DPCD.
		 * Only performed on the first attempt; subsequent iterations
		 * just adjust drive settings.
		 */
		if (retry_count == 0) {
			status = convert_trng_ptn_to_trng_stg(lt_settings->pattern_for_cr, &ts);
			if (status != DC_OK) {
				result = LINK_TRAINING_ABORT;
				break;
			}
			status = core_link_send_set_config(
					link,
					DPIA_SET_CFG_SET_TRAINING,
					ts);
			if (status != DC_OK) {
				result = LINK_TRAINING_ABORT;
				break;
			}
			status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop);
			if (status != DC_OK) {
				result = LINK_TRAINING_ABORT;
				break;
			}
		}

		/* Update DPOA drive settings then DPCD. DPOA only adjusts
		 * drive settings for hops immediately downstream.
		 */
		if (hop == repeater_cnt - 1) {
			set_cfg_data = dpia_build_set_config_data(
					DPIA_SET_CFG_SET_VSPE,
					link,
					lt_settings);
			status = core_link_send_set_config(
					link,
					DPIA_SET_CFG_SET_VSPE,
					set_cfg_data);
			if (status != DC_OK) {
				result = LINK_TRAINING_ABORT;
				break;
			}
		}
		status = dpcd_set_lane_settings(link, lt_settings, hop);
		if (status != DC_OK) {
			result = LINK_TRAINING_ABORT;
			break;
		}

		dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);

		/* Read status and adjustment requests from DPCD. */
		status = dp_get_lane_status_and_lane_adjust(
				link,
				lt_settings,
				dpcd_lane_status,
				&dpcd_lane_status_updated,
				dpcd_lane_adjust,
				hop);
		if (status != DC_OK) {
			result = LINK_TRAINING_ABORT;
			break;
		}

		/* Check if clock recovery successful. */
		if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
			DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
			result = LINK_TRAINING_SUCCESS;
			break;
		}

		result = dp_get_cr_failure(lane_count, dpcd_lane_status);

		if (dp_is_max_vs_reached(lt_settings))
			break;

		/* Count number of attempts with same drive settings.
		 * Note: settings are the same for all lanes,
		 * so comparing first lane is sufficient.
		 */
		if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
				dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) &&
				(lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
						dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
			retries_cr++;
		else
			retries_cr = 0;

		/* Update VS/PE.
		 */
		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
		retry_count++;
	}

	/* Abort link training if clock recovery failed due to HPD unplug. */
	if (link->is_hpd_pending)
		result = LINK_TRAINING_ABORT;

	DC_LOG_HW_LINK_TRAINING(
		"%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n",
		__func__,
		link->link_id.enum_id - ENUM_ID_1,
		hop,
		result,
		retry_count,
		status);

	return result;
}

/* Execute clock recovery phase of link training in transparent LTTPR mode:
 * - Driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA.
 * - Driver writes TPS1 to DPCD to kick off training.
 * - Clock recovery (CR) for link is handled by DPOA, which reports result to DPIA on completion.
 * - DPIA communicates result to driver by updating CR status when driver reads DPCD.
 *
 * @param link DPIA link being trained.
 * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
 */
static enum link_training_result dpia_training_cr_transparent(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct link_training_settings *lt_settings)
{
	enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
	enum dc_status status;
	uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
	uint32_t retry_count = 0;
	uint32_t wait_time_microsec = lt_settings->cr_pattern_time;
	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
	union lane_align_status_updated dpcd_lane_status_updated = {0};
	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};

	/* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
	 * Fix inherited from perform_clock_recovery_sequence() -
	 * the DP equivalent of this function:
	 * Required for Synaptics MST hub which can put the LT in
	 * infinite loop by switching the VS between level 0 and level 1
	 * continuously.
	 */
	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {

		/* Write TPS1 (not VS or PE) to DPCD to start CR phase.
		 * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to
		 * start link training.
		 */
		if (retry_count == 0) {
			status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, DPRX);
			if (status != DC_OK) {
				result = LINK_TRAINING_ABORT;
				break;
			}
		}

		dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);

		/* Read status and adjustment requests from DPCD. */
		status = dp_get_lane_status_and_lane_adjust(
				link,
				lt_settings,
				dpcd_lane_status,
				&dpcd_lane_status_updated,
				dpcd_lane_adjust,
				DPRX);
		if (status != DC_OK) {
			result = LINK_TRAINING_ABORT;
			break;
		}

		/* Check if clock recovery successful. */
		if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
			DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
			result = LINK_TRAINING_SUCCESS;
			break;
		}

		result = dp_get_cr_failure(lane_count, dpcd_lane_status);

		if (dp_is_max_vs_reached(lt_settings))
			break;

		/* Count number of attempts with same drive settings.
		 * Note: settings are the same for all lanes,
		 * so comparing first lane is sufficient.
		 */
		if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
				dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) &&
				(lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
						dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
			retries_cr++;
		else
			retries_cr = 0;

		/* Update VS/PE. */
		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
		retry_count++;
	}

	/* Abort link training if clock recovery failed due to HPD unplug. */
	if (link->is_hpd_pending)
		result = LINK_TRAINING_ABORT;

	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n",
		__func__,
		link->link_id.enum_id - ENUM_ID_1,
		DPRX,
		result,
		retry_count);

	return result;
}

/* Execute clock recovery phase of link training for specified hop in display
 * path.
* * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_cr_phase( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t hop) { enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop); else result = dpia_training_cr_transparent(link, link_res, lt_settings); return result; } /* Return status read interval during equalization phase. */ static uint32_t dpia_get_eq_aux_rd_interval( const struct dc_link *link, const struct link_training_settings *lt_settings, uint32_t hop) { uint32_t wait_time_microsec; if (hop == DPRX) wait_time_microsec = lt_settings->eq_pattern_time; else wait_time_microsec = dp_translate_training_aux_read_interval( link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]); /* Check debug option for extending aux read interval. */ if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval) wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US; return wait_time_microsec; } /* Execute equalization phase of link training for specified hop in display * path in non-transparent mode: * - driver issues both DPCD and SET_CONFIG transactions. * - TPSx is transmitted for any hops downstream of DPOA. * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA. * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful. * - DPRX EQ only reported successful when both DPRX and DPIA requirements (clk sync packets sent) fulfilled. * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop Hop in display path. DPRX = 0. 
*/ static enum link_training_result dpia_training_eq_non_transparent( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t hop) { enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ uint32_t retries_eq = 0; enum dc_status status; enum dc_dp_training_pattern tr_pattern; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; uint8_t set_cfg_data; enum dpia_set_config_ts ts; /* Training pattern is TPS4 for repeater; * TPS2/3/4 for DPRX depending on what it supports. */ if (hop == DPRX) tr_pattern = lt_settings->pattern_for_eq; else tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { /* DPTX-to-DPIA equalization always successful. */ if (hop == repeater_cnt) { result = LINK_TRAINING_SUCCESS; break; } /* Instruct DPOA to transmit TPSn then update DPCD. */ if (retries_eq == 0) { status = convert_trng_ptn_to_trng_stg(tr_pattern, &ts); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } status = core_link_send_set_config( link, DPIA_SET_CFG_SET_TRAINING, ts); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } status = dpcd_set_lt_pattern(link, tr_pattern, hop); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } } /* Update DPOA drive settings then DPCD. DPOA only adjusts * drive settings for hop immediately downstream. 
*/ if (hop == repeater_cnt - 1) { set_cfg_data = dpia_build_set_config_data( DPIA_SET_CFG_SET_VSPE, link, lt_settings); status = core_link_send_set_config( link, DPIA_SET_CFG_SET_VSPE, set_cfg_data); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } } status = dpcd_set_lane_settings(link, lt_settings, hop); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } /* Extend wait time on second equalisation attempt on final hop to * ensure clock sync packets have been sent. */ if (hop == DPRX && retries_eq == 1) wait_time_microsec = max(wait_time_microsec, (uint32_t) DPIA_CLK_SYNC_DELAY); else wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop); dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); /* Read status and adjustment requests from DPCD. */ status = dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, dpcd_lane_adjust, hop); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } /* CR can still fail during EQ phase. Fail training if CR fails. */ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { result = LINK_TRAINING_EQ_FAIL_CR; break; } if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && dp_is_interlane_aligned(dpcd_lane_status_updated)) { result = LINK_TRAINING_SUCCESS; break; } /* Update VS/PE. */ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } /* Abort link training if equalization failed due to HPD unplug. 
*/ if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING( "%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n", __func__, link->link_id.enum_id - ENUM_ID_1, hop, result, retries_eq, status); return result; } /* Execute equalization phase of link training for specified hop in display * path in transparent LTTPR mode: * - driver only issues DPCD transactions leaves USB4 tunneling (SET_CONFIG) messages to DPIA. * - driver writes TPSx to DPCD to notify DPIA that is in equalization phase. * - equalization (EQ) for link is handled by DPOA, which reports result to DPIA on completion. * - DPIA communicates result to driver by updating EQ status when driver reads DPCD. * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_eq_transparent( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; uint32_t retries_eq = 0; enum dc_status status; enum dc_dp_training_pattern tr_pattern = lt_settings->pattern_for_eq; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX); for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { if (retries_eq == 0) { status = dpcd_set_lt_pattern(link, tr_pattern, DPRX); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } } dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); /* Read status and adjustment requests from DPCD. 
*/ status = dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; } /* CR can still fail during EQ phase. Fail training if CR fails. */ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { result = LINK_TRAINING_EQ_FAIL_CR; break; } if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status)) { /* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 * has to share encoders unlike DP and USBC */ if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) { result = LINK_TRAINING_SUCCESS; break; } } /* Update VS/PE. */ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } /* Abort link training if equalization failed due to HPD unplug. */ if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n", __func__, link->link_id.enum_id - ENUM_ID_1, DPRX, result, retries_eq); return result; } /* Execute equalization phase of link training for specified hop in display * path. * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_eq_phase( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t hop) { enum link_training_result result; if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop); else result = dpia_training_eq_transparent(link, link_res, lt_settings); return result; } /* End training of specified hop in display path. 
 */
static enum dc_status dpcd_clear_lt_pattern(
	struct dc_link *link,
	uint32_t hop)
{
	/* dpcd_pattern zero-initialized: writing 0 clears TRAINING_PATTERN_SET. */
	union dpcd_training_pattern dpcd_pattern = {0};
	uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
	enum dc_status status;

	if (hop != DPRX)
		dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));

	status = core_link_write_dpcd(
			link,
			dpcd_tps_offset,
			&dpcd_pattern.raw,
			sizeof(dpcd_pattern.raw));

	return status;
}

/* End training of specified hop in display path.
 *
 * In transparent LTTPR mode:
 * - driver clears training pattern for the specified hop in DPCD.
 * In non-transparent LTTPR mode:
 * - in addition to clearing training pattern, driver issues USB4 tunneling
 * (SET_CONFIG) messages to notify DPOA when training is done for first hop
 * (DPTX-to-DPIA) and last hop (DPRX).
 *
 * @param link DPIA link being trained.
 * @param hop Hop in display path. DPRX = 0.
 */
static enum link_training_result dpia_training_end(
	struct dc_link *link,
	struct link_training_settings *lt_settings,
	uint32_t hop)
{
	enum link_training_result result = LINK_TRAINING_SUCCESS;
	uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
	enum dc_status status;

	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {

		repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);

		if (hop == repeater_cnt) { /* DPTX-to-DPIA */
			/* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that
			 * DPTX-to-DPIA hop trained. No DPCD write needed for first hop.
			 */
			status = core_link_send_set_config(
					link,
					DPIA_SET_CFG_SET_TRAINING,
					DPIA_TS_UFP_DONE);
			if (status != DC_OK)
				result = LINK_TRAINING_ABORT;
		} else { /* DPOA-to-x */
			/* Write 0x0 to TRAINING_PATTERN_SET */
			status = dpcd_clear_lt_pattern(link, hop);
			if (status != DC_OK)
				result = LINK_TRAINING_ABORT;
		}

		/* Notify DPOA that non-transparent link training of DPRX done.
		 */
		if (hop == DPRX && result != LINK_TRAINING_ABORT) {
			status = core_link_send_set_config(
					link,
					DPIA_SET_CFG_SET_TRAINING,
					DPIA_TS_DPRX_DONE);
			if (status != DC_OK)
				result = LINK_TRAINING_ABORT;
		}

	} else { /* non-LTTPR or transparent LTTPR. */

		/* Write 0x0 to TRAINING_PATTERN_SET */
		status = dpcd_clear_lt_pattern(link, hop);
		if (status != DC_OK)
			result = LINK_TRAINING_ABORT;
	}

	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n",
		__func__,
		link->link_id.enum_id - ENUM_ID_1,
		hop,
		result,
		lt_settings->lttpr_mode);

	return result;
}

/* When aborting training of specified hop in display path, clean up by:
 * - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET.
 * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.
 *
 * @param link DPIA link being trained.
 * @param hop Hop in display path. DPRX = 0.
 */
static void dpia_training_abort(
	struct dc_link *link,
	struct link_training_settings *lt_settings,
	uint32_t hop)
{
	uint8_t data = 0;
	uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;

	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
		__func__,
		link->link_id.enum_id - ENUM_ID_1,
		lt_settings->lttpr_mode,
		link->is_hpd_pending);

	/* Abandon clean-up if sink unplugged.
	 * (AUX/DPCD writes would fail anyway without a sink.)
	 */
	if (link->is_hpd_pending)
		return;

	if (hop != DPRX)
		dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));

	/* Return values deliberately ignored: this is best-effort clean-up on
	 * an already-failed training attempt.
	 */
	core_link_write_dpcd(link, dpcd_tps_offset, &data, 1);
	core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1);
	core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1);
	core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
}

/* Train a DPIA (USB4-tunneled DP) link: configure the link, then run clock
 * recovery, equalization and end-of-training for each hop in turn, starting
 * with the hop closest to DPTX.
 */
enum link_training_result dpia_perform_link_training(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct dc_link_settings *link_setting,
	bool skip_video_pattern)
{
	enum link_training_result result;
	struct link_training_settings lt_settings = {0};
	uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
	int8_t repeater_id; /* Current hop. */

	struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in

	lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);

	/* Configure link as prescribed in link_setting and set LTTPR mode. */
	result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
	if (result != LINK_TRAINING_SUCCESS)
		return result;

	if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
		repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);

	/* Train each hop in turn starting with the one closest to DPTX.
	 * In transparent or non-LTTPR mode, train only the final hop (DPRX).
	 * (repeater_cnt stays 0 in that case, so the loop runs once with
	 * repeater_id == DPRX.)
	 */
	for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
		/* Clock recovery. */
		result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id);
		if (result != LINK_TRAINING_SUCCESS)
			break;

		/* Equalization. */
		result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id);
		if (result != LINK_TRAINING_SUCCESS)
			break;

		/* Stop training hop.
		 */
		result = dpia_training_end(link, &lt_settings, repeater_id);
		if (result != LINK_TRAINING_SUCCESS)
			break;
	}

	/* Double-check link status if training successful; gracefully abort
	 * training of current hop if training failed due to message tunneling
	 * failure; end training of hop if training ended conventionally and
	 * falling back to lower bandwidth settings possible.
	 */
	if (result == LINK_TRAINING_SUCCESS) {
		fsleep(5000);
		if (!link->is_automated)
			result = dp_check_link_loss_status(link, &lt_settings);
	} else if (result == LINK_TRAINING_ABORT)
		dpia_training_abort(link, &lt_settings, repeater_id);
	else
		dpia_training_end(link, &lt_settings, repeater_id);

	return result;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements dp 8b/10b link training software policies and * sequences. 
*/ #include "link_dp_training_8b_10b.h" #include "link_dpcd.h" #include "link_dp_phy.h" #include "link_dp_capability.h" #define DC_LOGGER \ link->ctx->logger static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, const struct dc_link_settings *link_settings) { union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 100; memset(&training_rd_interval, 0, sizeof(training_rd_interval)); if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { core_link_read_dpcd( link, DP_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; } return wait_in_micro_secs; } static uint32_t get_eq_training_aux_rd_interval( struct dc_link *link, const struct dc_link_settings *link_settings) { union training_aux_rd_interval training_rd_interval; memset(&training_rd_interval, 0, sizeof(training_rd_interval)); if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { core_link_read_dpcd( link, DP_128B132B_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { core_link_read_dpcd( link, DP_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); } switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { case 0: return 400; case 1: return 4000; case 2: return 8000; case 3: return 12000; case 4: return 16000; case 5: return 32000; case 6: return 64000; default: return 400; } } void decide_8b_10b_training_settings( struct dc_link *link, const struct dc_link_settings *link_setting, struct link_training_settings *lt_settings) { memset(lt_settings, '\0', sizeof(struct link_training_settings)); /* Initialize link 
settings */ lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; lt_settings->link_settings.link_rate = link_setting->link_rate; lt_settings->link_settings.lane_count = link_setting->lane_count; /* TODO hard coded to SS for now * lt_settings.link_settings.link_spread = * dal_display_path_is_ss_supported( * path_mode->display_path) ? * LINK_SPREAD_05_DOWNSPREAD_30KHZ : * LINK_SPREAD_DISABLED; */ lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); lt_settings->enhanced_framing = 1; lt_settings->should_set_fec_ready = true; lt_settings->disallow_per_lane_settings = true; lt_settings->always_match_dpcd_with_hw_lane_settings = true; lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) { bool is_lttpr_present = dp_is_lttpr_present(link); bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable; bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware; if (!is_lttpr_present) return LTTPR_MODE_NON_LTTPR; if (vbios_lttpr_aware) { if (vbios_lttpr_force_non_transparent) { DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); return LTTPR_MODE_NON_TRANSPARENT; } else { DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); return LTTPR_MODE_TRANSPARENT; } } if 
(link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && link->dc->caps.extended_aux_timeout_support) { DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n"); return LTTPR_MODE_NON_TRANSPARENT; } DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n"); return LTTPR_MODE_NON_LTTPR; } enum link_training_result perform_8b_10b_clock_recovery_sequence( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t offset) { uint32_t retries_cr; uint32_t retry_count; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; retries_cr = 0; retry_count = 0; memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); memset(&dpcd_lane_status_updated, '\0', sizeof(dpcd_lane_status_updated)); if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS */ /* between level 0 and level 1 continuously, here * we try for CR lock for LinkTrainingMaxCRRetry count*/ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { /* 1. call HWSS to set lane settings*/ dp_set_hw_lane_settings( link, link_res, lt_settings, offset); /* 2. update DPCD of the receiver*/ if (!retry_count) /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration.*/ dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, lt_settings->pattern_for_cr, offset); else dpcd_set_lane_settings( link, lt_settings, offset); /* 3. 
 wait receiver to lock-on*/
		wait_time_microsec = lt_settings->cr_pattern_time;

		dp_wait_for_training_aux_rd_interval(
				link,
				wait_time_microsec);

		/* 4. Read lane status and requested drive
		* settings as set by the sink
		*/
		dp_get_lane_status_and_lane_adjust(
				link,
				lt_settings,
				dpcd_lane_status,
				&dpcd_lane_status_updated,
				dpcd_lane_adjust,
				offset);

		/* 5. check CR done*/
		if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
			DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
			return LINK_TRAINING_SUCCESS;
		}

		/* 6. max VS reached*/
		if ((link_dp_get_encoding_format(&lt_settings->link_settings) ==
				DP_8b_10b_ENCODING) &&
				dp_is_max_vs_reached(lt_settings))
			break;

		/* 7. same lane settings*/
		/* Note: settings are the same for all lanes,
		 * so comparing first lane is sufficient*/
		if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
				lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
						dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
			retries_cr++;
		else if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
				lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
						dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
			retries_cr++;
		else
			retries_cr = 0;

		/* 8. update VS/PE/PC2 in lt_settings*/
		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
		retry_count++;
	}

	if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
		ASSERT(0);
		DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
			__func__,
			LINK_TRAINING_MAX_CR_RETRY);
	}

	/* Map the per-lane DPCD status to the specific CR failure code. */
	return dp_get_cr_failure(lane_count, dpcd_lane_status);
}

/* Run the 8b/10b channel-equalization phase against the hop selected by
 * @offset (0 = DPRX, >0 = LTTPR repeater): transmit the EQ pattern, poll lane
 * status at the hop's advertised interval and adjust drive settings until
 * EQ/symbol-lock/interlane-alignment are all reported, CR is lost, or the
 * retry budget is exhausted.
 */
enum link_training_result perform_8b_10b_channel_equalization_sequence(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct link_training_settings *lt_settings,
	uint32_t offset)
{
	enum dc_dp_training_pattern tr_pattern;
	uint32_t retries_ch_eq;
	uint32_t wait_time_microsec;
	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
	union lane_align_status_updated dpcd_lane_status_updated = {0};
	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};

	/* Note: also check that TPS4 is a supported feature*/
	tr_pattern = lt_settings->pattern_for_eq;

	/* Repeaters are always trained with TPS4 in 8b/10b mode. */
	if (is_repeater(lt_settings, offset) && link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;

	dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);

	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
	retries_ch_eq++) {

		dp_set_hw_lane_settings(link, link_res, lt_settings, offset);

		/* 2. update DPCD*/
		if (!retries_ch_eq)
			/* EPR #361076 - write as a 5-byte burst,
			 * but only for the 1-st iteration
			 */
			dpcd_set_lt_pattern_and_lane_settings(
					link,
					lt_settings,
					tr_pattern, offset);
		else
			dpcd_set_lane_settings(link, lt_settings, offset);

		/* 3. wait for receiver to lock-on*/
		wait_time_microsec = lt_settings->eq_pattern_time;

		if (is_repeater(lt_settings, offset))
			wait_time_microsec =
					dp_translate_training_aux_read_interval(
						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);

		dp_wait_for_training_aux_rd_interval(
				link,
				wait_time_microsec);

		/* 4. Read lane status and requested
		 * drive settings as set by the sink*/
		dp_get_lane_status_and_lane_adjust(
			link,
			lt_settings,
			dpcd_lane_status,
			&dpcd_lane_status_updated,
			dpcd_lane_adjust,
			offset);

		/* 5. check CR done*/
		if (!dp_is_cr_done(lane_count, dpcd_lane_status))
			return dpcd_lane_status[0].bits.CR_DONE_0 ?
					LINK_TRAINING_EQ_FAIL_CR_PARTIAL :
					LINK_TRAINING_EQ_FAIL_CR;

		/* 6. check CHEQ done*/
		if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
				dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
				dp_is_interlane_aligned(dpcd_lane_status_updated))
			return LINK_TRAINING_SUCCESS;

		/* 7. update VS/PE/PC2 in lt_settings*/
		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
	}

	return LINK_TRAINING_EQ_FAIL_EQ;
}

/* NOTE(review): this function is truncated at the end of the visible chunk;
 * the remainder of its body lies outside this view.
 */
enum link_training_result dp_perform_8b_10b_link_training(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct link_training_settings *lt_settings)
{
	enum link_training_result status = LINK_TRAINING_SUCCESS;

	uint8_t repeater_cnt;
	uint8_t repeater_id;
	uint8_t lane = 0;

	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
		start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);

	/* 1. set link rate, lane count and spread. */
	dpcd_set_link_settings(link, lt_settings);

	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {

		/* 2.
perform link training (set link training done * to false is done as well) */ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); repeater_id--) { status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); if (status != LINK_TRAINING_SUCCESS) { repeater_training_done(link, repeater_id); break; } status = perform_8b_10b_channel_equalization_sequence(link, link_res, lt_settings, repeater_id); if (status == LINK_TRAINING_SUCCESS) DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); repeater_training_done(link, repeater_id); if (status != LINK_TRAINING_SUCCESS) break; for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lt_settings->dpcd_lane_settings[lane].raw = 0; lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; } } } if (status == LINK_TRAINING_SUCCESS) { status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); if (status == LINK_TRAINING_SUCCESS) { status = perform_8b_10b_channel_equalization_sequence(link, link_res, lt_settings, DPRX); if (status == LINK_TRAINING_SUCCESS) DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); } } return status; }
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements dp 128b/132b link training software policies and * sequences. 
*/
#include "link_dp_training_128b_132b.h"
#include "link_dp_training_8b_10b.h"
#include "link_dpcd.h"
#include "link_dp_phy.h"
#include "link_dp_capability.h"

#define DC_LOGGER \
	link->ctx->logger

/*
 * Write the current TX_FFE lane settings for all lanes to the sink's
 * DP_TRAINING_LANE0_SET block in a single burst. Returns the DPCD write
 * status.
 */
static enum dc_status dpcd_128b_132b_set_lane_settings(
		struct dc_link *link,
		const struct link_training_settings *link_training_setting)
{
	enum dc_status status = core_link_write_dpcd(link,
			DP_TRAINING_LANE0_SET,
			(uint8_t *)(link_training_setting->dpcd_lane_settings),
			sizeof(link_training_setting->dpcd_lane_settings));

	DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
			__func__,
			DP_TRAINING_LANE0_SET,
			link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
	return status;
}

/*
 * Read the sink's 128b/132b training AUX read interval and convert it to
 * microseconds in *interval_in_us.
 */
static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
		uint32_t *interval_in_us)
{
	union dp_128b_132b_training_aux_rd_interval dpcd_interval;
	uint32_t interval_unit = 0;

	dpcd_interval.raw = 0;
	core_link_read_dpcd(link, DP_128B132B_TRAINING_AUX_RD_INTERVAL,
			&dpcd_interval.raw, sizeof(dpcd_interval.raw));
	interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
	/* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
	 * INTERVAL_UNIT. The maximum is 256 ms
	 */
	*interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
}

/*
 * 128b/132b channel-equalization phase: transmit TPS1, then TPS2 with the
 * sink-requested FFE presets, and poll until channel EQ done and interlane
 * alignment are both reported. Gives up on AUX failure (ABORT), loop-count
 * limit, overall EQ wait-time limit, or a sink-reported LT failure.
 */
static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings)
{
	uint8_t loop_count;
	uint32_t aux_rd_interval = 0;
	uint32_t wait_time = 0;
	union lane_align_status_updated dpcd_lane_status_updated = {0};
	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
	enum dc_status status = DC_OK;
	enum link_training_result result = LINK_TRAINING_SUCCESS;

	/* Transmit 128b/132b_TPS1 over Main-Link */
	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);

	/* Set TRAINING_PATTERN_SET to 01h */
	dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);

	/* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
	dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
	dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
			&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
	dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);

	/* Set loop counter to start from 1 */
	loop_count = 1;

	/* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
	dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
			lt_settings->pattern_for_eq, DPRX);

	/* poll for channel EQ done */
	while (result == LINK_TRAINING_SUCCESS) {
		dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
		wait_time += aux_rd_interval;
		status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
				&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
		dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
		if (status != DC_OK) {
			result = LINK_TRAINING_ABORT;
		} else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
				dpcd_lane_status)) {
			/* pass */
			break;
		} else if (loop_count >= lt_settings->eq_loop_count_limit) {
			result = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
			result = DP_128b_132b_LT_FAILED;
		} else {
			/* retry with the newly requested FFE presets */
			dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
			dpcd_128b_132b_set_lane_settings(link, lt_settings);
		}
		loop_count++;
	}

	/* poll for EQ interlane align done */
	while (result == LINK_TRAINING_SUCCESS) {
		if (status != DC_OK) {
			result = LINK_TRAINING_ABORT;
		} else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
			/* pass */
			break;
		} else if (wait_time >= lt_settings->eq_wait_time_limit) {
			result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
			result = DP_128b_132b_LT_FAILED;
		} else {
			dp_wait_for_training_aux_rd_interval(link,
					lt_settings->eq_pattern_time);
			wait_time += lt_settings->eq_pattern_time;
			status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
					&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
		}
	}

	return result;
}

/*
 * 128b/132b CDS (clock-data switch) phase: set the CDS training pattern and
 * poll until symbol lock plus CDS interlane alignment are reported, or the
 * sink flags failure, or the CDS wait-time limit expires.
 */
static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings)
{
	/* Assumption: assume hardware has transmitted eq pattern */
	enum dc_status status = DC_OK;
	enum link_training_result result = LINK_TRAINING_SUCCESS;
	union lane_align_status_updated dpcd_lane_status_updated = {0};
	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
	uint32_t wait_time = 0;

	/* initiate CDS done sequence */
	dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);

	/* poll for CDS interlane align done and symbol lock */
	while (result == LINK_TRAINING_SUCCESS) {
		dp_wait_for_training_aux_rd_interval(link,
				lt_settings->cds_pattern_time);
		wait_time += lt_settings->cds_pattern_time;
		status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
				&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
		if (status != DC_OK) {
			result = LINK_TRAINING_ABORT;
		} else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
				dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
			/* pass */
			break;
		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
			result = DP_128b_132b_LT_FAILED;
		} else if (wait_time >= lt_settings->cds_wait_time_limit) {
			result = DP_128b_132b_CDS_DONE_TIMEOUT;
		}
	}

	return result;
}

/*
 * Full 128b/132b link training: channel EQ phase followed by CDS phase.
 * With the legacy_dp2_lt debug option set, falls back to the 8b/10b
 * training sequence instead.
 */
enum link_training_result dp_perform_128b_132b_link_training(
		struct dc_link *link,
		const struct link_resource *link_res,
		struct link_training_settings *lt_settings)
{
	enum link_training_result result = LINK_TRAINING_SUCCESS;

	/* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
	if (link->dc->debug.legacy_dp2_lt) {
		struct link_training_settings legacy_settings;

		decide_8b_10b_training_settings(link,
				&lt_settings->link_settings,
				&legacy_settings);
		return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
	}

	dpcd_set_link_settings(link, lt_settings);

	if (result == LINK_TRAINING_SUCCESS) {
		result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
		if (result == LINK_TRAINING_SUCCESS)
			DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__);
	}

	if (result == LINK_TRAINING_SUCCESS) {
		result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
		if (result == LINK_TRAINING_SUCCESS)
			DC_LOG_HW_LINK_TRAINING("%s: CDS done.\n", __func__);
	}

	return result;
}

/*
 * Populate @lt_settings with the default timing limits, training patterns
 * and LTTPR mode used for 128b/132b link training of @link_settings.
 * Times are in microseconds; the CDS wait limit scales with the number of
 * LTTPRs in the chain.
 */
void decide_128b_132b_training_settings(struct dc_link *link,
		const struct dc_link_settings *link_settings,
		struct link_training_settings *lt_settings)
{
	memset(lt_settings, 0, sizeof(*lt_settings));

	lt_settings->link_settings = *link_settings;
	/* TODO: should decide link spread when populating link_settings */
	lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
			LINK_SPREAD_05_DOWNSPREAD_30KHZ;

	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
	lt_settings->eq_pattern_time = 2500;
	lt_settings->eq_wait_time_limit = 400000;
	lt_settings->eq_loop_count_limit = 20;
	lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
	lt_settings->cds_pattern_time = 2500;
	lt_settings->cds_wait_time_limit = (dp_parse_lttpr_repeater_count(
			link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
	lt_settings->disallow_per_lane_settings = true;
	lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
	dp_hw_to_dpcd_lane_settings(lt_settings,
			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}

/*
 * 128b/132b links train LTTPRs in non-transparent mode whenever a repeater
 * is present; otherwise no-LTTPR mode is used.
 */
enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
{
	enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;

	if (dp_is_lttpr_present(link))
		mode = LTTPR_MODE_NON_TRANSPARENT;

	DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
	return mode;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * * This file implements basic dpcd read/write functionality. It also does basic * dpcd range check to ensure that every dpcd request is compliant with specs * range requirements. 
*/ #include "link_dpcd.h" #include <drm/display/drm_dp_helper.h> #include "dm_helpers.h" #define END_ADDRESS(start, size) (start + size - 1) #define ADDRESS_RANGE_SIZE(start, end) (end - start + 1) struct dpcd_address_range { uint32_t start; uint32_t end; }; static enum dc_status internal_link_read_dpcd( struct dc_link *link, uint32_t address, uint8_t *data, uint32_t size) { if (!link->aux_access_disabled && !dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size)) { return DC_ERROR_UNEXPECTED; } return DC_OK; } static enum dc_status internal_link_write_dpcd( struct dc_link *link, uint32_t address, const uint8_t *data, uint32_t size) { if (!link->aux_access_disabled && !dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size)) { return DC_ERROR_UNEXPECTED; } return DC_OK; } /* * Partition the entire DPCD address space * XXX: This partitioning must cover the entire DPCD address space, * and must contain no gaps or overlapping address ranges. */ static const struct dpcd_address_range mandatory_dpcd_partitions[] = { { 0, DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1) - 1}, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) - 1 }, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3) - 1 }, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4) - 1 }, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5) - 1 }, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6) - 1 }, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7) - 1 }, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) - 1 }, { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) 
- 1 }, /* * The FEC registers are contiguous */ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 }, { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) - 1 }, { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3) - 1 }, { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4) - 1 }, { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5) - 1 }, { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6) - 1 }, { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7) - 1 }, { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8), DP_LTTPR_MAX_ADD }, /* all remaining DPCD addresses */ { DP_LTTPR_MAX_ADD + 1, DP_DPCD_MAX_ADD } }; static inline bool do_addresses_intersect_with_range( const struct dpcd_address_range *range, const uint32_t start_address, const uint32_t end_address) { return start_address <= range->end && end_address >= range->start; } static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint32_t size) { const uint32_t end_address = END_ADDRESS(address, size); uint32_t partition_iterator = 0; /* * find current partition * this loop spins forever if partition map above is not surjective */ while (!do_addresses_intersect_with_range(&mandatory_dpcd_partitions[partition_iterator], address, end_address)) partition_iterator++; if (end_address < mandatory_dpcd_partitions[partition_iterator].end) return size; return ADDRESS_RANGE_SIZE(address, mandatory_dpcd_partitions[partition_iterator].end); } /* * Ranges of DPCD addresses that must be read in a single transaction * XXX: Do not allow any two address ranges in this array to overlap */ static const struct dpcd_address_range mandatory_dpcd_blocks[] = { { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }}; /* * extend addresses to read 
all mandatory blocks together */ static void dpcd_extend_address_range( const uint32_t in_address, uint8_t * const in_data, const uint32_t in_size, uint32_t *out_address, uint8_t **out_data, uint32_t *out_size) { const uint32_t end_address = END_ADDRESS(in_address, in_size); const struct dpcd_address_range *addr_range; struct dpcd_address_range new_addr_range; uint32_t i; new_addr_range.start = in_address; new_addr_range.end = end_address; for (i = 0; i < ARRAY_SIZE(mandatory_dpcd_blocks); i++) { addr_range = &mandatory_dpcd_blocks[i]; if (addr_range->start <= in_address && addr_range->end >= in_address) new_addr_range.start = addr_range->start; if (addr_range->start <= end_address && addr_range->end >= end_address) new_addr_range.end = addr_range->end; } *out_address = in_address; *out_size = in_size; *out_data = in_data; if (new_addr_range.start != in_address || new_addr_range.end != end_address) { *out_address = new_addr_range.start; *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end); *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL); } } /* * Reduce the AUX reply down to the values the caller requested */ static void dpcd_reduce_address_range( const uint32_t extended_address, uint8_t * const extended_data, const uint32_t extended_size, const uint32_t reduced_address, uint8_t * const reduced_data, const uint32_t reduced_size) { const uint32_t offset = reduced_address - extended_address; /* * If the address is same, address was not extended. * So we do not need to free any memory. * The data is in original buffer(reduced_data). 
*/ if (extended_data == reduced_data) return; memcpy(&extended_data[offset], reduced_data, reduced_size); kfree(extended_data); } enum dc_status core_link_read_dpcd( struct dc_link *link, uint32_t address, uint8_t *data, uint32_t size) { uint32_t extended_address; uint32_t partitioned_address; uint8_t *extended_data; uint32_t extended_size; /* size of the remaining partitioned address space */ uint32_t size_left_to_read; enum dc_status status; /* size of the next partition to be read from */ uint32_t partition_size; uint32_t data_index = 0; dpcd_extend_address_range(address, data, size, &extended_address, &extended_data, &extended_size); partitioned_address = extended_address; size_left_to_read = extended_size; while (size_left_to_read) { partition_size = dpcd_get_next_partition_size(partitioned_address, size_left_to_read); status = internal_link_read_dpcd(link, partitioned_address, &extended_data[data_index], partition_size); if (status != DC_OK) break; partitioned_address += partition_size; data_index += partition_size; size_left_to_read -= partition_size; } dpcd_reduce_address_range(extended_address, extended_data, extended_size, address, data, size); return status; } enum dc_status core_link_write_dpcd( struct dc_link *link, uint32_t address, const uint8_t *data, uint32_t size) { uint32_t partition_size; uint32_t data_index = 0; enum dc_status status; while (size) { partition_size = dpcd_get_next_partition_size(address, size); status = internal_link_write_dpcd(link, address, &data[data_index], partition_size); if (status != DC_OK) break; address += partition_size; data_index += partition_size; size -= partition_size; } return status; }
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements basic dp phy functionality such as enable/disable phy * output and set lane/drive settings. This file is responsible for maintaining * and update software state representing current phy status such as current * link settings. */ #include "link_dp_phy.h" #include "link_dpcd.h" #include "link_dp_training.h" #include "link_dp_capability.h" #include "clk_mgr.h" #include "resource.h" #include "link_enc_cfg.h" #define DC_LOGGER \ link->ctx->logger void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on) { uint8_t state; state = on ? 
DP_POWER_STATE_D0 : DP_POWER_STATE_D3;

	/* skip while synchronous LT owns the AUX channel */
	if (link->sync_lt_in_progress)
		return;

	core_link_write_dpcd(link, DP_SET_POWER, &state, sizeof(state));
}

/*
 * Enable the DP PHY output for @link: record the new link settings, bring up
 * the link output through HWSS, and power the receiver to D0.
 */
void dp_enable_link_phy(
	struct dc_link *link,
	const struct link_resource *link_res,
	enum signal_type signal,
	enum clock_source_id clock_source,
	const struct dc_link_settings *link_settings)
{
	link->cur_link_settings = *link_settings;
	link->dc->hwss.enable_dp_link_output(link, link_res, signal,
			clock_source, link_settings);
	dpcd_write_rx_power_ctrl(link, true);
}

/*
 * Disable the DP PHY output for @link: power the receiver down (unless the
 * keep-receiver-powered workaround is set), disable the link output, clear
 * the cached link settings and notify the clock manager of the rate change.
 */
void dp_disable_link_phy(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;

	if (!link->wa_flags.dp_keep_receiver_powered)
		dpcd_write_rx_power_ctrl(link, false);

	dc->hwss.disable_link_output(link, link_res, signal);

	/* Clear current link setting.*/
	memset(&link->cur_link_settings, 0,
			sizeof(link->cur_link_settings));

	if (dc->clk_mgr->funcs->notify_link_rate_change)
		dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}

/* True when @offset addresses the LTTPR closest to the source (highest id). */
static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
{
	return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ==
			offset);
}

/*
 * Program the source-side lane (drive) settings and cache them in
 * link->cur_lane_setting. In non-transparent LTTPR mode the source PHY only
 * drives the immediate downstream repeater, so settings for any other
 * @offset are skipped.
 */
void dp_set_hw_lane_settings(
	struct dc_link *link,
	const struct link_resource *link_res,
	const struct link_training_settings *link_settings,
	uint32_t offset)
{
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);

	if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
			!is_immediate_downstream(link, offset))
		return;

	if (link_hwss->ext.set_dp_lane_settings)
		link_hwss->ext.set_dp_lane_settings(link, link_res,
				&link_settings->link_settings,
				link_settings->hw_lane_settings);

	memmove(link->cur_lane_setting,
			link_settings->hw_lane_settings,
			sizeof(link->cur_lane_setting));
}

/*
 * Apply the current drive settings on the source PHY, translate them to DPCD
 * form, and mirror them to the DPRX sink.
 */
void dp_set_drive_settings(
	struct dc_link *link,
	const struct link_resource *link_res,
	struct link_training_settings *lt_settings)
{
	/* program ASIC PHY settings*/
	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);

	dp_hw_to_dpcd_lane_settings(lt_settings,
			lt_settings->hw_lane_settings,
			lt_settings->dpcd_lane_settings);

	/* Notify DP sink the PHY settings from source */
	dpcd_set_lane_settings(link, lt_settings, DPRX);
}

/*
 * Move FEC into or out of the "ready" state on both the sink (via
 * DP_FEC_CONFIGURATION) and the link encoder, keeping link->fec_state in
 * sync. Returns the DPCD write status (DC_OK when FEC is not applicable).
 */
enum dc_status dp_set_fec_ready(struct dc_link *link,
		const struct link_resource *link_res, bool ready)
{
	/* FEC has to be "set ready" before the link training.
	 * The policy is to always train with FEC
	 * if the sink supports it and leave it enabled on link.
	 * If FEC is not supported, disable it.
	 */
	struct link_encoder *link_enc = NULL;
	enum dc_status status = DC_OK;
	uint8_t fec_config = 0;

	link_enc = link_enc_cfg_get_link_enc(link);
	ASSERT(link_enc);

	if (!dp_should_enable_fec(link))
		return status;

	if (link_enc->funcs->fec_set_ready &&
			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
		if (ready) {
			fec_config = 1;
			status = core_link_write_dpcd(link,
					DP_FEC_CONFIGURATION,
					&fec_config,
					sizeof(fec_config));
			if (status == DC_OK) {
				link_enc->funcs->fec_set_ready(link_enc, true);
				link->fec_state = dc_link_fec_ready;
			} else {
				/* sink rejected the write — keep encoder and
				 * software state consistent (not ready)
				 */
				link_enc->funcs->fec_set_ready(link_enc, false);
				link->fec_state = dc_link_fec_not_ready;
				dm_error("dpcd write failed to set fec_ready");
			}
		} else if (link->fec_state == dc_link_fec_ready) {
			fec_config = 0;
			status = core_link_write_dpcd(link,
					DP_FEC_CONFIGURATION,
					&fec_config,
					sizeof(fec_config));
			link_enc->funcs->fec_set_ready(link_enc, false);
			link->fec_state = dc_link_fec_not_ready;
		}
	}

	return status;
}

/*
 * Toggle FEC on the link encoder. Enabling is only valid from the
 * dc_link_fec_ready state and is delayed per the DP spec requirement below;
 * disabling drops the state back to ready.
 */
void dp_set_fec_enable(struct dc_link *link, bool enable)
{
	struct link_encoder *link_enc = NULL;

	link_enc = link_enc_cfg_get_link_enc(link);
	ASSERT(link_enc);

	if (!dp_should_enable_fec(link))
		return;

	if (link_enc->funcs->fec_set_enable &&
			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
		if (link->fec_state == dc_link_fec_ready && enable) {
			/* Accord to DP spec, FEC enable sequence can first
			 * be transmitted anytime after 1000 LL codes have
			 * been transmitted on the link after link training
			 * completion. Using 1 lane RBR should have the maximum
			 * time for transmitting 1000 LL codes which is 6.173 us.
			 * So use 7 microseconds delay instead.
			 */
			udelay(7);
			link_enc->funcs->fec_set_enable(link_enc, true);
			link->fec_state = dc_link_fec_enabled;
		} else if (link->fec_state == dc_link_fec_enabled && !enable) {
			link_enc->funcs->fec_set_enable(link_enc, false);
			link->fec_state = dc_link_fec_ready;
		}
	}
}
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /*********************************************************************/ // USB4 DPIA BANDWIDTH ALLOCATION LOGIC /*********************************************************************/ #include "link_dp_dpia_bw.h" #include "link_dpcd.h" #include "dc_dmub_srv.h" #define DC_LOGGER \ link->ctx->logger #define Kbps_TO_Gbps (1000 * 1000) // ------------------------------------------------------------------ // PRIVATE FUNCTIONS // ------------------------------------------------------------------ /* * Always Check the following: * - Is it USB4 link? * - Is HPD HIGH? * - Is BW Allocation Support Mode enabled on DP-Tx? 
*/
static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
{
	return (tmp && DISPLAY_ENDPOINT_USB4_DPIA == tmp->ep_type
			&& tmp->hpd_status
			&& tmp->dpia_bw_alloc_config.bw_alloc_enabled);
}

/* Reset the per-link BW allocation bookkeeping to its disabled defaults. */
static void reset_bw_alloc_struct(struct dc_link *link)
{
	link->dpia_bw_alloc_config.bw_alloc_enabled = false;
	link->dpia_bw_alloc_config.sink_verified_bw = 0;
	link->dpia_bw_alloc_config.sink_max_bw = 0;
	link->dpia_bw_alloc_config.estimated_bw = 0;
	link->dpia_bw_alloc_config.bw_granularity = 0;
	link->dpia_bw_alloc_config.response_ready = false;
}

/*
 * Read the CM's BW granularity from DPCD and decode it to the divisor used
 * when converting DPCD BW units to Kbps (0 -> 4, 1/other -> 2).
 */
static uint8_t get_bw_granularity(struct dc_link *link)
{
	uint8_t bw_granularity = 0;

	core_link_read_dpcd(
			link,
			DP_BW_GRANULALITY,
			&bw_granularity,
			sizeof(uint8_t));

	switch (bw_granularity & 0x3) {
	case 0:
		bw_granularity = 4;
		break;
	case 1:
	default:
		bw_granularity = 2;
		break;
	}

	return bw_granularity;
}

/*
 * Read the CM's estimated BW from DPCD and convert it to Kbps using the
 * previously read granularity (assumes bw_granularity is already non-zero).
 */
static int get_estimated_bw(struct dc_link *link)
{
	uint8_t bw_estimated_bw = 0;

	core_link_read_dpcd(
			link,
			ESTIMATED_BW,
			&bw_estimated_bw,
			sizeof(uint8_t));

	return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}

/* Account @bw_needed Kbps against the stream's allocated-BW counter. */
static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
{
	if (bw_needed > 0)
		*stream_allocated_bw += bw_needed;

	return true;
}

/*
 * Release @bw_to_dealloc Kbps from the stream's allocated-BW counter and, on
 * unplug (HPD low), reset the link's BW allocation state. Always returns
 * true.
 */
static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
{
	bool ret = false;

	if (*stream_allocated_bw > 0) {
		*stream_allocated_bw -= bw_to_dealloc;
		ret = true;
	} else {
		//Do nothing for now
		ret = true;
	}

	// Unplug so reset values
	if (!link->hpd_status)
		reset_bw_alloc_struct(link);

	return ret;
}

/*
 * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
 * granuality, Driver_ID, CM_Group, & populate the BW allocation structs
 * for host router and dpia
 */
static void init_usb4_bw_struct(struct dc_link *link)
{
	// Init the known values
	link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
	link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
}

/*
 * Return the lowest link_index among all USB4 DPIA links, used as the base
 * for mapping a link to its host router slot. Returns 0xFF when no DPIA
 * link exists.
 */
static uint8_t get_lowest_dpia_index(struct dc_link *link)
{
	const struct dc *dc_struct = link->dc;
	uint8_t idx = 0xFF;
	int i;

	for (i = 0; i < MAX_PIPES * 2; ++i) {
		if (!dc_struct->links[i] ||
				dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
			continue;

		if (idx > dc_struct->links[i]->link_index)
			idx = dc_struct->links[i]->link_index;
	}

	return idx;
}

/*
 * Get the Max Available BW or Max Estimated BW for each Host Router
 *
 * @link: pointer to the dc_link struct instance
 * @type: HOST_ROUTER_BW_ESTIMATED or HOST_ROUTER_BW_ALLOCATED
 *
 * return: sum (in Kbps) of the selected BW figure over all plugged DPIA
 * links that map to the same host router as @link (two DPIAs per router)
 */
static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
{
	const struct dc *dc_struct = link->dc;
	uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
	uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
	struct dc_link *link_temp;
	int total_bw = 0;
	int i;

	for (i = 0; i < MAX_PIPES * 2; ++i) {
		if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
			continue;

		link_temp = dc_struct->links[i];
		if (!link_temp || !link_temp->hpd_status)
			continue;

		idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;

		if (idx_temp == idx) {
			if (type == HOST_ROUTER_BW_ESTIMATED)
				total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
			else if (type == HOST_ROUTER_BW_ALLOCATED)
				total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
		}
	}

	return total_bw;
}

/*
 * Cleanup function for when the dpia is unplugged to reset struct
 * and perform any required clean up
 *
 * @link: pointer to the dc_link struct instance
 *
 * return: none
 */
static bool dpia_bw_alloc_unplug(struct dc_link *link)
{
	if (!link)
		return true;

	return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
			link->dpia_bw_alloc_config.sink_allocated_bw, link);
}

/*
 * Convert @req_bw (Kbps) into CM granularity units (rounding up), clamp it
 * to the CM's estimated BW, and write it to the REQUESTED_BW DPCD register.
 * No write is issued when the rounded request equals the current allocation;
 * a successful write clears response_ready until the CM answers.
 */
static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
{
	uint8_t requested_bw;
	uint32_t temp;

	// 1. Add check for this corner case #1
	if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
		req_bw = link->dpia_bw_alloc_config.estimated_bw;

	temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
	requested_bw = temp / Kbps_TO_Gbps;

	// Always make sure to add more to account for floating points
	if (temp % Kbps_TO_Gbps)
		++requested_bw;

	// 2. Add check for this corner case #2
	req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
	if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
		return;

	if (core_link_write_dpcd(
			link,
			REQUESTED_BW,
			&requested_bw,
			sizeof(uint8_t)) == DC_OK)
		link->dpia_bw_alloc_config.response_ready = false; // Reset flag
}

/*
 * Return the response_ready flag from dc_link struct
 *
 * @link: pointer to the dc_link struct instance
 *
 * return: response_ready flag from dc_link struct
 */
static bool get_cm_response_ready_flag(struct dc_link *link)
{
	return link->dpia_bw_alloc_config.response_ready;
}

// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------

/*
 * Probe DPCD for DPIA and CM bandwidth-allocation support and, when both
 * advertise it, request that DPTX BW allocation mode be turned on.
 */
bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
{
	bool ret = false;
	uint8_t response = 0,
			bw_support_dpia = 0,
			bw_support_cm = 0;

	if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
		goto out;

	if (core_link_read_dpcd(
			link,
			DP_TUNNELING_CAPABILITIES,
			&response,
			sizeof(uint8_t)) == DC_OK)
		bw_support_dpia = (response >> 7) & 1;

	if (core_link_read_dpcd(
			link,
			USB4_DRIVER_BW_CAPABILITY,
			&response,
			sizeof(uint8_t)) == DC_OK)
		bw_support_cm = (response >> 7) & 1;

	/* Send request acknowledgment to Turn ON DPTX support */
	if (bw_support_cm && bw_support_dpia) {

		response = 0x80;
		if (core_link_write_dpcd(
				link,
				DPTX_BW_ALLOCATION_MODE_CONTROL,
				&response,
				sizeof(uint8_t)) != DC_OK) {
			DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
					__func__);
		} else {
			// SUCCESS Enabled DPtx BW Allocation Mode Support
link->dpia_bw_alloc_config.bw_alloc_enabled = true; DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n", __func__); ret = true; init_usb4_bw_struct(link); } } out: return ret; } void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result) { int bw_needed = 0; int estimated = 0; int host_router_total_estimated_bw = 0; if (!get_bw_alloc_proceed_flag((link))) return; switch (result) { case DPIA_BW_REQ_FAILED: DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__); // Update the new Estimated BW value updated by CM link->dpia_bw_alloc_config.estimated_bw = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw); link->dpia_bw_alloc_config.response_ready = false; /* * If FAIL then it is either: * 1. Due to DP-Tx trying to allocate more than available i.e. it failed locally * => get estimated and allocate that * 2. Due to the fact that DP-Tx tried to allocated ESTIMATED BW and failed then * CM will have to update 0xE0023 with new ESTIMATED BW value. */ break; case DPIA_BW_REQ_SUCCESS: DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__); // 1. SUCCESS 1st time before any Pruning is done // 2. SUCCESS after prev. FAIL before any Pruning is done // 3. SUCCESS after Pruning is done but before enabling link bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); // 1. if (!link->dpia_bw_alloc_config.sink_allocated_bw) { allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link); link->dpia_bw_alloc_config.sink_verified_bw = link->dpia_bw_alloc_config.sink_allocated_bw; // SUCCESS from first attempt if (link->dpia_bw_alloc_config.sink_allocated_bw > link->dpia_bw_alloc_config.sink_max_bw) link->dpia_bw_alloc_config.sink_verified_bw = link->dpia_bw_alloc_config.sink_max_bw; } // 3. 
else if (link->dpia_bw_alloc_config.sink_allocated_bw) { // Find out how much do we need to de-alloc if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed) deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link); else allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link); } // 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA // => check if estimated_bw changed link->dpia_bw_alloc_config.response_ready = true; break; case DPIA_EST_BW_CHANGED: DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__); estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED); // 1. If due to unplug of other sink if (estimated == host_router_total_estimated_bw) { // First update the estimated & max_bw fields if (link->dpia_bw_alloc_config.estimated_bw < estimated) link->dpia_bw_alloc_config.estimated_bw = estimated; } // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw else { // We lost estimated bw usually due to plug event of other dpia link->dpia_bw_alloc_config.estimated_bw = estimated; } break; case DPIA_BW_ALLOC_CAPS_CHANGED: DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__); link->dpia_bw_alloc_config.bw_alloc_enabled = false; break; } } int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw) { int ret = 0; uint8_t timeout = 10; if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type && link->dpia_bw_alloc_config.bw_alloc_enabled)) goto out; //1. 
Hot Plug if (link->hpd_status && peak_bw > 0) { // If DP over USB4 then we need to check BW allocation link->dpia_bw_alloc_config.sink_max_bw = peak_bw; set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw); do { if (!(timeout > 0)) timeout--; else break; fsleep(10 * 1000); } while (!get_cm_response_ready_flag(link)); if (!timeout) ret = 0;// ERROR TIMEOUT waiting for response for allocating bw else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0) ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED); } //2. Cold Unplug else if (!link->hpd_status) dpia_bw_alloc_unplug(link); out: return ret; } int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) { int ret = 0; uint8_t timeout = 10; if (!get_bw_alloc_proceed_flag(link)) goto out; /* * Sometimes stream uses same timing parameters as the already * allocated max sink bw so no need to re-alloc */ if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) { set_usb4_req_bw_req(link, req_bw); do { if (!(timeout > 0)) timeout--; else break; udelay(10 * 1000); } while (!get_cm_response_ready_flag(link)); if (!timeout) ret = 0;// ERROR TIMEOUT waiting for response for allocating bw else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0) ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED); } out: return ret; } bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) { bool ret = true; int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }; uint8_t lowest_dpia_index = 0, dpia_index = 0; uint8_t i; if (!num_dpias || num_dpias > MAX_DPIA_NUM) return ret; //Get total Host Router BW & Validate against each Host Router max BW for (i = 0; i < num_dpias; ++i) { if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled) continue; lowest_dpia_index = get_lowest_dpia_index(link[i]); if (link[i]->link_index < lowest_dpia_index) continue; dpia_index = (link[i]->link_index - lowest_dpia_index) / 2; bw_needed_per_hr[dpia_index] += 
bw_needed_per_dpia[i]; if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) { ret = false; break; } } return ret; }
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* FILE POLICY AND INTENDED USAGE:
 * This file implements DP HPD short pulse handling sequence according to DP
 * specifications
 *
 */

#include "link_dp_irq_handler.h"
#include "link_dpcd.h"
#include "link_dp_training.h"
#include "link_dp_capability.h"
#include "link_edp_panel_control.h"
#include "link/accessories/link_dp_trace.h"
#include "link/link_dpms.h"
#include "dm_helpers.h"

#define DC_LOGGER_INIT(logger)

/* Decide whether the short-pulse IRQ indicates link loss requiring re-train.
 * Returns true only when lane/interlane status degraded AND the sink reports
 * power state D0 (a sink not in D0 is expected to drop the link).
 */
bool dp_parse_link_loss_status(
	struct dc_link *link,
	union hpd_irq_data *hpd_irq_dpcd_data)
{
	uint8_t irq_reg_rx_power_state = 0;
	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
	union lane_status lane_status;
	uint32_t lane;
	bool sink_status_changed;
	bool return_code;

	sink_status_changed = false;
	return_code = false;

	if (link->cur_link_settings.lane_count == 0)
		return return_code;

	/*1. Check that Link Status changed, before re-training.*/

	/*parse lane status*/
	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
		/* check status of lanes 0,1
		 * changed DpcdAddress_Lane01Status (0x202)
		 */
		lane_status.raw = dp_get_nibble_at_index(
			&hpd_irq_dpcd_data->bytes.lane01_status.raw, lane);

		if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
			!lane_status.bits.CR_DONE_0 ||
			!lane_status.bits.SYMBOL_LOCKED_0) {
			/* if one of the channel equalization, clock
			 * recovery or symbol lock is dropped
			 * consider it as (link has been
			 * dropped) dp sink status has changed
			 */
			sink_status_changed = true;
			break;
		}
	}

	/* Check interlane align.*/
	/* 128b/132b encoding has its own pair of interlane-align bits. */
	if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
			(!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b ||
			 !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b)) {
		sink_status_changed = true;
	} else if (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
		sink_status_changed = true;
	}

	if (sink_status_changed) {

		DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);

		return_code = true;

		/*2. Check that we can handle interrupt: Not in FS DOS,
		 *  Not in "Display Timeout" state, Link is trained.
		 */
		dpcd_result = core_link_read_dpcd(link,
			DP_SET_POWER,
			&irq_reg_rx_power_state,
			sizeof(irq_reg_rx_power_state));

		if (dpcd_result != DC_OK) {
			DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
				__func__);
		} else {
			if (irq_reg_rx_power_state != DP_SET_POWER_D0)
				return_code = false;
		}
	}

	return return_code;
}

/* Handle PSR-specific short-pulse IRQ causes. Returns true when the IRQ was
 * fully handled here (PSR error recovered, or PSR active so main link is
 * powered down and link-loss parsing must be skipped).
 */
static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
	union dpcd_psr_configuration psr_configuration;

	if (!link->psr_settings.psr_feature_enabled)
		return false;

	dm_helpers_dp_read_dpcd(
		link->ctx,
		link,
		368,/*DpcdAddress_PSR_Enable_Cfg*/
		&psr_configuration.raw,
		sizeof(psr_configuration.raw));

	if (psr_configuration.bits.ENABLE) {
		unsigned char dpcdbuf[3] = {0};
		union psr_error_status psr_error_status;
		union psr_sink_psr_status psr_sink_psr_status;

		dm_helpers_dp_read_dpcd(
			link->ctx,
			link,
			0x2006, /*DpcdAddress_PSR_Error_Status*/
			(unsigned char *) dpcdbuf,
			sizeof(dpcdbuf));

		/*DPCD 2006h   ERROR STATUS*/
		psr_error_status.raw = dpcdbuf[0];
		/*DPCD 2008h   SINK PANEL SELF REFRESH STATUS*/
		psr_sink_psr_status.raw = dpcdbuf[2];

		if (psr_error_status.bits.LINK_CRC_ERROR ||
				psr_error_status.bits.RFB_STORAGE_ERROR ||
				psr_error_status.bits.VSC_SDP_ERROR) {
			bool allow_active;

			/* Acknowledge and clear error bits */
			dm_helpers_dp_write_dpcd(
				link->ctx,
				link,
				8198,/*DpcdAddress_PSR_Error_Status*/
				&psr_error_status.raw,
				sizeof(psr_error_status.raw));

			/* PSR error, disable and re-enable PSR */
			if (link->psr_settings.psr_allow_active) {
				allow_active = false;
				edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
				allow_active = true;
				edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
			}

			return true;
		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
				PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
			/* No error is detect, PSR is active.
			 * We should return with IRQ_HPD handled without
			 * checking for loss of sync since PSR would have
			 * powered down main link.
			 */
			return true;
		}
	}
	return false;
}

/* Handle Replay-specific short-pulse IRQ causes: latch error status into the
 * link's replay settings and, on error, toggle Replay off/on to recover.
 */
static bool handle_hpd_irq_replay_sink(struct dc_link *link)
{
	union dpcd_replay_configuration replay_configuration;
	/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
	union psr_error_status replay_error_status;

	if (!link->replay_settings.replay_feature_enabled)
		return false;

	dm_helpers_dp_read_dpcd(
		link->ctx,
		link,
		DP_SINK_PR_REPLAY_STATUS,
		&replay_configuration.raw,
		sizeof(replay_configuration.raw));

	dm_helpers_dp_read_dpcd(
		link->ctx,
		link,
		DP_PSR_ERROR_STATUS,
		&replay_error_status.raw,
		sizeof(replay_error_status.raw));

	link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
		replay_error_status.bits.LINK_CRC_ERROR;
	link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
		replay_configuration.bits.DESYNC_ERROR_STATUS;
	link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR =
		replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS;

	if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR ||
		link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR ||
		link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) {
		bool allow_active;

		/* Acknowledge and clear configuration bits */
		dm_helpers_dp_write_dpcd(
			link->ctx,
			link,
			DP_SINK_PR_REPLAY_STATUS,
			&replay_configuration.raw,
			sizeof(replay_configuration.raw));

		/* Acknowledge and clear error bits */
		dm_helpers_dp_write_dpcd(
			link->ctx,
			link,
			DP_PSR_ERROR_STATUS,/*DpcdAddress_REPLAY_Error_Status*/
			&replay_error_status.raw,
			sizeof(replay_error_status.raw));

		/* Replay error, disable and re-enable Replay */
		if (link->replay_settings.replay_allow_active) {
			allow_active = false;
			edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
			allow_active = true;
			edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
		}
	}
	/* NOTE(review): returns true even when no Replay error was found;
	 * callers treat this as "handled" — confirm this is intentional. */
	return true;
}

/* Recover from link loss: turn DPMS off on all master pipes of this link,
 * then re-enable them (optionally forcing max verified settings for CTS).
 */
void dp_handle_link_loss(struct dc_link *link)
{
	struct pipe_ctx *pipes[MAX_PIPES];
	struct dc_state *state = link->dc->current_state;
	uint8_t count;
	int i;

	link_get_master_pipes_with_dpms_on(link, state, &count, pipes);

	for (i = 0; i < count; i++)
		link_set_dpms_off(pipes[i]);

	/* re-enable in reverse order of disable */
	for (i = count - 1; i >= 0; i--) {
		// Always use max settings here for DP 1.4a LL Compliance CTS
		if (link->is_automated) {
			pipes[i]->link_config.dp_link_settings.lane_count =
					link->verified_link_cap.lane_count;
			pipes[i]->link_config.dp_link_settings.link_rate =
					link->verified_link_cap.link_rate;
			pipes[i]->link_config.dp_link_settings.link_spread =
					link->verified_link_cap.link_spread;
		}
		link_set_dpms_on(link->dc->current_state, pipes[i]);
	}
}

/* Workaround: re-read interlane-align bits from DPCD 204h when the sink's
 * 200Eh values are known to be unreliable (see wa_flags usage below).
 */
static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
{
	enum dc_status retval;
	union lane_align_status_updated dpcd_lane_status_updated;

	retval = core_link_read_dpcd(
			link,
			DP_LANE_ALIGN_STATUS_UPDATED,
			&dpcd_lane_status_updated.raw,
			sizeof(union lane_align_status_updated));

	if (retval == DC_OK) {
		irq_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b =
				dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b;
		irq_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b =
				dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b;
	}
}

/* Read the HPD IRQ status block from DPCD, choosing the legacy (200h) or
 * DP 1.4 ESI (2002h) register range based on the sink's DPCD revision.
 */
enum dc_status dp_read_hpd_rx_irq_data(
	struct dc_link *link,
	union hpd_irq_data *irq_data)
{
	static enum dc_status retval;

	/* The HW reads 16 bytes from 200h on HPD,
	 * but if we get an AUX_DEFER, the HW cannot retry
	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
	 * fail, so we now explicitly read 6 bytes which is
	 * the req from the above mentioned test cases.
	 *
	 * For DP 1.4 we need to read those from 2002h range.
	 */
	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
		retval = core_link_read_dpcd(
			link,
			DP_SINK_COUNT,
			irq_data->raw,
			sizeof(union hpd_irq_data));
	else {
		/* Read 14 bytes in a single read and then copy only the required fields.
		 * This is more efficient than doing it in two separate AUX reads. */

		uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];

		retval = core_link_read_dpcd(
			link,
			DP_SINK_COUNT_ESI,
			tmp,
			sizeof(tmp));

		if (retval != DC_OK)
			return retval;

		irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
		irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
		irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
		irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
		irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
		irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];

		/*
		 * This display doesn't have correct values in DPCD200Eh.
		 * Read and check DPCD204h instead.
		 */
		if (link->wa_flags.read_dpcd204h_on_irq_hpd)
			read_dpcd204h_on_irq_hpd(link, irq_data);
	}

	return retval;
}

/*************************Short Pulse IRQ***************************/

bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
{
	/*
	 * Don't handle RX IRQ unless one of following is met:
	 * 1) The link is established (cur_link_settings != unknown)
	 * 2) We know we're dealing with a branch device, SST or MST
	 */

	if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
		is_dp_branch_device(link))
		return true;

	return false;
}

/* Top-level short-pulse HPD RX IRQ handler. Reads IRQ data, then dispatches
 * in priority order: automated test, PSR, Replay, MST sideband, link loss,
 * and finally downstream sink-count change (the case DM must act on).
 */
bool dp_handle_hpd_rx_irq(struct dc_link *link,
		union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
		bool defer_handling, bool *has_left_work)
{
	union hpd_irq_data hpd_irq_dpcd_data = {0};
	union device_service_irq device_service_clear = {0};
	enum dc_status result;
	bool status = false;

	if (out_link_loss)
		*out_link_loss = false;

	if (has_left_work)
		*has_left_work = false;
	/* For use cases related to down stream connection status change,
	 * PSR and device auto test, refer to function handle_sst_hpd_irq
	 * in DAL2.1*/

	DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
		__func__, link->link_index);


	 /* All the "handle_hpd_irq_xxx()" methods
		 * should be called only after
		 * dal_dpsst_ls_read_hpd_irq_data
		 * Order of calls is important too
		 */
	result = dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
	if (out_hpd_irq_dpcd_data)
		*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;

	if (result != DC_OK) {
		DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
			__func__);
		return false;
	}

	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
		link->is_automated = true;
		device_service_clear.bits.AUTOMATED_TEST = 1;
		core_link_write_dpcd(
			link,
			DP_DEVICE_SERVICE_IRQ_VECTOR,
			&device_service_clear.raw,
			sizeof(device_service_clear.raw));
		device_service_clear.raw = 0;
		if (defer_handling && has_left_work)
			*has_left_work = true;
		else
			dc_link_dp_handle_automated_test(link);
		return false;
	}

	if (!dp_should_allow_hpd_rx_irq(link)) {
		DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
			__func__, link->link_index);
		return false;
	}

	if (handle_hpd_irq_psr_sink(link))
		/* PSR-related error was detected and handled */
		return true;

	if (handle_hpd_irq_replay_sink(link))
		/* Replay-related error was detected and handled */
		return true;

	/* If PSR-related error handled, Main link may be off,
	 * so do not handle as a normal sink status change interrupt.
	 */
	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
		if (defer_handling && has_left_work)
			*has_left_work = true;
		return true;
	}

	/* check if we have MST msg and return since we poll for it */
	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
		if (defer_handling && has_left_work)
			*has_left_work = true;
		return false;
	}

	/* For now we only handle 'Downstream port status' case.
	 * If we got sink count changed it means
	 * Downstream port status changed,
	 * then DM should call DC to do the detection.
	 * NOTE: Do not handle link loss on eDP since it is internal link*/
	if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
			dp_parse_link_loss_status(
					link,
					&hpd_irq_dpcd_data)) {
		/* Connectivity log: link loss */
		CONN_DATA_LINK_LOSS(link,
					hpd_irq_dpcd_data.raw,
					sizeof(hpd_irq_dpcd_data),
					"Status: ");

		if (defer_handling && has_left_work)
			*has_left_work = true;
		else
			dp_handle_link_loss(link);

		status = false;
		if (out_link_loss)
			*out_link_loss = true;

		dp_trace_link_loss_increment(link);
	}

	if (link->type == dc_connection_sst_branch &&
		hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
			!= link->dpcd_sink_count)
		status = true;

	/* reasons for HPD RX:
	 * 1. Link Loss - ie Re-train the Link
	 * 2. MST sideband message
	 * 3. Automated Test - ie. Internal Commit
	 * 4. CP (copy protection) - (not interesting for DM???)
	 * 5. DRR
	 * 6. Downstream Port status changed
	 * -ie. Detect - this the only one
	 * which is interesting for DM because
	 * it must call dc_link_detect.
	 */
	return status;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* FILE POLICY AND INTENDED USAGE:
 *
 * This file implements functions that manage basic HPD components such as gpio.
 * It also provides wrapper functions to execute HPD related programming. This
 * file only manages basic HPD functionality. It doesn't manage detection or
 * feature or signal specific HPD behaviors.
 */
#include "link_hpd.h"
#include "gpio_service_interface.h"

/* Sample the current HPD pin level for this link (1 = asserted). */
bool link_get_hpd_state(struct dc_link *link)
{
	uint32_t state;

	dal_gpio_lock_pin(link->hpd_gpio);
	dal_gpio_get_value(link->hpd_gpio, &state);
	dal_gpio_unlock_pin(link->hpd_gpio);

	return state;
}

/* Enable HPD interrupt generation on the link's encoder, if supported. */
void link_enable_hpd(const struct dc_link *link)
{
	struct link_encoder *encoder = link->link_enc;

	if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
		encoder->funcs->enable_hpd(encoder);
}

/* Disable HPD interrupt generation on the link's encoder, if supported. */
void link_disable_hpd(const struct dc_link *link)
{
	struct link_encoder *encoder = link->link_enc;

	/* Fix: guard the pointer we actually invoke (disable_hpd); the old
	 * code NULL-checked enable_hpd before calling disable_hpd.
	 */
	if (encoder != NULL && encoder->funcs->disable_hpd != NULL)
		encoder->funcs->disable_hpd(encoder);
}

/* Enable or disable HPD debounce filtering. Disabling programs a zero-delay
 * filter so HPD transitions are reported immediately.
 */
void link_enable_hpd_filter(struct dc_link *link, bool enable)
{
	struct gpio *hpd;

	if (enable) {
		link->is_hpd_filter_disabled = false;
		program_hpd_filter(link);
	} else {
		link->is_hpd_filter_disabled = true;
		/* Obtain HPD handle */

		hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);

		if (!hpd)
			return;

		/* Setup HPD filtering */
		if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
			struct gpio_hpd_config config;

			config.delay_on_connect = 0;
			config.delay_on_disconnect = 0;

			dal_irq_setup_hpd_filter(hpd, &config);

			dal_gpio_close(hpd);
		} else {
			ASSERT_CRITICAL(false);
		}
		/* Release HPD handle */
		dal_gpio_destroy_irq(&hpd);
	}
}

/* Look up the HPD GPIO for a connector via VBIOS and create an IRQ GPIO
 * object for it. Returns NULL when the object has no HPD record. Caller
 * owns the returned gpio and must release it with dal_gpio_destroy_irq().
 */
struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
			       struct graphics_object_id link_id,
			       struct gpio_service *gpio_service)
{
	enum bp_result bp_result;
	struct graphics_object_hpd_info hpd_info;
	struct gpio_pin_info pin_info;

	if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
		return NULL;

	bp_result = dcb->funcs->get_gpio_pin_info(dcb,
		hpd_info.hpd_int_gpio_uid, &pin_info);

	if (bp_result != BP_RESULT_OK) {
		ASSERT(bp_result == BP_RESULT_NORECORD);
		return NULL;
	}

	return dal_gpio_service_create_irq(gpio_service,
					   pin_info.offset,
					   pin_info.mask);
}

/* One-shot read of the raw HPD pin level. Returns false if no HPD gpio. */
bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high)
{
	struct gpio *hpd_pin = link_get_hpd_gpio(
			link->ctx->dc_bios, link->link_id,
			link->ctx->gpio_service);
	if (!hpd_pin)
		return false;

	dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
	dal_gpio_get_value(hpd_pin, is_hpd_high);
	dal_gpio_close(hpd_pin);
	dal_gpio_destroy_irq(&hpd_pin);
	return true;
}

/* Map the link's HPD GPIO to its HPD interrupt source line (HPD1..HPD6). */
enum hpd_source_id get_hpd_line(struct dc_link *link)
{
	struct gpio *hpd;
	enum hpd_source_id hpd_id;

	hpd_id = HPD_SOURCEID_UNKNOWN;

	hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
			   link->ctx->gpio_service);

	if (hpd) {
		switch (dal_irq_get_source(hpd)) {
		case DC_IRQ_SOURCE_HPD1:
			hpd_id = HPD_SOURCEID1;
			break;
		case DC_IRQ_SOURCE_HPD2:
			hpd_id = HPD_SOURCEID2;
			break;
		case DC_IRQ_SOURCE_HPD3:
			hpd_id = HPD_SOURCEID3;
			break;
		case DC_IRQ_SOURCE_HPD4:
			hpd_id = HPD_SOURCEID4;
			break;
		case DC_IRQ_SOURCE_HPD5:
			hpd_id = HPD_SOURCEID5;
			break;
		case DC_IRQ_SOURCE_HPD6:
			hpd_id = HPD_SOURCEID6;
			break;
		default:
			BREAK_TO_DEBUGGER();
			break;
		}

		dal_gpio_destroy_irq(&hpd);
	}

	return hpd_id;
}

/* Program the signal-type-specific HPD debounce delays. Returns true when a
 * filter was programmed; false for signals that take none (LVDS/eDP) or when
 * filtering is administratively disabled.
 */
bool program_hpd_filter(const struct dc_link *link)
{
	bool result = false;
	struct gpio *hpd;
	int delay_on_connect_in_ms = 0;
	int delay_on_disconnect_in_ms = 0;

	if (link->is_hpd_filter_disabled)
		return false;
	/* Verify feature is supported */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_HDMI_TYPE_A:
		/* Program hpd filter */
		delay_on_connect_in_ms = 500;
		delay_on_disconnect_in_ms = 100;
		break;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		/* Program hpd filter to allow DP signal to settle */
		/* 500:	not able to detect MST <-> SST switch as HPD is low for
		 * only 100ms on DELL U2413
		 * 0: some passive dongle still show aux mode instead of i2c
		 * 20-50: not enough to hide bouncing HPD with passive dongle.
		 * also see intermittent i2c read issues.
		 */
		delay_on_connect_in_ms = 80;
		delay_on_disconnect_in_ms = 0;
		break;
	case SIGNAL_TYPE_LVDS:
	case SIGNAL_TYPE_EDP:
	default:
		/* Don't program hpd filter */
		return false;
	}

	/* Obtain HPD handle */
	hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
			   link->ctx->gpio_service);

	if (!hpd)
		return result;

	/* Setup HPD filtering */
	if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
		struct gpio_hpd_config config;

		config.delay_on_connect = delay_on_connect_in_ms;
		config.delay_on_disconnect = delay_on_disconnect_in_ms;

		dal_irq_setup_hpd_filter(hpd, &config);

		dal_gpio_close(hpd);

		result = true;
	} else {
		ASSERT_CRITICAL(false);
	}

	/* Release HPD handle */
	dal_gpio_destroy_irq(&hpd);

	return result;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file implements dp specific link capability retrieval sequence. It is * responsible for retrieving, parsing, overriding, deciding capability obtained * from dp link. Link capability consists of encoders, DPRXs, cables, retimers, * usb and all other possible backend capabilities. Other components should * include this header file in order to access link capability. Accessing link * capability by dereferencing dc_link outside dp_link_capability is not a * recommended method as it makes the component dependent on the underlying data * structure used to represent link capability instead of function interfaces. 
 */
#include "link_dp_capability.h"
#include "link_ddc.h"
#include "link_dpcd.h"
#include "link_dp_dpia.h"
#include "link_dp_phy.h"
#include "link_edp_panel_control.h"
#include "link_dp_irq_handler.h"
#include "link/accessories/link_dp_trace.h"
#include "link/link_detection.h"
#include "link/link_validation.h"
#include "link_dp_training.h"
#include "atomfirmware.h"
#include "resource.h"
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "gpio_service_interface.h"

#define DC_LOGGER \
	link->ctx->logger
#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */

#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif

/* One candidate lane-count/link-rate pair in the link training fallback order. */
struct dp_lt_fallback_entry {
	enum dc_lane_count lane_count;
	enum dc_link_rate link_rate;
};

static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
		/* This link training fallback array is ordered by
		 * link bandwidth from highest to lowest.
		 * DP specs makes it a normative policy to always
		 * choose the next highest link bandwidth during
		 * link training fallback.
		 */
		{LANE_COUNT_FOUR, LINK_RATE_UHBR20},
		{LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
		{LANE_COUNT_TWO, LINK_RATE_UHBR20},
		{LANE_COUNT_FOUR, LINK_RATE_UHBR10},
		{LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
		{LANE_COUNT_FOUR, LINK_RATE_HIGH3},
		{LANE_COUNT_ONE, LINK_RATE_UHBR20},
		{LANE_COUNT_TWO, LINK_RATE_UHBR10},
		{LANE_COUNT_FOUR, LINK_RATE_HIGH2},
		{LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
		{LANE_COUNT_TWO, LINK_RATE_HIGH3},
		{LANE_COUNT_ONE, LINK_RATE_UHBR10},
		{LANE_COUNT_TWO, LINK_RATE_HIGH2},
		{LANE_COUNT_FOUR, LINK_RATE_HIGH},
		{LANE_COUNT_ONE, LINK_RATE_HIGH3},
		{LANE_COUNT_FOUR, LINK_RATE_LOW},
		{LANE_COUNT_ONE, LINK_RATE_HIGH2},
		{LANE_COUNT_TWO, LINK_RATE_HIGH},
		{LANE_COUNT_TWO, LINK_RATE_LOW},
		{LANE_COUNT_ONE, LINK_RATE_HIGH},
		{LANE_COUNT_ONE, LINK_RATE_LOW},
};

/* Lowest-bandwidth configuration, used when no better capability is known. */
static const struct dc_link_settings fail_safe_link_settings = {
		.lane_count = LANE_COUNT_ONE,
		.link_rate = LINK_RATE_LOW,
		.link_spread = LINK_SPREAD_DISABLED,
};

/* True when the attached branch device is an active protocol converter
 * (any dongle type in the VGA..HDMI converter range).
 */
bool is_dp_active_dongle(const struct dc_link *link)
{
	return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) &&
				(link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER);
}

/* True when DPCD reported a downstream branch device. */
bool is_dp_branch_device(const struct dc_link *link)
{
	return link->dpcd_caps.is_branch_dev;
}

/* Translate the DPCD downstream-port max-bpc code into bits per color.
 * Returns -1 for an unrecognized code.
 */
static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
{
	switch (bpc) {
	case DOWN_STREAM_MAX_8BPC:
		return 8;
	case DOWN_STREAM_MAX_10BPC:
		return 10;
	case DOWN_STREAM_MAX_12BPC:
		return 12;
	case DOWN_STREAM_MAX_16BPC:
		return 16;
	default:
		break;
	}

	return -1;
}

/* Decode the one-hot PHY_REPEATER_CNT DPCD encoding into a plain repeater
 * count (1..8). Returns 0 for any value that is not a valid one-hot code.
 */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
{
	switch (lttpr_repeater_count) {
	case 0x80: // 1 lttpr repeater
		return 1;
	case 0x40: // 2 lttpr repeaters
		return 2;
	case 0x20: // 3 lttpr repeaters
		return 3;
	case 0x10: // 4 lttpr repeaters
		return 4;
	case 0x08: // 5 lttpr repeaters
		return 5;
	case 0x04: // 6 lttpr repeaters
		return 6;
	case 0x02: // 7 lttpr repeaters
		return 7;
	case 0x01: // 8 lttpr repeaters
		return 8;
	default:
		break;
	}
	return 0; // invalid value
}

uint32_t
link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
	/* Map the 3-bit raw FRL link-rate code to bandwidth in kbps; 0 if unknown. */
	switch (bw) {
	case 0b001:
		return 9000000;
	case 0b010:
		return 18000000;
	case 0b011:
		return 24000000;
	case 0b100:
		return 32000000;
	case 0b101:
		return 40000000;
	case 0b110:
		return 48000000;
	}
	return 0;
}

/* Map a per-lane link rate in kHz to the dc_link_rate multiplier enum. */
static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
{
	enum dc_link_rate link_rate;
	// LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
	switch (link_rate_in_khz) {
	case 1620000:
		link_rate = LINK_RATE_LOW;	// Rate_1 (RBR) - 1.62 Gbps/Lane
		break;
	case 2160000:
		link_rate = LINK_RATE_RATE_2;	// Rate_2 - 2.16 Gbps/Lane
		break;
	case 2430000:
		link_rate = LINK_RATE_RATE_3;	// Rate_3 - 2.43 Gbps/Lane
		break;
	case 2700000:
		link_rate = LINK_RATE_HIGH;	// Rate_4 (HBR) - 2.70 Gbps/Lane
		break;
	case 3240000:
		link_rate = LINK_RATE_RBR2;	// Rate_5 (RBR2)- 3.24 Gbps/Lane
		break;
	case 4320000:
		link_rate = LINK_RATE_RATE_6;	// Rate_6 - 4.32 Gbps/Lane
		break;
	case 5400000:
		link_rate = LINK_RATE_HIGH2;	// Rate_7 (HBR2)- 5.40 Gbps/Lane
		break;
	case 6750000:
		link_rate = LINK_RATE_RATE_8;	// Rate_8 - 6.75 Gbps/Lane
		break;
	case 8100000:
		link_rate = LINK_RATE_HIGH3;	// Rate_9 (HBR3)- 8.10 Gbps/Lane
		break;
	default:
		link_rate = LINK_RATE_UNKNOWN;
		break;
	}
	return link_rate;
}

/* Combine two cable-id capability reports, keeping the weakest capability
 * bits and the strongest (max) cable type.
 */
static union dp_cable_id intersect_cable_id(
		union dp_cable_id *a, union dp_cable_id *b)
{
	union dp_cable_id out;

	out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY,
			b->bits.UHBR10_20_CAPABILITY);
	out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY,
			b->bits.UHBR13_5_CAPABILITY);
	out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE);

	return out;
}

/*
 * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw.
 */
static uint32_t intersect_frl_link_bw_support(
	const uint32_t max_supported_frl_bw_in_kbps,
	const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
{
	uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;

	// HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
	if (hdmi_encoded_link_bw.bits.FRL_MODE) {
		if (hdmi_encoded_link_bw.bits.BW_48Gbps)
			supported_bw_in_kbps = 48000000;
		else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
			supported_bw_in_kbps = 40000000;
		else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
			supported_bw_in_kbps = 32000000;
		else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
			supported_bw_in_kbps = 24000000;
		else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
			supported_bw_in_kbps = 18000000;
		else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
			supported_bw_in_kbps = 9000000;
	}

	return supported_bw_in_kbps;
}

/* Return the id of the pool's DP clock source.
 * NOTE(review): the inline comment says CLOCK_SOURCE_ID_EXTERNAL will be used
 * on the missing-source path, but the code actually returns
 * CLOCK_SOURCE_ID_UNDEFINED — confirm which is intended.
 */
static enum clock_source_id get_clock_source_id(struct dc_link *link)
{
	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED;
	struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source;

	if (dp_cs != NULL) {
		dp_cs_id = dp_cs->id;
	} else {
		/*
		 * dp clock source is not initialized for some reason.
		 * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
		 */
		ASSERT(dp_cs);
	}

	return dp_cs_id;
}

/* Workaround for dongles that power down AUX with RX power: retry powering
 * the RX and re-reading DPCD_REV, then flag dongles that must stay powered.
 */
static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
		int length)
{
	int retry = 0;

	if (!link->dpcd_caps.dpcd_rev.raw) {
		do {
			dpcd_write_rx_power_ctrl(link, true);
			core_link_read_dpcd(link, DP_DPCD_REV,
					dpcd_data, length);
			link->dpcd_caps.dpcd_rev.raw = dpcd_data[
				DP_DPCD_REV -
				DP_DPCD_REV];
		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
	}

	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
		switch (link->dpcd_caps.branch_dev_id) {
		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
		 * all internal circuits including AUX communication preventing
		 * reading DPCD table and EDID (spec violation).
		 * Encoder will skip DP RX power down on disable_output to
		 * keep receiver powered all the time.*/
		case DP_BRANCH_DEVICE_ID_0010FA:
		case DP_BRANCH_DEVICE_ID_0080E1:
		case DP_BRANCH_DEVICE_ID_00E04C:
			link->wa_flags.dp_keep_receiver_powered = true;
			break;

		/* TODO: May need work around for other dongles. */
		default:
			link->wa_flags.dp_keep_receiver_powered = false;
			break;
		}
	} else
		link->wa_flags.dp_keep_receiver_powered = false;
}

/* FEC is supported when the signal is DP, the assigned link encoder supports
 * FEC, and the sink reports FEC_CAPABLE in DPCD.
 */
bool dp_is_fec_supported(const struct dc_link *link)
{
	/* TODO - use asic cap instead of link_enc->features
	 * we no longer know which link enc to use for this link before commit
	 */
	struct link_encoder *link_enc = NULL;

	link_enc = link_enc_cfg_get_link_enc(link);
	ASSERT(link_enc);

	return (dc_is_dp_signal(link->connector_signal) && link_enc &&
			link_enc->features.fec_supported &&
			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE);
}

/* Decide whether FEC should actually be enabled: supported, not disabled by
 * a panel patch, and (for eDP) only when DSC will be usable.
 */
bool dp_should_enable_fec(const struct dc_link *link)
{
	bool force_disable = false;

	if (link->fec_state == dc_link_fec_enabled)
		force_disable = false;
	else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
			link->local_sink &&
			link->local_sink->edid_caps.panel_patch.disable_fec)
		force_disable = true;
	else if (link->connector_signal == SIGNAL_TYPE_EDP
			&& (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
			 dsc_support.DSC_SUPPORT == false
				|| link->panel_config.dsc.disable_dsc_edp
				|| !link->dc->caps.edp_dsc_support))
		force_disable = true;

	return !force_disable && dp_is_fec_supported(link);
}

/* True when this pipe is driven through the HPO (128b/132b) stream/link
 * encoder pair on a DP signal.
 */
bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
{
	/* If this assert is hit then we have a link encoder dynamic management issue */
	ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ?
			pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);

	return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
			pipe_ctx->link_res.hpo_dp_link_enc &&
			dc_is_dp_signal(pipe_ctx->stream->signal));
}

/* An LTTPR is considered present only when the repeater count decodes to a
 * valid one-hot value and the reported caps (lane count, revision) are sane.
 */
bool dp_is_lttpr_present(struct dc_link *link)
{
	return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
			link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
			link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
			link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
}

/* in DP compliance test, DPR-120 may have
 * a random value in its MAX_LINK_BW dpcd field.
 * We map it to the maximum supported link rate that
 * is smaller than MAX_LINK_BW in this case.
 */
static enum dc_link_rate get_link_rate_from_max_link_bw(
		uint8_t max_link_bw)
{
	enum dc_link_rate link_rate;

	if (max_link_bw >= LINK_RATE_HIGH3) {
		link_rate = LINK_RATE_HIGH3;
	} else if (max_link_bw < LINK_RATE_HIGH3
			&& max_link_bw >= LINK_RATE_HIGH2) {
		link_rate = LINK_RATE_HIGH2;
	} else if (max_link_bw < LINK_RATE_HIGH2
			&& max_link_bw >= LINK_RATE_HIGH) {
		link_rate = LINK_RATE_HIGH;
	} else if (max_link_bw < LINK_RATE_HIGH
			&& max_link_bw >= LINK_RATE_LOW) {
		link_rate = LINK_RATE_LOW;
	} else {
		link_rate = LINK_RATE_UNKNOWN;
	}

	return link_rate;
}

/* Max link rate through the LTTPR chain; 128b/132b rate bits, when set,
 * take precedence over the 8b/10b max_link_rate field.
 */
static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
{
	enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;

	if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
		lttpr_max_link_rate = LINK_RATE_UHBR20;
	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
		lttpr_max_link_rate = LINK_RATE_UHBR13_5;
	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
		lttpr_max_link_rate = LINK_RATE_UHBR10;

	return lttpr_max_link_rate;
}

/* Max UHBR link rate advertised by the cable id; LINK_RATE_UNKNOWN when the
 * cable reports no UHBR capability.
 */
static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
{
	enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;

	if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
		cable_max_link_rate = LINK_RATE_UHBR20;
	else if
	(link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
		cable_max_link_rate = LINK_RATE_UHBR13_5;
	else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
		cable_max_link_rate = LINK_RATE_UHBR10;

	return cable_max_link_rate;
}

/* Fallback floor checks: one lane / RBR are the minimum configurations. */
static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
{
	return lane_count <= LANE_COUNT_ONE;
}

static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
{
	return link_rate <= LINK_RATE_LOW;
}

/* Next lower lane count in the 4 -> 2 -> 1 fallback ladder. */
static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
{
	switch (lane_count) {
	case LANE_COUNT_FOUR:
		return LANE_COUNT_TWO;
	case LANE_COUNT_TWO:
		return LANE_COUNT_ONE;
	case LANE_COUNT_ONE:
		return LANE_COUNT_UNKNOWN;
	default:
		return LANE_COUNT_UNKNOWN;
	}
}

/* Next lower link rate for link training fallback; intermediate eDP-only
 * rates are skipped except RATE_8, which is offered below HBR3 when
 * eDP 1.5 support is enabled in debug options.
 */
static enum dc_link_rate reduce_link_rate(const struct dc_link *link, enum dc_link_rate link_rate)
{
	// NEEDSWORK: provide some details about why this function never returns some of the
	// obscure link rates such as 4.32 Gbps or 3.24 Gbps and if such behavior is intended.
	//

	switch (link_rate) {
	case LINK_RATE_UHBR20:
		return LINK_RATE_UHBR13_5;
	case LINK_RATE_UHBR13_5:
		return LINK_RATE_UHBR10;
	case LINK_RATE_UHBR10:
		return LINK_RATE_HIGH3;
	case LINK_RATE_HIGH3:
		if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->debug.support_eDP1_5)
			return LINK_RATE_RATE_8;
		return LINK_RATE_HIGH2;
	case LINK_RATE_RATE_8:
		return LINK_RATE_HIGH2;
	case LINK_RATE_HIGH2:
		return LINK_RATE_HIGH;
	case LINK_RATE_RATE_6:
	case LINK_RATE_RBR2:
		return LINK_RATE_HIGH;
	case LINK_RATE_HIGH:
		return LINK_RATE_LOW;
	case LINK_RATE_RATE_3:
	case LINK_RATE_RATE_2:
		return LINK_RATE_LOW;
	case LINK_RATE_LOW:
	default:
		return LINK_RATE_UNKNOWN;
	}
}

/* Next higher lane count in the 1 -> 2 -> 4 ladder. */
static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
{
	switch (lane_count) {
	case LANE_COUNT_ONE:
		return LANE_COUNT_TWO;
	case LANE_COUNT_TWO:
		return LANE_COUNT_FOUR;
	default:
		return LANE_COUNT_UNKNOWN;
	}
}

/* Next higher link rate in the standard ladder; above UHBR10 the step
 * depends on whether the DPRX advertises UHBR13.5 support.
 */
static enum dc_link_rate increase_link_rate(struct dc_link *link,
		enum dc_link_rate link_rate)
{
	switch (link_rate) {
	case LINK_RATE_LOW:
		return LINK_RATE_HIGH;
	case LINK_RATE_HIGH:
		return LINK_RATE_HIGH2;
	case LINK_RATE_HIGH2:
		return LINK_RATE_HIGH3;
	case LINK_RATE_HIGH3:
		return LINK_RATE_UHBR10;
	case LINK_RATE_UHBR10:
		/* upto DP2.x specs UHBR13.5 is the only link rate that could be
		 * not supported by DPRX when higher link rate is supported.
		 * so we treat it as a special case for code simplicity. When we
		 * have new specs with more link rates like this, we should
		 * consider a more generic solution to handle discrete link
		 * rate capabilities.
		 */
		return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ?
				LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20;
	case LINK_RATE_UHBR13_5:
		return LINK_RATE_UHBR20;
	default:
		return LINK_RATE_UNKNOWN;
	}
}

/* Fallback by walking the dp_lt_fallbacks table (highest bandwidth first):
 * find the current entry, then advance to the next entry that fits within
 * *max and is supported by the DPRX. Returns false when no further fallback
 * exists or training was aborted.
 */
static bool decide_fallback_link_setting_max_bw_policy(
		struct dc_link *link,
		const struct dc_link_settings *max,
		struct dc_link_settings *cur,
		enum link_training_result training_result)
{
	uint8_t cur_idx = 0, next_idx;
	bool found = false;

	if (training_result == LINK_TRAINING_ABORT)
		return false;

	while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
		/* find current index */
		if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
				dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
			break;
		else
			cur_idx++;

	next_idx = cur_idx + 1;

	while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
		/* find next index */
		if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count ||
				dp_lt_fallbacks[next_idx].link_rate > max->link_rate)
			next_idx++;
		else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 &&
				link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0)
			/* upto DP2.x specs UHBR13.5 is the only link rate that
			 * could be not supported by DPRX when higher link rate
			 * is supported. so we treat it as a special case for
			 * code simplicity. When we have new specs with more
			 * link rates like this, we should consider a more
			 * generic solution to handle discrete link rate
			 * capabilities.
			 */
			next_idx++;
		else
			break;

	if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
		cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
		cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
		found = true;
	}

	return found;
}

/*
 * function: set link rate and lane count fallback based
 * on current link setting and last link training result
 * return value:
 *			true - link setting could be set
 *			false - has reached minimum setting
 *					and no further fallback could be done
 */
bool decide_fallback_link_setting(
		struct dc_link *link,
		struct dc_link_settings *max,
		struct dc_link_settings *cur,
		enum link_training_result training_result)
{
	/* 128b/132b links (or a debug override) always use the table-driven
	 * max-bandwidth fallback policy.
	 */
	if (link_dp_get_encoding_format(max) == DP_128b_132b_ENCODING ||
			link->dc->debug.force_dp2_lt_fallback_method)
		return decide_fallback_link_setting_max_bw_policy(link, max,
				cur, training_result);

	switch (training_result) {
	case LINK_TRAINING_CR_FAIL_LANE0:
	case LINK_TRAINING_CR_FAIL_LANE1:
	case LINK_TRAINING_CR_FAIL_LANE23:
	case LINK_TRAINING_LQA_FAIL:
	{
		/* CR failures: drop link rate first; once at minimum rate,
		 * restore max rate and drop to the highest lane count that
		 * excludes the failed lane(s).
		 */
		if (!reached_minimum_link_rate(cur->link_rate)) {
			cur->link_rate = reduce_link_rate(link, cur->link_rate);
		} else if (!reached_minimum_lane_count(cur->lane_count)) {
			cur->link_rate = max->link_rate;
			if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
				return false;
			else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
				cur->lane_count = LANE_COUNT_ONE;
			else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
				cur->lane_count = LANE_COUNT_TWO;
			else
				cur->lane_count = reduce_lane_count(cur->lane_count);
		} else {
			return false;
		}
		break;
	}
	case LINK_TRAINING_EQ_FAIL_EQ:
	case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
	{
		/* EQ failures: drop lane count first, then link rate. */
		if (!reached_minimum_lane_count(cur->lane_count)) {
			cur->lane_count = reduce_lane_count(cur->lane_count);
		} else if (!reached_minimum_link_rate(cur->link_rate)) {
			cur->link_rate = reduce_link_rate(link, cur->link_rate);
			/* Reduce max link rate to avoid potential infinite loop.
			 * Needed so that any subsequent CR_FAIL fallback can't
			 * re-set the link rate higher than the link rate from
			 * the latest EQ_FAIL fallback.
			 */
			max->link_rate = cur->link_rate;
			cur->lane_count = max->lane_count;
		} else {
			return false;
		}
		break;
	}
	case LINK_TRAINING_EQ_FAIL_CR:
	{
		if (!reached_minimum_link_rate(cur->link_rate)) {
			cur->link_rate = reduce_link_rate(link, cur->link_rate);
			/* Reduce max link rate to avoid potential infinite loop.
			 * Needed so that any subsequent CR_FAIL fallback can't
			 * re-set the link rate higher than the link rate from
			 * the latest EQ_FAIL fallback.
			 */
			max->link_rate = cur->link_rate;
			cur->lane_count = max->lane_count;
		} else {
			return false;
		}
		break;
	}
	default:
		return false;
	}
	return true;
}

/* Pick the lowest-bandwidth verified link setting that still carries req_bw
 * (kbps) for a non-eDP SST link. Lane count is grown before link rate.
 * Returns false when even the full verified capability is insufficient.
 */
static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
	struct dc_link_settings initial_link_setting = {
		LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
	struct dc_link_settings current_link_setting =
			initial_link_setting;
	uint32_t link_bw;

	if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
		return false;

	/* search for the minimum link setting that:
	 * 1. is supported according to the link training result
	 * 2. could support the b/w requested by the timing
	 */
	while (current_link_setting.link_rate <=
			link->verified_link_cap.link_rate) {
		link_bw = dp_link_bandwidth_kbps(
				link,
				&current_link_setting);
		if (req_bw <= link_bw) {
			*link_setting = current_link_setting;
			return true;
		}

		if (current_link_setting.lane_count <
				link->verified_link_cap.lane_count) {
			current_link_setting.lane_count =
					increase_lane_count(
							current_link_setting.lane_count);
		} else {
			current_link_setting.link_rate =
					increase_link_rate(link,
							current_link_setting.link_rate);
			current_link_setting.lane_count =
					initial_link_setting.lane_count;
		}
	}

	return false;
}

/* eDP variant of decide_dp_link_settings: on eDP v1.4+ panels the candidate
 * link rates come from the DPCD supported-link-rates table instead of the
 * standard rate ladder. Pre-1.4 panels just get the verified capability.
 */
bool edp_decide_link_settings(struct dc_link *link,
		struct dc_link_settings *link_setting, uint32_t req_bw)
{
	struct dc_link_settings initial_link_setting;
	struct dc_link_settings current_link_setting;
	uint32_t link_bw;

	/*
	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
	 */
	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
			link->dpcd_caps.edp_supported_link_rates_count == 0) {
		*link_setting = link->verified_link_cap;
		return true;
	}

	memset(&initial_link_setting, 0, sizeof(initial_link_setting));
	initial_link_setting.lane_count = LANE_COUNT_ONE;
	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
	initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
	initial_link_setting.use_link_rate_set = true;
	initial_link_setting.link_rate_set = 0;
	current_link_setting = initial_link_setting;

	/* search for the minimum link setting that:
	 * 1. is supported according to the link training result
	 * 2.
	 could support the b/w requested by the timing */
	while (current_link_setting.link_rate <=
			link->verified_link_cap.link_rate) {
		link_bw = dp_link_bandwidth_kbps(
				link,
				&current_link_setting);
		if (req_bw <= link_bw) {
			*link_setting = current_link_setting;
			return true;
		}

		if (current_link_setting.lane_count <
				link->verified_link_cap.lane_count) {
			current_link_setting.lane_count =
					increase_lane_count(
							current_link_setting.lane_count);
		} else {
			/* NOTE(review): link_rate_set is incremented before it is
			 * used as an index; when link_rate_set ==
			 * edp_supported_link_rates_count - 1 this reads the entry
			 * at [count], one past the last reported rate — confirm
			 * the intended bound.
			 */
			if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
				current_link_setting.link_rate_set++;
				current_link_setting.link_rate =
					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
				current_link_setting.lane_count =
									initial_link_setting.lane_count;
			} else
				break;
		}
	}
	return false;
}

/* eDP link-setting search for DSC timings. policy selects the search order:
 * non-zero minimizes lane count (grow rate first), zero minimizes link rate
 * (grow lanes first). max_link_rate caps the search; LINK_RATE_UNKNOWN means
 * use the verified capability. Returns false when req_bw cannot be carried.
 */
bool decide_edp_link_settings_with_dsc(struct dc_link *link,
		struct dc_link_settings *link_setting,
		uint32_t req_bw,
		enum dc_link_rate max_link_rate)
{
	struct dc_link_settings initial_link_setting;
	struct dc_link_settings current_link_setting;
	uint32_t link_bw;

	unsigned int policy = 0;

	policy = link->panel_config.dsc.force_dsc_edp_policy;
	if (max_link_rate == LINK_RATE_UNKNOWN)
		max_link_rate = link->verified_link_cap.link_rate;
	/*
	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
	 */
	if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
			link->dpcd_caps.edp_supported_link_rates_count == 0)) {
		/* for DSC enabled case, we search for minimum lane count */
		memset(&initial_link_setting, 0, sizeof(initial_link_setting));
		initial_link_setting.lane_count = LANE_COUNT_ONE;
		initial_link_setting.link_rate = LINK_RATE_LOW;
		initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
		initial_link_setting.use_link_rate_set = false;
		initial_link_setting.link_rate_set = 0;
		current_link_setting = initial_link_setting;
		if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
			return false;

		/* search for the minimum link setting that:
		 * 1. is supported according to the link training result
		 * 2. could support the b/w requested by the timing
		 */
		while (current_link_setting.link_rate <=
				max_link_rate) {
			link_bw = dp_link_bandwidth_kbps(
					link,
					&current_link_setting);
			if (req_bw <= link_bw) {
				*link_setting = current_link_setting;
				return true;
			}
			if (policy) {
				/* minimize lane */
				if (current_link_setting.link_rate < max_link_rate) {
					current_link_setting.link_rate =
							increase_link_rate(link,
									current_link_setting.link_rate);
				} else {
					if (current_link_setting.lane_count <
									link->verified_link_cap.lane_count) {
						current_link_setting.lane_count =
								increase_lane_count(
										current_link_setting.lane_count);
						current_link_setting.link_rate = initial_link_setting.link_rate;
					} else
						break;
				}
			} else {
				/* minimize link rate */
				if (current_link_setting.lane_count <
						link->verified_link_cap.lane_count) {
					current_link_setting.lane_count =
							increase_lane_count(
									current_link_setting.lane_count);
				} else {
					current_link_setting.link_rate =
							increase_link_rate(link,
									current_link_setting.link_rate);
					current_link_setting.lane_count =
							initial_link_setting.lane_count;
				}
			}
		}
		return false;
	}

	/* if optimize edp link is supported */
	memset(&initial_link_setting, 0, sizeof(initial_link_setting));
	initial_link_setting.lane_count = LANE_COUNT_ONE;
	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
	initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
	initial_link_setting.use_link_rate_set = true;
	initial_link_setting.link_rate_set = 0;
	current_link_setting = initial_link_setting;

	/* search for the minimum link setting that:
	 * 1. is supported according to the link training result
	 * 2. could support the b/w requested by the timing
	 */
	while (current_link_setting.link_rate <=
			max_link_rate) {
		link_bw = dp_link_bandwidth_kbps(
				link,
				&current_link_setting);
		if (req_bw <= link_bw) {
			*link_setting = current_link_setting;
			return true;
		}
		if (policy) {
			/* minimize lane */
			if (current_link_setting.link_rate_set <
					link->dpcd_caps.edp_supported_link_rates_count
					&& current_link_setting.link_rate < max_link_rate) {
				/* NOTE(review): same increment-before-index pattern as
				 * edp_decide_link_settings — may read one past the last
				 * reported rate; confirm the intended bound.
				 */
				current_link_setting.link_rate_set++;
				current_link_setting.link_rate =
					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
			} else {
				if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
					current_link_setting.lane_count =
							increase_lane_count(
									current_link_setting.lane_count);
					current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
					current_link_setting.link_rate =
						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
				} else
					break;
			}
		} else {
			/* minimize link rate */
			if (current_link_setting.lane_count <
					link->verified_link_cap.lane_count) {
				current_link_setting.lane_count =
						increase_lane_count(
								current_link_setting.lane_count);
			} else {
				if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
					current_link_setting.link_rate_set++;
					current_link_setting.link_rate =
						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
					current_link_setting.lane_count =
						initial_link_setting.lane_count;
				} else
					break;
			}
		}
	}
	return false;
}

/* MST links currently always use the full verified capability. */
static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
{
	*link_setting =
		link->verified_link_cap;
	return true;
}

/* Top-level link-setting decision for a stream: preferred settings win,
 * otherwise dispatch to the MST / eDP (with or without DSC) / SST policy.
 * Returns true when a usable lane count and link rate were chosen.
 */
bool link_decide_link_settings(struct dc_stream_state *stream,
	struct dc_link_settings *link_setting)
{
	struct dc_link *link = stream->link;
	uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link));

	memset(link_setting, 0, sizeof(*link_setting));

	/* if preferred is specified through AMDDP, use it, if it's enough
	 * to drive the mode
	 */
	if (link->preferred_link_setting.lane_count !=
			LANE_COUNT_UNKNOWN &&
			link->preferred_link_setting.link_rate !=
					LINK_RATE_UNKNOWN) {
		*link_setting = link->preferred_link_setting;
		return true;
	}

	/* MST doesn't perform link training for now
	 * TODO: add MST specific link training routine
	 */
	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		decide_mst_link_settings(link, link_setting);
	} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
		/* enable edp link optimization for DSC eDP case */
		if (stream->timing.flags.DSC) {
			enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;

			if (link->panel_config.dsc.force_dsc_edp_policy) {
				/* calculate link max link rate cap*/
				struct dc_link_settings tmp_link_setting;
				struct dc_crtc_timing tmp_timing = stream->timing;
				uint32_t orig_req_bw;

				tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
				tmp_timing.flags.DSC = 0;
				orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing,
						dc_link_get_highest_encoding_format(link));
				edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
				max_link_rate = tmp_link_setting.link_rate;
			}
			decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
		} else {
			edp_decide_link_settings(link, link_setting, req_bw);
		}
	} else {
		decide_dp_link_settings(link, link_setting, req_bw);
	}

	return link_setting->lane_count != LANE_COUNT_UNKNOWN &&
			link_setting->link_rate != LINK_RATE_UNKNOWN;
}

/* Classify a link rate: RBR..HBR3 is 8b/10b, UHBR10..UHBR20 is 128b/132b. */
enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings *link_settings)
{
	if ((link_settings->link_rate >= LINK_RATE_LOW) &&
			(link_settings->link_rate <= LINK_RATE_HIGH3))
		return DP_8b_10b_ENCODING;
	else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
			(link_settings->link_rate <= LINK_RATE_UHBR20))
		return DP_128b_132b_ENCODING;
	return DP_UNKNOWN_ENCODING;
}

/* Encoding format an MST link would use, honoring preferred settings. */
enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link)
{
	struct dc_link_settings link_settings = {0};

	if (!dc_is_dp_signal(link->connector_signal))
		return DP_UNKNOWN_ENCODING;

	if (link->preferred_link_setting.lane_count !=
			LANE_COUNT_UNKNOWN &&
			link->preferred_link_setting.link_rate !=
					LINK_RATE_UNKNOWN) {
		link_settings = link->preferred_link_setting;
	} else {
		decide_mst_link_settings(link, &link_settings);
	}

	return link_dp_get_encoding_format(&link_settings);
}

/* Cache the branch device's IEEE OUI and device-id string from DPCD. */
static void read_dp_device_vendor_id(struct dc_link *link)
{
	struct dp_device_vendor_id dp_id;

	/* read IEEE branch device id */
	core_link_read_dpcd(
		link,
		DP_BRANCH_OUI,
		(uint8_t *)&dp_id,
		sizeof(dp_id));

	link->dpcd_caps.branch_dev_id =
		(dp_id.ieee_oui[0] << 16) +
		(dp_id.ieee_oui[1] << 8) +
		dp_id.ieee_oui[2];

	memmove(
		link->dpcd_caps.branch_dev_name,
		dp_id.ieee_device_id,
		sizeof(dp_id.ieee_device_id));
}

/* Poll DP_SET_POWER (up to 10 tries) until the sink's AUX channel replies;
 * on persistent failure, toggle the sink power state and report an error.
 */
static enum dc_status wake_up_aux_channel(struct dc_link *link)
{
	enum dc_status status = DC_ERROR_UNEXPECTED;
	uint32_t aux_channel_retry_cnt = 0;
	uint8_t dpcd_power_state = '\0';

	while (status != DC_OK && aux_channel_retry_cnt < 10) {
		status = core_link_read_dpcd(link, DP_SET_POWER,
				&dpcd_power_state, sizeof(dpcd_power_state));

		/* Delay 1 ms if AUX CH is in power down state. Based on spec
		 * section 2.3.1.2, if AUX CH may be powered down due to
		 * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
		 * signal and may need up to 1 ms before being able to reply.
		 */
		if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
			fsleep(1000);
			aux_channel_retry_cnt++;
		}
	}

	if (status != DC_OK) {
		dpcd_power_state = DP_SET_POWER_D0;
		/* NOTE(review): status from the D0 write is immediately
		 * overwritten by the D3 write below, and the function returns
		 * DC_ERROR_UNEXPECTED regardless — confirm both writes are
		 * intentionally best-effort.
		 */
		status = core_link_write_dpcd(
				link,
				DP_SET_POWER,
				&dpcd_power_state,
				sizeof(dpcd_power_state));

		dpcd_power_state = DP_SET_POWER_D3;
		status = core_link_write_dpcd(
				link,
				DP_SET_POWER,
				&dpcd_power_state,
				sizeof(dpcd_power_state));
		DC_LOG_DC("%s: Failed to power up sink\n", __func__);
		return DC_ERROR_UNEXPECTED;
	}
	return DC_OK;
}

/* Decode DP_DOWNSTREAMPORT_PRESENT (and, for DPCD 1.1+, the detailed
 * capability block) into link->dpcd_caps dongle type and converter caps,
 * including PCON HDMI FRL bandwidth when DP-HDMI2.1 PCON is supported.
 */
static void get_active_converter_info(
	uint8_t data, struct dc_link *link)
{
	union dp_downstream_port_present ds_port = { .byte = data };
	memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));

	/* decode converter info*/
	if (!ds_port.fields.PORT_PRESENT) {
		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
		set_dongle_type(link->ddc,
				link->dpcd_caps.dongle_type);
		link->dpcd_caps.is_branch_dev = false;
		return;
	}

	/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
	link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;

	switch (ds_port.fields.PORT_TYPE) {
	case DOWNSTREAM_VGA:
		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
		break;
	case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
		/* At this point we don't know is it DVI or HDMI or DP++,
		 * assume DVI.*/
		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
		break;
	default:
		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
		break;
	}

	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
		uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
		union dwnstream_port_caps_byte0 *port_caps =
			(union dwnstream_port_caps_byte0 *)det_caps;
		if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
				det_caps, sizeof(det_caps)) == DC_OK) {

			switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
			/*Handle DP case as DONGLE_NONE*/
			case DOWN_STREAM_DETAILED_DP:
				link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
				break;
			case DOWN_STREAM_DETAILED_VGA:
				link->dpcd_caps.dongle_type =
					DISPLAY_DONGLE_DP_VGA_CONVERTER;
				break;
			case DOWN_STREAM_DETAILED_DVI:
				link->dpcd_caps.dongle_type =
					DISPLAY_DONGLE_DP_DVI_CONVERTER;
				break;
			case DOWN_STREAM_DETAILED_HDMI:
			case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
				/*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/
				link->dpcd_caps.dongle_type =
					DISPLAY_DONGLE_DP_HDMI_CONVERTER;
				link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
				if (ds_port.fields.DETAILED_CAPS) {

					union dwnstream_port_caps_byte3_hdmi
						hdmi_caps = {.raw = det_caps[3] };
					union dwnstream_port_caps_byte2
						hdmi_color_caps = {.raw = det_caps[2] };
					link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
						det_caps[1] * 2500;

					link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
						hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
					/*YCBCR capability only for HDMI case*/
					if (port_caps->bits.DWN_STRM_PORTX_TYPE
							== DOWN_STREAM_DETAILED_HDMI) {
						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
								hdmi_caps.bits.YCrCr422_PASS_THROUGH;
						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
								hdmi_caps.bits.YCrCr420_PASS_THROUGH;
						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
								hdmi_caps.bits.YCrCr422_CONVERSION;
						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
								hdmi_caps.bits.YCrCr420_CONVERSION;
					}

					link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
						translate_dpcd_max_bpc(
							hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);

					if (link->dc->caps.dp_hdmi21_pcon_support) {
						union hdmi_encoded_link_bw hdmi_encoded_link_bw;

						link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
								link_bw_kbps_from_raw_frl_link_rate_data(
										hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);

						// Intersect reported max link bw support with the supported link rate post FRL link training
						if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
								&hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
							link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
									link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
									hdmi_encoded_link_bw);
						}

						if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
							link->dpcd_caps.dongle_caps.extendedCapValid = true;
					}

					if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
						link->dpcd_caps.dongle_caps.extendedCapValid = true;
				}

				break;
			}
		}
	}

	set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);

	{
		/* Also cache the branch device's hardware/firmware revision. */
		struct dp_sink_hw_fw_revision dp_hw_fw_revision;

		core_link_read_dpcd(
			link,
			DP_BRANCH_REVISION_START,
			(uint8_t *)&dp_hw_fw_revision,
			sizeof(dp_hw_fw_revision));

		link->dpcd_caps.branch_hw_revision =
			dp_hw_fw_revision.ieee_hw_rev;

		memmove(
			link->dpcd_caps.branch_fw_revision,
			dp_hw_fw_revision.ieee_fw_rev,
			sizeof(dp_hw_fw_revision.ieee_fw_rev));
	}
	/* DPCD 1.4+ branch devices may expose the DFP capability extension. */
	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
			link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
		union dp_dfp_cap_ext dfp_cap_ext;
		memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext));
		core_link_read_dpcd(
				link,
				DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
				dfp_cap_ext.raw,
				sizeof(dfp_cap_ext.raw));
		link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
			dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
			(dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
			dfp_cap_ext.fields.max_video_h_active_width[0] +
			(dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
			dfp_cap_ext.fields.max_video_v_active_height[0] +
			(dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
		link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
			dfp_cap_ext.fields.encoding_format_caps;
		link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
			dfp_cap_ext.fields.rgb_color_depth_caps;
		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
			dfp_cap_ext.fields.ycbcr444_color_depth_caps;
		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
			dfp_cap_ext.fields.ycbcr422_color_depth_caps;
		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
			dfp_cap_ext.fields.ycbcr420_color_depth_caps;
		DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
		DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
		DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
		DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
		DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
	}
}

static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
		struct dc_link_settings *link_settings)
{
	/* Temporary Renoir-specific workaround PHY will sometimes be in bad
	 * state on hotplugging display from certain USB-C dongle, so add extra
	 * cycle of enabling and disabling the PHY before first link training.
*/ struct link_resource link_res = {0}; enum clock_source_id dp_cs_id = get_clock_source_id(link); dp_enable_link_phy(link, &link_res, link->connector_signal, dp_cs_id, link_settings); dp_disable_link_phy(link, &link_res, link->connector_signal); } bool dp_overwrite_extended_receiver_cap(struct dc_link *link) { uint8_t dpcd_data[16]; uint32_t read_dpcd_retry_cnt = 3; enum dc_status status = DC_ERROR_UNEXPECTED; union dp_downstream_port_present ds_port = { 0 }; union down_stream_port_count down_strm_port_count; union edp_configuration_cap edp_config_cap; int i; for (i = 0; i < read_dpcd_retry_cnt; i++) { status = core_link_read_dpcd( link, DP_DPCD_REV, dpcd_data, sizeof(dpcd_data)); if (status == DC_OK) break; } link->dpcd_caps.dpcd_rev.raw = dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) return false; ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - DP_DPCD_REV]; get_active_converter_info(ds_port.byte, link); down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - DP_DPCD_REV]; link->dpcd_caps.allow_invalid_MSA_timing_param = down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; link->dpcd_caps.max_ln_count.raw = dpcd_data[ DP_MAX_LANE_COUNT - DP_DPCD_REV]; link->dpcd_caps.max_down_spread.raw = dpcd_data[ DP_MAX_DOWNSPREAD - DP_DPCD_REV]; link->reported_link_cap.lane_count = link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; link->reported_link_cap.link_rate = dpcd_data[ DP_MAX_LINK_RATE - DP_DPCD_REV]; link->reported_link_cap.link_spread = link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? 
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; edp_config_cap.raw = dpcd_data[ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; link->dpcd_caps.panel_mode_edp = edp_config_cap.bits.ALT_SCRAMBLER_RESET; link->dpcd_caps.dpcd_display_control_capable = edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; return true; } void dpcd_set_source_specific_data(struct dc_link *link) { if (!link->dc->vendor_signature.is_valid) { enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED; struct dpcd_amd_signature amd_signature = {0}; struct dpcd_amd_device_id amd_device_id = {0}; amd_device_id.device_id_byte1 = (uint8_t)(link->ctx->asic_id.chip_id); amd_device_id.device_id_byte2 = (uint8_t)(link->ctx->asic_id.chip_id >> 8); amd_device_id.dce_version = (uint8_t)(link->ctx->dce_version); amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? core_link_read_dpcd(link, DP_SOURCE_OUI, (uint8_t *)(&amd_signature), sizeof(amd_signature)); if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; core_link_write_dpcd(link, DP_SOURCE_OUI, (uint8_t *)(&amd_signature), sizeof(amd_signature)); } core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, (uint8_t *)(&amd_device_id), sizeof(amd_device_id)); if (link->ctx->dce_version >= DCN_VERSION_2_0 && link->dc->caps.min_horizontal_blanking_period != 0) { uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; result_write_min_hblank = core_link_write_dpcd(link, DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), sizeof(hblank_size)); } DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X 
min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", result_write_min_hblank, link->link_index, link->ctx->dce_version, DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, link->dc->caps.min_horizontal_blanking_period, link->dpcd_caps.branch_dev_id, link->dpcd_caps.branch_dev_name[0], link->dpcd_caps.branch_dev_name[1], link->dpcd_caps.branch_dev_name[2], link->dpcd_caps.branch_dev_name[3], link->dpcd_caps.branch_dev_name[4], link->dpcd_caps.branch_dev_name[5]); } else { core_link_write_dpcd(link, DP_SOURCE_OUI, link->dc->vendor_signature.data.raw, sizeof(link->dc->vendor_signature.data.raw)); } } void dpcd_write_cable_id_to_dprx(struct dc_link *link) { if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || link->dpcd_caps.cable_id.raw == 0 || link->dprx_states.cable_id_written) return; core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, &link->dpcd_caps.cable_id.raw, sizeof(link->dpcd_caps.cable_id.raw)); link->dprx_states.cable_id_written = 1; } static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) { union dmub_rb_cmd cmd; if (!link->ctx->dmub_srv || link->ep_type != DISPLAY_ENDPOINT_PHY || link->link_enc->features.flags.bits.DP_IS_USB_C == 0) return false; memset(&cmd, 0, sizeof(cmd)); cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( link->dc, link->link_enc->transmitter); if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.cable_id.header.ret_status == 1) { cable_id->raw = cmd.cable_id.data.output_raw; DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); } return cmd.cable_id.header.ret_status == 1; } static void retrieve_cable_id(struct dc_link *link) { union dp_cable_id usbc_cable_id; link->dpcd_caps.cable_id.raw = 0; core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, &link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); if 
(get_usbc_cable_id(link, &usbc_cable_id)) link->dpcd_caps.cable_id = intersect_cable_id( &link->dpcd_caps.cable_id, &usbc_cable_id); } bool read_is_mst_supported(struct dc_link *link) { bool mst = false; enum dc_status st = DC_OK; union dpcd_rev rev; union mstm_cap cap; if (link->preferred_training_settings.mst_enable && *link->preferred_training_settings.mst_enable == false) { return false; } rev.raw = 0; cap.raw = 0; st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, sizeof(rev)); if (st == DC_OK && rev.raw >= DPCD_REV_12) { st = core_link_read_dpcd(link, DP_MSTM_CAP, &cap.raw, sizeof(cap)); if (st == DC_OK && cap.bits.MST_CAP == 1) mst = true; } return mst; } /* Read additional sink caps defined in source specific DPCD area * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) * TODO: Add FS caps and read from DP_SOURCE_SINK_FS_CAP as well */ static bool dpcd_read_sink_ext_caps(struct dc_link *link) { uint8_t dpcd_data = 0; uint8_t edp_general_cap2 = 0; if (!link) return false; if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) return false; link->dpcd_sink_ext_caps.raw = dpcd_data; if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK) return false; link->dpcd_caps.panel_luminance_control = (edp_general_cap2 & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) != 0; return true; } enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) { uint8_t lttpr_dpcd_data[8]; enum dc_status status; bool is_lttpr_present; /* Logic to determine LTTPR support*/ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) return DC_NOT_SUPPORTED; /* By reading LTTPR capability, RX assumes that we will enable * LTTPR extended aux timeout if LTTPR is present. 
*/ status = core_link_read_dpcd( link, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, lttpr_dpcd_data, sizeof(lttpr_dpcd_data)); link->dpcd_caps.lttpr_caps.revision.raw = lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; link->dpcd_caps.lttpr_caps.max_link_rate = lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; link->dpcd_caps.lttpr_caps.phy_repeater_cnt = lttpr_dpcd_data[DP_PHY_REPEATER_CNT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; link->dpcd_caps.lttpr_caps.max_lane_count = lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; link->dpcd_caps.lttpr_caps.mode = lttpr_dpcd_data[DP_PHY_REPEATER_MODE - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; link->dpcd_caps.lttpr_caps.max_ext_timeout = lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; /* If this chip cap is set, at least one retimer must exist in the chain * Override count to 1 if we receive a known bad count (0 or an invalid value) */ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { ASSERT(0); link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); } /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. 
*/ is_lttpr_present = dp_is_lttpr_present(link); if (is_lttpr_present) CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); return status; } static bool retrieve_link_cap(struct dc_link *link) { /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, * which means size 16 will be good for both of those DPCD register block reads */ uint8_t dpcd_data[16]; /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. */ uint8_t dpcd_dprx_data = '\0'; struct dp_device_vendor_id sink_id; union down_stream_port_count down_strm_port_count; union edp_configuration_cap edp_config_cap; union dp_downstream_port_present ds_port = { 0 }; enum dc_status status = DC_ERROR_UNEXPECTED; uint32_t read_dpcd_retry_cnt = 3; int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; const uint32_t post_oui_delay = 30; // 30ms bool is_fec_supported = false; bool is_dsc_basic_supported = false; bool is_dsc_passthrough_supported = false; memset(dpcd_data, '\0', sizeof(dpcd_data)); memset(&down_strm_port_count, '\0', sizeof(union down_stream_port_count)); memset(&edp_config_cap, '\0', sizeof(union edp_configuration_cap)); /* if extended timeout is supported in hardware, * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer * CTS 4.2.1.1 regression introduced by CTS specs requirement update. */ try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); status = dp_retrieve_lttpr_cap(link); if (status != DC_OK) { status = wake_up_aux_channel(link); if (status == DC_OK) dp_retrieve_lttpr_cap(link); else return false; } if (dp_is_lttpr_present(link)) configure_lttpr_mode_transparent(link); /* Read DP tunneling information. 
*/ status = dpcd_get_tunneling_device_data(link); dpcd_set_source_specific_data(link); /* Sink may need to configure internals based on vendor, so allow some * time before proceeding with possibly vendor specific transactions */ msleep(post_oui_delay); for (i = 0; i < read_dpcd_retry_cnt; i++) { status = core_link_read_dpcd( link, DP_DPCD_REV, dpcd_data, sizeof(dpcd_data)); if (status == DC_OK) break; } if (status != DC_OK) { dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); return false; } if (!dp_is_lttpr_present(link)) try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); { union training_aux_rd_interval aux_rd_interval; aux_rd_interval.raw = dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; link->dpcd_caps.ext_receiver_cap_field_present = aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { uint8_t ext_cap_data[16]; memset(ext_cap_data, '\0', sizeof(ext_cap_data)); for (i = 0; i < read_dpcd_retry_cnt; i++) { status = core_link_read_dpcd( link, DP_DP13_DPCD_REV, ext_cap_data, sizeof(ext_cap_data)); if (status == DC_OK) { memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); break; } } if (status != DC_OK) dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); } } link->dpcd_caps.dpcd_rev.raw = dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; if (link->dpcd_caps.ext_receiver_cap_field_present) { for (i = 0; i < read_dpcd_retry_cnt; i++) { status = core_link_read_dpcd( link, DP_DPRX_FEATURE_ENUMERATION_LIST, &dpcd_dprx_data, sizeof(dpcd_dprx_data)); if (status == DC_OK) break; } link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; if (status != DC_OK) dm_error("%s: Read DPRX caps data failed.\n", __func__); /* AdaptiveSyncCapability */ dpcd_dprx_data = 0; for (i = 0; i < read_dpcd_retry_cnt; i++) { status = core_link_read_dpcd( link, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, &dpcd_dprx_data, sizeof(dpcd_dprx_data)); if (status == DC_OK) break; } 
link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data; if (status != DC_OK) dm_error("%s: Read DPRX caps data failed. Addr:%#x\n", __func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1); } else { link->dpcd_caps.dprx_feature.raw = 0; } /* Error condition checking... * It is impossible for Sink to report Max Lane Count = 0. * It is possible for Sink to report Max Link Rate = 0, if it is * an eDP device that is reporting specialized link rates in the * SUPPORTED_LINK_RATE table. */ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) return false; ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - DP_DPCD_REV]; read_dp_device_vendor_id(link); /* TODO - decouple raw mst capability from policy decision */ link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable)); get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - DP_DPCD_REV]; link->dpcd_caps.allow_invalid_MSA_timing_param = down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; link->dpcd_caps.max_ln_count.raw = dpcd_data[ DP_MAX_LANE_COUNT - DP_DPCD_REV]; link->dpcd_caps.max_down_spread.raw = dpcd_data[ DP_MAX_DOWNSPREAD - DP_DPCD_REV]; link->reported_link_cap.lane_count = link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); link->reported_link_cap.link_spread = link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? 
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; edp_config_cap.raw = dpcd_data[ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; link->dpcd_caps.panel_mode_edp = edp_config_cap.bits.ALT_SCRAMBLER_RESET; link->dpcd_caps.dpcd_display_control_capable = edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; link->test_pattern_enabled = false; link->compliance_test_state.raw = 0; /* read sink count */ core_link_read_dpcd(link, DP_SINK_COUNT, &link->dpcd_caps.sink_count.raw, sizeof(link->dpcd_caps.sink_count.raw)); /* read sink ieee oui */ core_link_read_dpcd(link, DP_SINK_OUI, (uint8_t *)(&sink_id), sizeof(sink_id)); link->dpcd_caps.sink_dev_id = (sink_id.ieee_oui[0] << 16) + (sink_id.ieee_oui[1] << 8) + (sink_id.ieee_oui[2]); memmove( link->dpcd_caps.sink_dev_id_str, sink_id.ieee_device_id, sizeof(sink_id.ieee_device_id)); core_link_read_dpcd( link, DP_SINK_HW_REVISION_START, (uint8_t *)&dp_hw_fw_revision, sizeof(dp_hw_fw_revision)); link->dpcd_caps.sink_hw_revision = dp_hw_fw_revision.ieee_hw_rev; memmove( link->dpcd_caps.sink_fw_revision, dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); /* Quirk for Retina panels: wrong DP_MAX_LINK_RATE */ { uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; uint8_t fwrev_mbp_2018[] = { 7, 4 }; uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; /* We also check for the firmware revision as 16,1 models have an * identical device id and are incorrectly quirked otherwise. 
*/ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, sizeof(str_mbp_2018)) && (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, sizeof(fwrev_mbp_2018)) || !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, sizeof(fwrev_mbp_2018_vega)))) { link->reported_link_cap.link_rate = LINK_RATE_RBR2; } } memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { status = core_link_read_dpcd( link, DP_FEC_CAPABILITY, &link->dpcd_caps.fec_cap.raw, sizeof(link->dpcd_caps.fec_cap.raw)); status = core_link_read_dpcd( link, DP_DSC_SUPPORT, link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); if (status == DC_OK) { is_fec_supported = link->dpcd_caps.fec_cap.bits.FEC_CAPABLE; is_dsc_basic_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT; is_dsc_passthrough_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT; DC_LOG_DC("%s: FEC_Sink_Support: %s\n", __func__, str_yes_no(is_fec_supported)); DC_LOG_DC("%s: DSC_Basic_Sink_Support: %s\n", __func__, str_yes_no(is_dsc_basic_supported)); DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__, str_yes_no(is_dsc_passthrough_supported)); } if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { status = core_link_read_dpcd( link, DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 
= 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); } /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode * only if required. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && link->dpcd_caps.is_branch_dev && link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. * Clear FEC and DSC capabilities as a work around if that is not the case. */ link->wa_flags.dpia_forced_tbt3_mode = true; memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); } else link->wa_flags.dpia_forced_tbt3_mode = false; } if (!dpcd_read_sink_ext_caps(link)) link->dpcd_sink_ext_caps.raw = 0; if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); core_link_read_dpcd(link, DP_128B132B_SUPPORTED_LINK_RATES, &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) link->reported_link_cap.link_rate = LINK_RATE_UHBR20; else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) link->reported_link_cap.link_rate = 
LINK_RATE_UHBR10; else dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", link->reported_link_cap.link_rate / 100, link->reported_link_cap.link_rate % 100); core_link_read_dpcd(link, DP_SINK_VIDEO_FALLBACK_FORMATS, &link->dpcd_caps.fallback_formats.raw, sizeof(link->dpcd_caps.fallback_formats.raw)); DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); if (link->dpcd_caps.fallback_formats.raw == 0) { DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; } core_link_read_dpcd(link, DP_FEC_CAPABILITY_1, &link->dpcd_caps.fec_cap1.raw, sizeof(link->dpcd_caps.fec_cap1.raw)); DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) DC_LOG_DP2("\tFEC aggregated error counters are supported"); } retrieve_cable_id(link); dpcd_write_cable_id_to_dprx(link); /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); return true; } bool detect_dp_sink_caps(struct dc_link *link) { return retrieve_link_cap(link); } void detect_edp_sink_caps(struct dc_link *link) { uint8_t supported_link_rates[16]; uint32_t entry; uint32_t link_rate_in_khz; enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; uint8_t backlight_adj_cap; uint8_t general_edp_cap; 
retrieve_link_cap(link); link->dpcd_caps.edp_supported_link_rates_count = 0; memset(supported_link_rates, 0, sizeof(supported_link_rates)); /* * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" */ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && (link->panel_config.ilr.optimize_edp_link_rate || link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { // Read DPCD 00010h - 0001Fh 16 bytes at one shot core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, supported_link_rates, sizeof(supported_link_rates)); for (entry = 0; entry < 16; entry += 2) { // DPCD register reports per-lane link rate = 16-bit link rate capability // value X 200 kHz. Need multiplier to find link rate in kHz. link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + supported_link_rates[entry]) * 200; DC_LOG_DC("%s: eDP v1.4 supported sink rates: [%d] %d kHz\n", __func__, entry / 2, link_rate_in_khz); if (link_rate_in_khz != 0) { link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; link->dpcd_caps.edp_supported_link_rates_count++; if (link->reported_link_cap.link_rate < link_rate) link->reported_link_cap.link_rate = link_rate; } } } core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, &backlight_adj_cap, sizeof(backlight_adj_cap)); link->dpcd_caps.dynamic_backlight_capable_edp = (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, &general_edp_cap, sizeof(general_edp_cap)); link->dpcd_caps.set_power_state_capable_edp = (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; set_default_brightness_aux(link); core_link_read_dpcd(link, DP_EDP_DPCD_REV, &link->dpcd_caps.edp_rev, sizeof(link->dpcd_caps.edp_rev)); /* * PSR is only valid for eDP v1.3 or higher. 
*/ if (link->dpcd_caps.edp_rev >= DP_EDP_13) { core_link_read_dpcd(link, DP_PSR_SUPPORT, &link->dpcd_caps.psr_info.psr_version, sizeof(link->dpcd_caps.psr_info.psr_version)); if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, &link->dpcd_caps.psr_info.force_psrsu_cap, sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); core_link_read_dpcd(link, DP_PSR_CAPS, &link->dpcd_caps.psr_info.psr_dpcd_caps.raw, sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); } } /* * ALPM is only valid for eDP v1.4 or higher. */ if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, &link->dpcd_caps.alpm_caps.raw, sizeof(link->dpcd_caps.alpm_caps.raw)); /* * Read REPLAY info */ core_link_read_dpcd(link, DP_SINK_PR_PIXEL_DEVIATION_PER_LINE, &link->dpcd_caps.pr_info.pixel_deviation_per_line, sizeof(link->dpcd_caps.pr_info.pixel_deviation_per_line)); core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE, &link->dpcd_caps.pr_info.max_deviation_line, sizeof(link->dpcd_caps.pr_info.max_deviation_line)); } bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) { struct link_encoder *link_enc = NULL; if (!max_link_enc_cap) { DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); return false; } link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (link_enc && link_enc->funcs->get_max_link_cap) { link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); return true; } DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); max_link_enc_cap->lane_count = 1; max_link_enc_cap->link_rate = 6; return false; } const struct dc_link_settings *dp_get_verified_link_cap( const 
struct dc_link *link)
{
	/* A fully-specified preferred setting (both lane count and link rate
	 * known) overrides whatever was verified by link training.
	 */
	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
			link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
		return &link->preferred_link_setting;
	return &link->verified_link_cap;
}

/* Compute the maximum usable link settings for @link: start from the link
 * encoder's capability, then clamp against the sink's reported caps, the
 * cable ID (DP2), LTTPR repeater caps and debug overrides.
 */
struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
{
	struct dc_link_settings max_link_cap = {0};
	enum dc_link_rate lttpr_max_link_rate;
	enum dc_link_rate cable_max_link_rate;
	struct link_encoder *link_enc = NULL;

	link_enc = link_enc_cfg_get_link_enc(link);
	ASSERT(link_enc);

	/* get max link encoder capability */
	if (link_enc)
		link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);

	/* Lower link settings based on sink's link cap */
	if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
		max_link_cap.lane_count =
				link->reported_link_cap.lane_count;
	if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
		max_link_cap.link_rate =
				link->reported_link_cap.link_rate;
	if (link->reported_link_cap.link_spread < max_link_cap.link_spread)
		max_link_cap.link_spread =
				link->reported_link_cap.link_spread;

	/* Lower link settings based on cable attributes
	 * Cable ID is a DP2 feature to identify max certified link rate that
	 * a cable can carry. The cable identification method requires both
	 * cable and display hardware support. Since the specs comes late, it is
	 * anticipated that the first round of DP2 cables and displays may not
	 * be fully compatible to reliably return cable ID data. Therefore the
	 * decision of our cable id policy is that if the cable can return non
	 * zero cable id data, we will take cable's link rate capability into
	 * account. However if we get zero data, the cable link rate capability
	 * is considered inconclusive. In this case, we will not take cable's
	 * capability into account to avoid of over limiting hardware capability
	 * from users. The max overall link rate capability is still determined
	 * after actual dp pre-training. Cable id is considered as an auxiliary
	 * method of determining max link bandwidth capability.
	 */
	cable_max_link_rate = get_cable_max_link_rate(link);

	if (!link->dc->debug.ignore_cable_id &&
			cable_max_link_rate != LINK_RATE_UNKNOWN &&
			cable_max_link_rate < max_link_cap.link_rate)
		max_link_cap.link_rate = cable_max_link_rate;

	/* account for lttpr repeaters cap
	 * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
	 */
	if (dp_is_lttpr_present(link)) {
		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
		lttpr_max_link_rate = get_lttpr_max_link_rate(link);

		if (lttpr_max_link_rate < max_link_cap.link_rate)
			max_link_cap.link_rate = lttpr_max_link_rate;

		DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
						__func__,
						max_link_cap.lane_count,
						max_link_cap.link_rate);
	}

	/* Debug override: when UHBR is disabled, cap 128b/132b-encoded rates
	 * back down to HBR3 (the fastest 8b/10b rate).
	 */
	if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
			link->dc->debug.disable_uhbr)
		max_link_cap.link_rate = LINK_RATE_HIGH3;

	return max_link_cap;
}

/* Train the link repeatedly, starting from @known_limit_link_setting and
 * falling back to lower settings on failure, until a configuration trains
 * successfully. Each failed (or immediately lost) attempt increments
 * *@fail_count. On exit link->verified_link_cap holds the working settings,
 * or fail_safe_link_settings when nothing trained. Returns true on success.
 */
static bool dp_verify_link_cap(
	struct dc_link *link,
	struct dc_link_settings *known_limit_link_setting,
	int *fail_count)
{
	struct dc_link_settings cur_link_settings = {0};
	struct dc_link_settings max_link_settings = *known_limit_link_setting;
	bool success = false;
	bool skip_video_pattern;
	enum clock_source_id dp_cs_id = get_clock_source_id(link);
	enum link_training_result status = LINK_TRAINING_SUCCESS;
	union hpd_irq_data irq_data;
	struct link_resource link_res;

	memset(&irq_data, 0, sizeof(irq_data));
	cur_link_settings = max_link_settings;

	/* Grant extended timeout request */
	if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
		uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;

		core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
	}

	do {
		/* No temp link resource for this setting: skip straight to the
		 * loop condition, which decides the next fallback setting.
		 */
		if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings))
			continue;

		skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW;
		dp_enable_link_phy(
				link,
				&link_res,
				link->connector_signal,
				dp_cs_id,
				&cur_link_settings);

		status = dp_perform_link_training(
				link,
				&link_res,
				&cur_link_settings,
				skip_video_pattern);

		if (status == LINK_TRAINING_SUCCESS) {
			success = true;
			fsleep(1000);
			/* A link-loss IRQ right after a successful training
			 * still counts as a failed attempt.
			 */
			if (dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
					dp_parse_link_loss_status(
							link,
							&irq_data))
				(*fail_count)++;
		} else if (status == LINK_TRAINING_LINK_LOSS) {
			success = true;
			(*fail_count)++;
		} else {
			(*fail_count)++;
		}
		dp_trace_lt_total_count_increment(link, true);
		dp_trace_lt_result_update(link, status, true);
		dp_disable_link_phy(link, &link_res, link->connector_signal);
	} while (!success && decide_fallback_link_setting(link,
			&max_link_settings, &cur_link_settings, status));

	link->verified_link_cap = success ?
			cur_link_settings : fail_safe_link_settings;
	return success;
}

/* Retry wrapper around dp_verify_link_cap(): run up to @attempts detection +
 * verification rounds, keeping the last successfully verified cap so a flaky
 * round can fall back to it. Returns true only when a round completes with
 * zero failures.
 */
bool dp_verify_link_cap_with_retries(
	struct dc_link *link,
	struct dc_link_settings *known_limit_link_setting,
	int attempts)
{
	int i = 0;
	bool success = false;
	int fail_count = 0;
	struct dc_link_settings last_verified_link_cap = fail_safe_link_settings;

	dp_trace_detect_lt_init(link);

	/* USB-C combo PHY workaround, gated by a debug flag. */
	if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
			link->dc->debug.usbc_combo_phy_reset_wa)
		apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);

	dp_trace_set_lt_start_timestamp(link, false);
	for (i = 0; i < attempts; i++) {
		enum dc_connection_type type = dc_connection_none;

		memset(&link->verified_link_cap, 0,
				sizeof(struct dc_link_settings));
		/* Sink gone: nothing left to verify against. */
		if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
			link->verified_link_cap = fail_safe_link_settings;
			break;
		} else if (dp_verify_link_cap(link, known_limit_link_setting,
				&fail_count)) {
			last_verified_link_cap = link->verified_link_cap;
			if (fail_count == 0) {
				success = true;
				break;
			}
		} else {
			/* Round failed entirely: restore the best cap seen. */
			link->verified_link_cap = last_verified_link_cap;
		}
		fsleep(10 * 1000);
	}

	dp_trace_lt_fail_count_update(link, fail_count, true);
	dp_trace_set_lt_end_timestamp(link, true);

	return success;
}

/*
 * Check if there is a native DP or passive DP-HDMI dongle connected
 */
bool dp_is_sink_present(struct dc_link *link)
{
	enum gpio_result gpio_result;
	uint32_t clock_pin = 0;
	uint8_t retry = 0;
	struct ddc *ddc;

	enum connector_id connector_id =
		dal_graphics_object_id_get_connector_id(link->link_id);

	/* Default answer when the DDC pin cannot be probed: presence is
	 * assumed for DP/eDP/USB-C connectors.
	 */
	bool present =
		((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
		(connector_id == CONNECTOR_ID_EDP) ||
		(connector_id == CONNECTOR_ID_USBC));

	ddc = get_ddc_pin(link->ddc);

	if (!ddc) {
		BREAK_TO_DEBUGGER();
		return present;
	}

	/* Open GPIO and set it to I2C mode */
	/* Note: this GpioMode_Input will be converted
	 * to GpioConfigType_I2cAuxDualMode in GPIO component,
	 * which indicates we need additional delay
	 */
	if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
			GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
		dal_ddc_close(ddc);

		return present;
	}

	/*
	 * Read GPIO: DP sink is present if both clock and data pins are zero
	 *
	 * [W/A] plug-unplug DP cable, sometimes customer board has
	 * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI
	 * then monitor can't be light up. Add retry 3 times
	 * But in real passive dongle, it need additional 3ms to detect
	 */
	do {
		gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
		ASSERT(gpio_result == GPIO_RESULT_OK);
		if (clock_pin)
			fsleep(1000);
		else
			break;
	} while (retry++ < 3);

	present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;

	dal_ddc_close(ddc);

	return present;
}
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * * This file implements generic display communication protocols such as i2c, aux * and scdc. The file should not contain any specific applications of these * protocols such as display capability query, detection, or handshaking such as * link training. 
*/ #include "link_ddc.h" #include "vector.h" #include "dce/dce_aux.h" #include "dal_asic_id.h" #include "link_dpcd.h" #include "dm_helpers.h" #include "atomfirmware.h" #define DC_LOGGER_INIT(logger) static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga"; /* DP to Dual link DVI converter */ static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa"; static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2"; struct i2c_payloads { struct vector payloads; }; struct aux_payloads { struct vector payloads; }; static bool i2c_payloads_create( struct dc_context *ctx, struct i2c_payloads *payloads, uint32_t count) { if (dal_vector_construct( &payloads->payloads, ctx, count, sizeof(struct i2c_payload))) return true; return false; } static struct i2c_payload *i2c_payloads_get(struct i2c_payloads *p) { return (struct i2c_payload *)p->payloads.container; } static uint32_t i2c_payloads_get_count(struct i2c_payloads *p) { return p->payloads.count; } static void i2c_payloads_destroy(struct i2c_payloads *p) { if (!p) return; dal_vector_destruct(&p->payloads); } #define DDC_MIN(a, b) (((a) < (b)) ? 
(a) : (b)) static void i2c_payloads_add( struct i2c_payloads *payloads, uint32_t address, uint32_t len, uint8_t *data, bool write) { uint32_t payload_size = EDID_SEGMENT_SIZE; uint32_t pos; for (pos = 0; pos < len; pos += payload_size) { struct i2c_payload payload = { .write = write, .address = address, .length = DDC_MIN(payload_size, len - pos), .data = data + pos }; dal_vector_append(&payloads->payloads, &payload); } } static void ddc_service_construct( struct ddc_service *ddc_service, struct ddc_service_init_data *init_data) { enum connector_id connector_id = dal_graphics_object_id_get_connector_id(init_data->id); struct gpio_service *gpio_service = init_data->ctx->gpio_service; struct graphics_object_i2c_info i2c_info; struct gpio_ddc_hw_info hw_info; struct dc_bios *dcb = init_data->ctx->dc_bios; ddc_service->link = init_data->link; ddc_service->ctx = init_data->ctx; if (init_data->is_dpia_link || dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info) != BP_RESULT_OK) { ddc_service->ddc_pin = NULL; } else { DC_LOGGER_INIT(ddc_service->ctx->logger); DC_LOG_DC("BIOS object table - i2c_line: %d", i2c_info.i2c_line); DC_LOG_DC("BIOS object table - i2c_engine_id: %d", i2c_info.i2c_engine_id); hw_info.ddc_channel = i2c_info.i2c_line; if (ddc_service->link != NULL) hw_info.hw_supported = i2c_info.i2c_hw_assist; else hw_info.hw_supported = false; ddc_service->ddc_pin = dal_gpio_create_ddc( gpio_service, i2c_info.gpio_info.clk_a_register_index, 1 << i2c_info.gpio_info.clk_a_shift, &hw_info); } ddc_service->flags.EDID_QUERY_DONE_ONCE = false; ddc_service->flags.FORCE_READ_REPEATED_START = false; ddc_service->flags.EDID_STRESS_READ = false; ddc_service->flags.IS_INTERNAL_DISPLAY = connector_id == CONNECTOR_ID_EDP || connector_id == CONNECTOR_ID_LVDS; ddc_service->wa.raw = 0; } struct ddc_service *link_create_ddc_service( struct ddc_service_init_data *init_data) { struct ddc_service *ddc_service; ddc_service = kzalloc(sizeof(struct ddc_service), GFP_KERNEL); if 
(!ddc_service) return NULL; ddc_service_construct(ddc_service, init_data); return ddc_service; } static void ddc_service_destruct(struct ddc_service *ddc) { if (ddc->ddc_pin) dal_gpio_destroy_ddc(&ddc->ddc_pin); } void link_destroy_ddc_service(struct ddc_service **ddc) { if (!ddc || !*ddc) { BREAK_TO_DEBUGGER(); return; } ddc_service_destruct(*ddc); kfree(*ddc); *ddc = NULL; } void set_ddc_transaction_type( struct ddc_service *ddc, enum ddc_transaction_type type) { ddc->transaction_type = type; } bool link_is_in_aux_transaction_mode(struct ddc_service *ddc) { switch (ddc->transaction_type) { case DDC_TRANSACTION_TYPE_I2C_OVER_AUX: case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER: case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER: return true; default: break; } return false; } void set_dongle_type(struct ddc_service *ddc, enum display_dongle_type dongle_type) { ddc->dongle_type = dongle_type; } static uint32_t defer_delay_converter_wa( struct ddc_service *ddc, uint32_t defer_delay) { struct dc_link *link = ddc->link; if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER && link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 && (link->dpcd_caps.branch_fw_revision[0] < 0x01 || (link->dpcd_caps.branch_fw_revision[0] == 0x01 && link->dpcd_caps.branch_fw_revision[1] < 0x40)) && !memcmp(link->dpcd_caps.branch_dev_name, DP_VGA_DONGLE_BRANCH_DEV_NAME, sizeof(link->dpcd_caps.branch_dev_name))) return defer_delay > DPVGA_DONGLE_AUX_DEFER_WA_DELAY ? defer_delay : DPVGA_DONGLE_AUX_DEFER_WA_DELAY; if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 && !memcmp(link->dpcd_caps.branch_dev_name, DP_DVI_CONVERTER_ID_4, sizeof(link->dpcd_caps.branch_dev_name))) return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ? 
defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY; if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_006037 && !memcmp(link->dpcd_caps.branch_dev_name, DP_DVI_CONVERTER_ID_5, sizeof(link->dpcd_caps.branch_dev_name))) return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY_1MS ? I2C_OVER_AUX_DEFER_WA_DELAY_1MS : defer_delay; return defer_delay; } #define DP_TRANSLATOR_DELAY 5 uint32_t link_get_aux_defer_delay(struct ddc_service *ddc) { uint32_t defer_delay = 0; switch (ddc->transaction_type) { case DDC_TRANSACTION_TYPE_I2C_OVER_AUX: if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) || (DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) || (DISPLAY_DONGLE_DP_HDMI_CONVERTER == ddc->dongle_type)) { defer_delay = DP_TRANSLATOR_DELAY; defer_delay = defer_delay_converter_wa(ddc, defer_delay); } else /*sink has a delay different from an Active Converter*/ defer_delay = 0; break; case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER: defer_delay = DP_TRANSLATOR_DELAY; break; default: break; } return defer_delay; } static bool submit_aux_command(struct ddc_service *ddc, struct aux_payload *payload) { uint32_t retrieved = 0; bool ret = false; if (!ddc) return false; if (!payload) return false; do { struct aux_payload current_payload; bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= payload->length; uint32_t payload_length = is_end_of_payload ? payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; current_payload.address = payload->address; current_payload.data = &payload->data[retrieved]; current_payload.defer_delay = payload->defer_delay; current_payload.i2c_over_aux = payload->i2c_over_aux; current_payload.length = payload_length; /* set mot (middle of transaction) to false if it is the last payload */ current_payload.mot = is_end_of_payload ? 
payload->mot:true; current_payload.write_status_update = false; current_payload.reply = payload->reply; current_payload.write = payload->write; ret = link_aux_transfer_with_retries_no_mutex(ddc, &current_payload); retrieved += payload_length; } while (retrieved < payload->length && ret == true); return ret; } bool link_query_ddc_data( struct ddc_service *ddc, uint32_t address, uint8_t *write_buf, uint32_t write_size, uint8_t *read_buf, uint32_t read_size) { bool success = true; uint32_t payload_size = link_is_in_aux_transaction_mode(ddc) ? DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE; uint32_t write_payloads = (write_size + payload_size - 1) / payload_size; uint32_t read_payloads = (read_size + payload_size - 1) / payload_size; uint32_t payloads_num = write_payloads + read_payloads; if (!payloads_num) return false; if (link_is_in_aux_transaction_mode(ddc)) { struct aux_payload payload; payload.i2c_over_aux = true; payload.address = address; payload.reply = NULL; payload.defer_delay = link_get_aux_defer_delay(ddc); payload.write_status_update = false; if (write_size != 0) { payload.write = true; /* should not set mot (middle of transaction) to 0 * if there are pending read payloads */ payload.mot = !(read_size == 0); payload.length = write_size; payload.data = write_buf; success = submit_aux_command(ddc, &payload); } if (read_size != 0 && success) { payload.write = false; /* should set mot (middle of transaction) to 0 * since it is the last payload to send */ payload.mot = false; payload.length = read_size; payload.data = read_buf; success = submit_aux_command(ddc, &payload); } } else { struct i2c_command command = {0}; struct i2c_payloads payloads; if (!i2c_payloads_create(ddc->ctx, &payloads, payloads_num)) return false; command.payloads = i2c_payloads_get(&payloads); command.number_of_payloads = 0; command.engine = DDC_I2C_COMMAND_ENGINE; command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; i2c_payloads_add( &payloads, address, write_size, write_buf, true); 
i2c_payloads_add( &payloads, address, read_size, read_buf, false); command.number_of_payloads = i2c_payloads_get_count(&payloads); success = dm_helpers_submit_i2c( ddc->ctx, ddc->link, &command); i2c_payloads_destroy(&payloads); } return success; } int link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) { if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc || !ddc->ddc_pin) { return dce_aux_transfer_dmub_raw(ddc, payload, operation_result); } else { return dce_aux_transfer_raw(ddc, payload, operation_result); } } uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link) { uint32_t vendor_lttpr_write_address = 0xF004F; uint8_t offset; switch (link->dpcd_caps.lttpr_caps.phy_repeater_cnt) { case 0x80: // 1 lttpr repeater offset = 1; break; case 0x40: // 2 lttpr repeaters offset = 2; break; case 0x20: // 3 lttpr repeaters offset = 3; break; case 0x10: // 4 lttpr repeaters offset = 4; break; case 0x08: // 5 lttpr repeaters offset = 5; break; case 0x04: // 6 lttpr repeaters offset = 6; break; case 0x02: // 7 lttpr repeaters offset = 7; break; case 0x01: // 8 lttpr repeaters offset = 8; break; default: offset = 0xFF; } if (offset != 0xFF) { vendor_lttpr_write_address += ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); } return vendor_lttpr_write_address; } uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link) { return link_get_fixed_vs_pe_retimer_write_address(link) + 4; } bool link_configure_fixed_vs_pe_retimer(struct ddc_service *ddc, const uint8_t *data, uint32_t length) { struct aux_payload write_payload = { .i2c_over_aux = false, .write = true, .address = link_get_fixed_vs_pe_retimer_write_address(ddc->link), .length = length, .data = (uint8_t *) data, .reply = NULL, .mot = I2C_MOT_UNDEF, .write_status_update = false, .defer_delay = 0, }; return link_aux_transfer_with_retries_no_mutex(ddc, &write_payload); } bool 
link_query_fixed_vs_pe_retimer(struct ddc_service *ddc, uint8_t *data, uint32_t length) { struct aux_payload read_payload = { .i2c_over_aux = false, .write = false, .address = link_get_fixed_vs_pe_retimer_read_address(ddc->link), .length = length, .data = data, .reply = NULL, .mot = I2C_MOT_UNDEF, .write_status_update = false, .defer_delay = 0, }; return link_aux_transfer_with_retries_no_mutex(ddc, &read_payload); } bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, struct aux_payload *payload) { return dce_aux_transfer_with_retries(ddc, payload); } bool try_to_configure_aux_timeout(struct ddc_service *ddc, uint32_t timeout) { bool result = false; struct ddc *ddc_pin = ddc->ddc_pin; if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa && ddc->ctx->dce_version == DCN_VERSION_3_1) { /* Fixed VS workaround for AUX timeout */ const uint32_t fixed_vs_address = 0xF004F; const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc}; core_link_write_dpcd(ddc->link, fixed_vs_address, fixed_vs_data, sizeof(fixed_vs_data)); timeout = 3072; } /* Do not try to access nonexistent DDC pin. */ if (ddc->link->ep_type != DISPLAY_ENDPOINT_PHY) return true; if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) { ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout); result = true; } return result; } struct ddc *get_ddc_pin(struct ddc_service *ddc_service) { return ddc_service->ddc_pin; } void write_scdc_data(struct ddc_service *ddc_service, uint32_t pix_clk, bool lte_340_scramble) { bool over_340_mhz = pix_clk > 340000 ? 
1 : 0; uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_SINK_VERSION; uint8_t sink_version = 0; uint8_t write_buffer[2] = {0}; /*Lower than 340 Scramble bit from SCDC caps*/ if (ddc_service->link->local_sink && ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) return; link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &sink_version, sizeof(sink_version)); if (sink_version == 1) { /*Source Version = 1*/ write_buffer[0] = HDMI_SCDC_SOURCE_VERSION; write_buffer[1] = 1; link_query_ddc_data(ddc_service, slave_address, write_buffer, sizeof(write_buffer), NULL, 0); /*Read Request from SCDC caps*/ } write_buffer[0] = HDMI_SCDC_TMDS_CONFIG; if (over_340_mhz) { write_buffer[1] = 3; } else if (lte_340_scramble) { write_buffer[1] = 1; } else { write_buffer[1] = 0; } link_query_ddc_data(ddc_service, slave_address, write_buffer, sizeof(write_buffer), NULL, 0); } void read_scdc_data(struct ddc_service *ddc_service) { uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_TMDS_CONFIG; uint8_t tmds_config = 0; if (ddc_service->link->local_sink && ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) return; link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &tmds_config, sizeof(tmds_config)); if (tmds_config & 0x1) { union hdmi_scdc_status_flags_data status_data = {0}; uint8_t scramble_status = 0; offset = HDMI_SCDC_SCRAMBLER_STATUS; link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &scramble_status, sizeof(scramble_status)); offset = HDMI_SCDC_STATUS_FLAGS; link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &status_data.byte, sizeof(status_data.byte)); } }
linux-master
drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "virtual_stream_encoder.h"

/* Virtual (headless) stream encoder: every hardware programming hook below
 * is an intentional no-op, allowing a dc pipeline to operate without real
 * stream encoder hardware behind it.
 */

static void virtual_stream_encoder_dp_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	enum dc_color_space output_color_space,
	bool use_vsc_sdp_for_colorimetry,
	uint32_t enable_sdp_splitting) {}

static void virtual_stream_encoder_hdmi_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	int actual_pix_clk_khz,
	bool enable_audio) {}

static void virtual_stream_encoder_dvi_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	bool is_dual_link) {}

static void virtual_stream_encoder_set_throttled_vcp_size(
	struct stream_encoder *enc,
	struct fixed31_32 avg_time_slots_per_mtp) {}

static void virtual_stream_encoder_update_hdmi_info_packets(
	struct stream_encoder *enc,
	const struct encoder_info_frame *info_frame) {}

static void virtual_stream_encoder_stop_hdmi_info_packets(
	struct stream_encoder *enc) {}

static void virtual_stream_encoder_set_avmute(
	struct stream_encoder *enc,
	bool enable) {}

static void virtual_stream_encoder_update_dp_info_packets(
	struct stream_encoder *enc,
	const struct encoder_info_frame *info_frame) {}

static void virtual_stream_encoder_stop_dp_info_packets(
	struct stream_encoder *enc) {}

static void virtual_stream_encoder_dp_blank(
	struct dc_link *link,
	struct stream_encoder *enc) {}

static void virtual_stream_encoder_dp_unblank(
	struct dc_link *link,
	struct stream_encoder *enc,
	const struct encoder_unblank_param *param) {}

static void virtual_audio_mute_control(
	struct stream_encoder *enc,
	bool mute) {}

static void virtual_stream_encoder_reset_hdmi_stream_attribute(
	struct stream_encoder *enc) {}

static void virtual_enc_dp_set_odm_combine(
	struct stream_encoder *enc,
	bool odm_combine) {}

static void virtual_dig_connect_to_otg(
	struct stream_encoder *enc,
	int tg_inst) {}

static void virtual_setup_stereo_sync(
	struct stream_encoder *enc,
	int tg_inst,
	bool enable) {}
static void virtual_stream_encoder_set_dsc_pps_info_packet( struct stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps, bool immediate_update) {} static const struct stream_encoder_funcs virtual_str_enc_funcs = { .dp_set_odm_combine = virtual_enc_dp_set_odm_combine, .dp_set_stream_attribute = virtual_stream_encoder_dp_set_stream_attribute, .hdmi_set_stream_attribute = virtual_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = virtual_stream_encoder_dvi_set_stream_attribute, .set_throttled_vcp_size = virtual_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = virtual_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = virtual_stream_encoder_stop_hdmi_info_packets, .update_dp_info_packets = virtual_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = virtual_stream_encoder_stop_dp_info_packets, .dp_blank = virtual_stream_encoder_dp_blank, .dp_unblank = virtual_stream_encoder_dp_unblank, .audio_mute_control = virtual_audio_mute_control, .set_avmute = virtual_stream_encoder_set_avmute, .hdmi_reset_stream_attribute = virtual_stream_encoder_reset_hdmi_stream_attribute, .dig_connect_to_otg = virtual_dig_connect_to_otg, .setup_stereo_sync = virtual_setup_stereo_sync, .dp_set_dsc_pps_info_packet = virtual_stream_encoder_set_dsc_pps_info_packet, }; bool virtual_stream_encoder_construct( struct stream_encoder *enc, struct dc_context *ctx, struct dc_bios *bp) { if (!enc) return false; if (!bp) return false; enc->funcs = &virtual_str_enc_funcs; enc->ctx = ctx; enc->id = ENGINE_ID_VIRTUAL; enc->bp = bp; return true; } struct stream_encoder *virtual_stream_encoder_create( struct dc_context *ctx, struct dc_bios *bp) { struct stream_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL); if (!enc) return NULL; if (virtual_stream_encoder_construct(enc, ctx, bp)) return enc; BREAK_TO_DEBUGGER(); kfree(enc); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dm_services_types.h"

#include "virtual_link_encoder.h"

/* Virtual (headless) link encoder: hardware programming hooks are no-ops;
 * validation always passes and the reported max link cap is a fixed default.
 */

static bool virtual_link_encoder_validate_output_with_stream(
	struct link_encoder *enc,
	const struct dc_stream_state *stream) { return true; }

static void virtual_link_encoder_hw_init(struct link_encoder *enc) {}

static void virtual_link_encoder_setup(
	struct link_encoder *enc,
	enum signal_type signal) {}

static void virtual_link_encoder_enable_tmds_output(
	struct link_encoder *enc,
	enum clock_source_id clock_source,
	enum dc_color_depth color_depth,
	enum signal_type signal,
	uint32_t pixel_clock) {}

static void virtual_link_encoder_enable_dp_output(
	struct link_encoder *enc,
	const struct dc_link_settings *link_settings,
	enum clock_source_id clock_source) {}

static void virtual_link_encoder_enable_dp_mst_output(
	struct link_encoder *enc,
	const struct dc_link_settings *link_settings,
	enum clock_source_id clock_source) {}

static void virtual_link_encoder_disable_output(
	struct link_encoder *link_enc,
	enum signal_type signal) {}

static void virtual_link_encoder_dp_set_lane_settings(
	struct link_encoder *enc,
	const struct dc_link_settings *link_settings,
	const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) {}

static void virtual_link_encoder_dp_set_phy_pattern(
	struct link_encoder *enc,
	const struct encoder_set_dp_phy_pattern_param *param) {}

static void virtual_link_encoder_update_mst_stream_allocation_table(
	struct link_encoder *enc,
	const struct link_mst_stream_allocation_table *table) {}

static void virtual_link_encoder_connect_dig_be_to_fe(
	struct link_encoder *enc,
	enum engine_id engine,
	bool connect) {}

/* Free the encoder object and clear the caller's pointer. */
static void virtual_link_encoder_destroy(struct link_encoder **enc)
{
	kfree(*enc);
	*enc = NULL;
}

/* Report a fixed default capability (4 lanes, HBR, 30kHz downspread). */
static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc,
		struct dc_link_settings *link_settings)
{
	/* Set Default link settings */
	struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
			LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
	*link_settings = max_link_cap;
}

static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
	.validate_output_with_stream =
		virtual_link_encoder_validate_output_with_stream,
	.hw_init = virtual_link_encoder_hw_init,
	.setup = virtual_link_encoder_setup,
	.enable_tmds_output = virtual_link_encoder_enable_tmds_output,
	.enable_dp_output = virtual_link_encoder_enable_dp_output,
	.enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
	.disable_output = virtual_link_encoder_disable_output,
	.get_max_link_cap = virtual_link_encoder_get_max_link_cap,
	.dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
	.dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
	.update_mst_stream_allocation_table =
		virtual_link_encoder_update_mst_stream_allocation_table,
	.connect_dig_be_to_fe = virtual_link_encoder_connect_dig_be_to_fe,
	.destroy = virtual_link_encoder_destroy
};

/* Initialize @enc as a virtual link encoder from @init_data.
 * Installs the virtual vtable, copies the connector/transmitter identity,
 * and forces virtual signal/engine types. Always succeeds.
 */
bool virtual_link_encoder_construct(
	struct link_encoder *enc, const struct encoder_init_data *init_data)
{
	enc->funcs = &virtual_lnk_enc_funcs;
	enc->ctx = init_data->ctx;
	enc->id = init_data->encoder;

	enc->hpd_source = init_data->hpd_source;
	enc->connector = init_data->connector;

	enc->transmitter = init_data->transmitter;

	enc->output_signals = SIGNAL_TYPE_VIRTUAL;

	enc->preferred_engine = ENGINE_ID_VIRTUAL;

	return true;
}
linux-master
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "virtual_link_hwss.h" void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx) { } void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx) { } void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx) { } static void virtual_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { } static const struct link_hwss virtual_link_hwss = { .setup_stream_encoder = virtual_setup_stream_encoder, .reset_stream_encoder = virtual_reset_stream_encoder, .setup_stream_attribute = virtual_setup_stream_attribute, .disable_link_output = virtual_disable_link_output, }; const struct link_hwss *get_virtual_link_hwss(void) { return &virtual_link_hwss; }
linux-master
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "dm_helpers.h" #include "core_types.h" #include "resource.h" #include "dccg.h" #include "dce/dce_hwseq.h" #include "dcn30/dcn30_cm_common.h" #include "reg_helper.h" #include "abm.h" #include "hubp.h" #include "dchubbub.h" #include "timing_generator.h" #include "opp.h" #include "ipp.h" #include "mpc.h" #include "mcif_wb.h" #include "dc_dmub_srv.h" #include "link_hwss.h" #include "dpcd_defs.h" #include "dcn32_hwseq.h" #include "clk_mgr.h" #include "dsc.h" #include "dcn20/dcn20_optc.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32_resource.h" #include "link.h" #define DC_LOGGER_INIT(logger) #define CTX \ hws->ctx #define REG(reg)\ hws->regs->reg #define DC_LOGGER \ dc->ctx->logger #undef FN #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name void dcn32_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 
0 : 2; uint32_t org_ip_request_cntl = 0; if (hws->ctx->dc->debug.disable_dsc_power_gate) return; REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); switch (dsc_inst) { case 0: /* DSC0 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 1: /* DSC1 */ REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 2: /* DSC2 */ REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 3: /* DSC3 */ REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; default: BREAK_TO_DEBUGGER(); break; } if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } void dcn32_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { bool force_on = true; /* disable power gating */ uint32_t org_ip_request_cntl = 0; if (enable) force_on = false; REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); /* DCHUBP0/1/2/3 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); /* DCS0/1/2/3 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 
0); } void dcn32_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 0 : 2; if (hws->ctx->dc->debug.disable_hubp_power_gate) return; if (REG(DOMAIN0_PG_CONFIG) == 0) return; switch (hubp_inst) { case 0: REG_SET(DOMAIN0_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 1: REG_SET(DOMAIN1_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 2: REG_SET(DOMAIN2_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 3: REG_SET(DOMAIN3_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; default: BREAK_TO_DEBUGGER(); break; } } static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) { int i; /* First, check no-memory-request case */ for (i = 0; i < dc->current_state->stream_count; i++) { if ((dc->current_state->stream_status[i].plane_count) && (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)) /* Fail eligibility on a visible stream */ break; } if (i == dc->current_state->stream_count) return true; return false; } /* This function loops through every surface that needs to be cached in CAB for SS, * and calculates the total number of ways required to store all surfaces (primary, * meta, cursor). 
*/ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) { int i; uint8_t num_ways = 0; uint32_t mall_ss_size_bytes = 0; mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; // TODO add additional logic for PSR active stream exclusion optimization // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes; // Include cursor size for CAB allocation for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i]; if (!pipe->stream || !pipe->plane_state) continue; mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false); } // Convert number of cache lines required to number of ways if (dc->debug.force_mall_ss_num_ways > 0) { num_ways = dc->debug.force_mall_ss_num_ways; } else { num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes); } return num_ways; } bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) { union dmub_rb_cmd cmd; uint8_t ways, i; int j; bool mall_ss_unsupported = false; struct dc_plane_state *plane = NULL; if (!dc->ctx->dmub_srv) return false; for (i = 0; i < dc->current_state->stream_count; i++) { /* MALL SS messaging is not supported with PSR at this time */ if (dc->current_state->streams[i] != NULL && dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) return false; } if (enable) { if (dc->current_state) { /* 1. Check no memory request case for CAB. * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message */ if (dcn32_check_no_memory_request_for_cab(dc)) { /* Enable no-memory-requests case */ memset(&cmd, 0, sizeof(cmd)); cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } /* 2. Check if all surfaces can fit in CAB. 
* If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message * and configure HUBP's to fetch from MALL */ ways = dcn32_calculate_cab_allocation(dc, dc->current_state); /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo, * or TMZ surface, don't try to enter MALL. */ for (i = 0; i < dc->current_state->stream_count; i++) { for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { plane = dc->current_state->stream_status[i].plane_states[j]; if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO || plane->address.tmz_surface) { mall_ss_unsupported = true; break; } } if (mall_ss_unsupported) break; } if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) { memset(&cmd, 0, sizeof(cmd)); cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); cmd.cab.cab_alloc_ways = ways; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } } return false; } /* Disable CAB */ memset(&cmd, 0, sizeof(cmd)); cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } /* Send DMCUB message with SubVP pipe info * - For each pipe in context, populate payload with required SubVP information * if the pipe is using SubVP for MCLK switch * - This function must be called while the DMUB HW lock is acquired by driver */ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context) { int i; bool enable_subvp = false; if (!dc->ctx || !dc->ctx->dmub_srv) return; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { // 
There is at least 1 SubVP pipe, so enable SubVP enable_subvp = true; break; } } dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp); } /* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and: * 1. Any full update for any SubVP main pipe * 2. Any immediate flip for any SubVP pipe * 3. Any flip for DRR pipe * 4. If SubVP was previously in use (i.e. in old context) */ void dcn32_subvp_pipe_control_lock(struct dc *dc, struct dc_state *context, bool lock, bool should_lock_all_pipes, struct pipe_ctx *top_pipe_to_program, bool subvp_prev_use) { unsigned int i = 0; bool subvp_immediate_flip = false; bool subvp_in_use = false; struct pipe_ctx *pipe; for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { subvp_in_use = true; break; } } if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) { if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN && top_pipe_to_program->plane_state->flip_immediate) subvp_immediate_flip = true; } // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. 
if ((subvp_in_use && (should_lock_all_pipes || subvp_immediate_flip)) || (!subvp_in_use && subvp_prev_use)) { union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; if (!lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN && should_lock_all_pipes) pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); } } hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; hw_lock_cmd.bits.lock = lock; hw_lock_cmd.bits.should_release = !lock; dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); } } void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) { struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; bool lock = params->subvp_pipe_control_lock_fast_params.lock; struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; bool subvp_immediate_flip = false; if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && pipe_ctx->plane_state->flip_immediate) subvp_immediate_flip = true; } // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. 
if (subvp_immediate_flip) { union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; hw_lock_cmd.bits.lock = lock; hw_lock_cmd.bits.should_release = !lock; dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); } } bool dcn32_set_mpc_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = false; const struct pwl_params *shaper_lut = NULL; //get the shaper lut params if (stream->func_shaper) { if (stream->func_shaper->type == TF_TYPE_HWPWL) shaper_lut = &stream->func_shaper->pwl; else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper, &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; } } if (stream->lut3d_func && stream->lut3d_func->state.bits.initialized == 1) { result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, mpcc_id); result = mpc->funcs->program_shaper(mpc, shaper_lut, mpcc_id); } return result; } bool dcn32_set_mcm_luts( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = true; struct pwl_params *lut_params = NULL; // 1D LUT if (plane_state->blend_tf) { if (plane_state->blend_tf->type == TF_TYPE_HWPWL) lut_params = &plane_state->blend_tf->pwl; else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->blend_tf, &dpp_base->regamma_params, false); lut_params = &dpp_base->regamma_params; } } result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); // 
Shaper if (plane_state->in_shaper_func) { if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) lut_params = &plane_state->in_shaper_func->pwl; else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { // TODO: dpp_base replace ASSERT(false); cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->in_shaper_func, &dpp_base->shaper_params, true); lut_params = &dpp_base->shaper_params; } } result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); // 3D if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1) result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id); else result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); return result; } bool dcn32_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dce_hwseq *hws = dc->hwseq; struct mpc *mpc = dc->res_pool->mpc; struct dpp *dpp_base = pipe_ctx->plane_res.dpp; enum dc_transfer_func_predefined tf; bool result = true; struct pwl_params *params = NULL; if (mpc == NULL || plane_state == NULL) return false; tf = TRANSFER_FUNCTION_UNITY; if (plane_state->in_transfer_func && plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) tf = plane_state->in_transfer_func->tf; dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); if (plane_state->in_transfer_func) { if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) params = &plane_state->in_transfer_func->pwl; else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, &dpp_base->degamma_params, false)) params = &dpp_base->degamma_params; } dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); if (pipe_ctx->stream_res.opp && pipe_ctx->stream_res.opp->ctx && hws->funcs.set_mcm_luts) result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state); return result; } bool dcn32_set_output_transfer_func(struct dc *dc, struct pipe_ctx 
*pipe_ctx, const struct dc_stream_state *stream) { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; struct pwl_params *params = NULL; bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { /*program shaper and 3dlut in MPC*/ ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { if (stream->out_transfer_func->type == TF_TYPE_HWPWL) params = &stream->out_transfer_func->pwl; else if (pipe_ctx->stream->out_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format( stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } } mpc->funcs->set_output_gamma(mpc, mpcc_id, params); return ret; } /* Program P-State force value according to if pipe is using SubVP / FPO or not: * 1. Reset P-State force on all pipes first * 2. For each main pipe, force P-State disallow (P-State allow moderated by DMUB) */ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) { int i; /* Unforce p-state for each pipe if it is not FPO or SubVP. * For FPO and SubVP, if it's already forced disallow, leave * it as disallow. */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); } /* Today only FPO uses cursor P-State force. Only clear cursor P-State force * if it's not FPO. 
*/ if (!pipe->stream || !pipe->stream->fpo_in_use) { if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false); } } /* Loop through each pipe -- for each subvp main pipe force p-state allow equal to false. */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); } if (pipe->stream && pipe->stream->fpo_in_use) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); /* For now only force cursor p-state disallow for FPO * Needs to be added for subvp once FW side gets updated */ if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, true); } } } /* Update MALL_SEL register based on if pipe / plane * is a phantom pipe, main pipe, and if using MALL * for SS. 
*/ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) { int i; unsigned int num_ways = dcn32_calculate_cab_allocation(dc, context); bool cache_cursor = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; switch (hubp->curs_attr.color_format) { case CURSOR_MODE_MONO: cursor_size /= 2; break; case CURSOR_MODE_COLOR_1BIT_AND: case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: cursor_size *= 4; break; case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: default: cursor_size *= 8; break; } if (cursor_size > 16384) cache_cursor = true; if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { hubp->funcs->hubp_update_mall_sel(hubp, 1, false); } else { // MALL not supported with Stereo3D hubp->funcs->hubp_update_mall_sel(hubp, num_ways <= dc->caps.cache_num_ways && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED && pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO && !pipe->plane_state->address.tmz_surface ? 2 : 0, cache_cursor); } } } } /* Program the sub-viewport pipe configuration after the main / phantom pipes * have been programmed in hardware. * 1. Update force P-State for all the main pipes (disallow P-state) * 2. Update MALL_SEL register * 3. 
Program FORCE_ONE_ROW_FOR_FRAME for main subvp pipes */ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) { int i; struct dce_hwseq *hws = dc->hwseq; // Don't force p-state disallow -- can't block dummy p-state // Update MALL_SEL register for each pipe if (hws && hws->funcs.update_mall_sel) hws->funcs.update_mall_sel(dc, context); // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) { /* TODO - remove setting CURSOR_REQ_MODE to 0 for legacy cases * - need to investigate single pipe MPO + SubVP case to * see if CURSOR_REQ_MODE will be back to 1 for SubVP * when it should be 0 for MPO */ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { hubp->funcs->hubp_prepare_subvp_buffering(hubp, true); } } } } static void dcn32_initialize_min_clocks(struct dc *dc) { struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ; clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000; clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; clocks->fclk_p_state_change_support = true; clocks->p_state_change_support = true; if (dc->debug.disable_boot_optimizations) { clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; } else { /* Even though DPG_EN = 1 for the connected display, it still requires the * correct timing so we cannot set DISPCLK to min freq or it could cause * audio corruption. 
Read current DISPCLK from DENTIST and request the same * freq to ensure that the timing is valid and unchanged. */ clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr); } dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, dc->current_state, true); } void dcn32_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // Initialize the dccg if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); hws->funcs.disable_vga(dc->hwseq); } // Set default OPTC memory power states if (dc->debug.enable_mem_low_power.bits.optc) { // Shutdown when unassigned and light sleep in VBLANK REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); } if (dc->debug.enable_mem_low_power.bits.vga) { // Power down VGA memory REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); } if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; if (res_pool->dccg && res_pool->hubbub) { (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, &res_pool->ref_clocks.dccg_ref_clock_inKhz); (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, res_pool->ref_clocks.dccg_ref_clock_inKhz, &res_pool->ref_clocks.dchub_ref_clock_inKhz); } else { // Not all ASICs have DCCG sw component res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); for (i = 0; i < 
dc->link_count; i++) { /* Power up AND update implementation according to the * required signal (which may be different from the * default signal on connector). */ struct dc_link *link = dc->links[i]; link->link_enc->funcs->hw_init(link->link_enc); /* Check for enabled DIG to identify enabled display */ if (link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { link->link_status.link_active = true; link->phy_state.symclk_state = SYMCLK_ON_TX_ON; if (link->link_enc->funcs->fec_is_active && link->link_enc->funcs->fec_is_active(link->link_enc)) link->fec_state = dc_link_fec_enabled; } } /* enable_power_gating_plane before dsc_pg_control because * FORCEON = 1 with hw default value on bootup, resume from s3 */ if (hws->funcs.enable_power_gating_plane) hws->funcs.enable_power_gating_plane(dc->hwseq, true); /* we want to turn off all dp displays before doing detection */ dc->link_srv->blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. * Otherwise, if taking control is not possible, we need to power * everything down. */ if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { /* Disable boot optimizations means power down everything including PHY, DIG, * and OTG (i.e. the boot is not optimized because we do a full power down). */ if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations) dc->hwss.enable_accelerated_mode(dc, dc->current_state); else hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); dcn32_initialize_min_clocks(dc); /* On HW init, allow idle optimizations after pipes have been turned off. * * In certain D3 cases (i.e. 
BOCO / BOMACO) it's possible that hardware state * is reset (i.e. not in idle at the time hw init is called), but software state * still has idle_optimizations = true, so we must disable idle optimizations first * (i.e. set false), then re-enable (set true). */ dc_allow_idle_optimizations(dc, false); dc_allow_idle_optimizations(dc, true); } /* In headless boot cases, DIG may be turned * on which causes HW/SW discrepancies. * To avoid this, power down hardware on boot * if DIG is turned on and seamless boot not enabled */ if (!dc->config.seamless_boot_edp_requested) { struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link; dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num) { for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; if (edp_link->link_enc->funcs->is_dig_enabled && edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && dc->hwss.edp_backlight_control && dc->hwss.power_down && dc->hwss.edp_power_control) { dc->hwss.edp_backlight_control(edp_link, false); dc->hwss.power_down(dc); dc->hwss.edp_power_control(edp_link, false); } } } else { for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; if (link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && dc->hwss.power_down) { dc->hwss.power_down(dc); break; } } } } for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; audio->funcs->hw_init(audio); } for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; if (link->panel_cntl) backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL) abms[i]->funcs->abm_init(abms[i], backlight); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ REG_WRITE(DIO_MEM_PWR_CTRL, 0); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 
REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); if (dc->res_pool->hubbub->funcs->force_pstate_change_control) dc->res_pool->hubbub->funcs->force_pstate_change_control( dc->res_pool->hubbub, false, false); if (dc->res_pool->hubbub->funcs->init_crb) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0) dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc); // Get DMCUB capabilities if (dc->ctx->dmub_srv) { dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } } static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, int opp_cnt) { bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); int flow_ctrl_cnt; if (opp_cnt >= 2) hblank_halved = true; flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - stream->timing.h_border_left - stream->timing.h_border_right; if (hblank_halved) flow_ctrl_cnt /= 2; /* ODM combine 4:1 case */ if (opp_cnt == 4) flow_ctrl_cnt /= 2; return flow_ctrl_cnt; } static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct 
display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; ASSERT(dsc); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; if (enable) { struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; ASSERT(odm_dsc); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; dsc_cfg.pic_width *= opp_cnt; optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in OPTC */ DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, optc_dsc_mode, dsc_optc_cfg.bytes_per_pixel, dsc_optc_cfg.slice_width); } else { /* disable DSC in OPTC */ pipe_ctx->stream_res.tg->funcs->set_dsc_config( pipe_ctx->stream_res.tg, OPTC_DSC_DISABLED, 0, 0); /* disable DSC block */ dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { ASSERT(odm_pipe->stream_res.dsc); odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); } } } /* * Given any pipe_ctx, return the total ODM combine factor, and optionally return * the OPPids which are used * */ static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances) { unsigned int opp_count = 1; struct pipe_ctx *odm_pipe; /* First get to the top pipe */ for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe) ; /* First pipe is always used */ if (opp_instances) opp_instances[0] = odm_pipe->stream_res.opp->inst; /* Find and count odm pipes, if any */ for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { if (opp_instances) opp_instances[opp_count] = odm_pipe->stream_res.opp->inst; opp_count++; } return opp_count; } void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); struct mpc_dwb_flow_control flow_control; struct mpc *mpc = dc->res_pool->mpc; int i; opp_cnt = get_odm_config(pipe_ctx, opp_inst); if (opp_cnt > 1) pipe_ctx->stream_res.tg->funcs->set_odm_combine( pipe_ctx->stream_res.tg, opp_inst, 
opp_cnt, &pipe_ctx->stream->timing); else pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; flow_control.flow_ctrl_mode = 0; flow_control.flow_ctrl_cnt0 = 0x80; flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); if (mpc->funcs->set_out_rate_control) { for (i = 0; i < opp_cnt; ++i) { mpc->funcs->set_out_rate_control( mpc, opp_inst[i], true, rate_control_2x_pclk, &flow_control); } } for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( odm_pipe->stream_res.opp, true); } if (pipe_ctx->stream_res.dsc) { struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC); /* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */ if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe->stream_res.dsc) { struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc; /* disconnect DSC block from stream */ dsc->funcs->dsc_disconnect(dsc); } } } unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) { struct dc_stream_state *stream = pipe_ctx->stream; unsigned int odm_combine_factor = 0; bool two_pix_per_container = false; two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_1; } else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) { *k1_div = PIXEL_RATE_DIV_BY_1; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) *k2_div = PIXEL_RATE_DIV_BY_2; else 
*k2_div = PIXEL_RATE_DIV_BY_4; } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { if (two_pix_per_container) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_2; } else { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_4; if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) *k2_div = PIXEL_RATE_DIV_BY_2; } } if ((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA)) ASSERT(false); return odm_combine_factor; } void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) { uint32_t pix_per_cycle = 1; uint32_t odm_combine_factor = 1; if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc) return; odm_combine_factor = get_odm_config(pipe_ctx, NULL); if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1 || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) pix_per_cycle = 2; if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode) pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc, pix_per_cycle); } void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) { unsigned int i; struct pipe_ctx *pipe = NULL; bool otg_disabled[MAX_PIPES] = {false}; for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (!resource_is_pipe_type(pipe, OTG_MASTER)) continue; if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; } } hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (otg_disabled[i]) pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); } } void 
dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { struct encoder_unblank_param params = {0}; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dce_hwseq *hws = link->dc->hwseq; struct pipe_ctx *odm_pipe; uint32_t pix_per_cycle = 1; params.opp_cnt = 1; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) params.opp_cnt++; /* only 3 items below are used by unblank */ params.timing = pipe_ctx->stream->timing; params.link_settings.link_rate = link_settings->link_rate; if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( pipe_ctx->stream_res.hpo_dp_stream_enc, pipe_ctx->stream_res.tg->inst); } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1 || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) { params.timing.pix_clk_100hz /= 2; pix_per_cycle = 2; } pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( pipe_ctx->stream_res.stream_enc, pix_per_cycle > 1); pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params); } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) hws->funcs.edp_backlight_control(link, true); } bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) return false; if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dc->debug.enable_dp_dig_pixel_rate_div_policy) return true; return false; } static void apply_symclk_on_tx_off_wa(struct dc_link *link) { /* There are use cases where SYMCLK is referenced by OTG. For instance * for TMDS signal, OTG relies SYMCLK even if TX video output is off. 
* However current link interface will power off PHY when disabling link * output. This will turn off SYMCLK generated by PHY. The workaround is * to identify such case where SYMCLK is still in use by OTG when we * power off PHY. When this is detected, we will temporarily power PHY * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling * program_pix_clk interface. When OTG is disabled, we will then power * off PHY by calling disable link output again. * * In future dcn generations, we plan to rework transmitter control * interface so that we could have an option to set SYMCLK ON TX OFF * state in one step without this workaround */ struct dc *dc = link->ctx->dc; struct pipe_ctx *pipe_ctx = NULL; uint8_t i; if (link->phy_state.symclk_ref_cnts.otg > 0) { for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && pipe_ctx->stream->link == link) { pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, dc->link_srv->dp_get_encoding_format( &pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings); link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; break; } } } } void dcn32_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { struct dc *dc = link->ctx->dc; const struct link_hwss *link_hwss = get_link_hwss(link, link_res); struct dmcu *dmcu = dc->res_pool->dmcu; if (signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, false); else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); link_hwss->disable_link_output(link, link_res, signal); link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; if (signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_power_control(link, false); else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->unlock_phy(dmcu); 
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); apply_symclk_on_tx_off_wa(link); } /* For SubVP the main pipe can have a viewport position change * without a full update. In this case we must also update the * viewport positions for the phantom pipe accordingly. */ void dcn32_update_phantom_vp_position(struct dc *dc, struct dc_state *context, struct pipe_ctx *phantom_pipe) { uint32_t i; struct dc_plane_state *phantom_plane = phantom_pipe->plane_state; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN && pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) { if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) { phantom_plane->src_rect.x = pipe->plane_state->src_rect.x; phantom_plane->src_rect.y = pipe->plane_state->src_rect.y; phantom_plane->clip_rect.x = pipe->plane_state->clip_rect.x; phantom_plane->dst_rect.x = pipe->plane_state->dst_rect.x; phantom_plane->dst_rect.y = pipe->plane_state->dst_rect.y; phantom_pipe->plane_state->update_flags.bits.position_change = 1; resource_build_scaling_params(phantom_pipe); return; } } } } /* Treat the phantom pipe as if it needs to be fully enabled. * If the pipe was previously in use but not phantom, it would * have been disabled earlier in the sequence so we need to run * the full enable sequence. 
*/ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) { phantom_pipe->update_flags.raw = 0; if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { phantom_pipe->update_flags.bits.enable = 1; phantom_pipe->update_flags.bits.mpcc = 1; phantom_pipe->update_flags.bits.dppclk = 1; phantom_pipe->update_flags.bits.hubp_interdependent = 1; phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; phantom_pipe->update_flags.bits.gamut_remap = 1; phantom_pipe->update_flags.bits.scaler = 1; phantom_pipe->update_flags.bits.viewport = 1; phantom_pipe->update_flags.bits.det_size = 1; if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { phantom_pipe->update_flags.bits.odm = 1; phantom_pipe->update_flags.bits.global_sync = 1; } } } } bool dcn32_dsc_pg_status( struct dce_hwseq *hws, unsigned int dsc_inst) { uint32_t pwr_status = 0; switch (dsc_inst) { case 0: /* DSC0 */ REG_GET(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status); break; case 1: /* DSC1 */ REG_GET(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status); break; case 2: /* DSC2 */ REG_GET(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status); break; case 3: /* DSC3 */ REG_GET(DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status); break; default: BREAK_TO_DEBUGGER(); break; } return pwr_status == 0; } void dcn32_update_dsc_pg(struct dc *dc, struct dc_state *context, bool safe_to_disable) { struct dce_hwseq *hws = dc->hwseq; int i; for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { struct display_stream_compressor *dsc = dc->res_pool->dscs[i]; bool is_dsc_ungated = hws->funcs.dsc_pg_status(hws, dsc->inst); if (context->res_ctx.is_dsc_acquired[i]) { if (!is_dsc_ungated) { hws->funcs.dsc_pg_control(hws, dsc->inst, true); } } else if (safe_to_disable) { if (is_dsc_ungated) { hws->funcs.dsc_pg_control(hws, dsc->inst, false); } } } } void dcn32_enable_phantom_streams(struct dc *dc, struct 
dc_state *context) { unsigned int i; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; /* If an active, non-phantom pipe is being transitioned into a phantom * pipe, wait for the double buffer update to complete first before we do * ANY phantom pipe programming. */ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { old_pipe->stream_res.tg->funcs->wait_for_state( old_pipe->stream_res.tg, CRTC_STATE_VBLANK); old_pipe->stream_res.tg->funcs->wait_for_state( old_pipe->stream_res.tg, CRTC_STATE_VACTIVE); } } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { // If old context or new context has phantom pipes, apply // the phantom timings now. We can't change the phantom // pipe configuration safely without driver acquiring // the DMCUB lock first. 
dc->hwss.apply_ctx_to_hw(dc, context); break; } } } /* Blank pixel data during initialization */ void dcn32_init_blank( struct dc *dc, struct timing_generator *tg) { struct dce_hwseq *hws = dc->hwseq; enum dc_color_space color_space; struct tg_color black_color = {0}; struct output_pixel_processor *opp = NULL; struct output_pixel_processor *bottom_opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; uint32_t otg_active_width, otg_active_height; uint32_t i; /* program opp dpg blank color */ color_space = COLOR_SPACE_SRGB; color_space_to_black_color(dc, color_space, &black_color); /* get the OTG active size */ tg->funcs->get_otg_active_size(tg, &otg_active_width, &otg_active_height); /* get the OPTC source */ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) { ASSERT(false); return; } for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) { opp = dc->res_pool->opps[i]; break; } } if (num_opps == 2) { otg_active_width = otg_active_width / 2; if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) { ASSERT(false); return; } for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src1) { bottom_opp = dc->res_pool->opps[i]; break; } } } if (opp && opp->funcs->opp_set_disp_pattern_generator) opp->funcs->opp_set_disp_pattern_generator( opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, otg_active_height, 0); if (num_opps == 2) { if (bottom_opp && bottom_opp->funcs->opp_set_disp_pattern_generator) { bottom_opp->funcs->opp_set_disp_pattern_generator( bottom_opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, otg_active_height, 0); hws->funcs.wait_for_blank_complete(bottom_opp); } } if 
(opp) hws->funcs.wait_for_blank_complete(opp); }
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "core_types.h"
#include "link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32_dio_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
#include "link_enc_cfg.h"
#include "gpio_service_interface.h"

#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif

#define CTX \
	enc10->base.ctx
#define DC_LOGGER \
	enc10->base.ctx->logger

#define REG(reg)\
	(enc10->link_regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc10->link_shift->field_name, enc10->link_mask->field_name

#define AUX_REG(reg)\
	(enc10->aux_regs->reg)

#define AUX_REG_READ(reg_name) \
	dm_read_reg(CTX, AUX_REG(reg_name))

#define AUX_REG_WRITE(reg_name, val) \
	dm_write_reg(CTX, AUX_REG(reg_name), val)

/*
 * One-time hardware init for a DCN3.2 DIO link encoder: programs the AUX
 * DPHY RX/TX control registers, the legacy TMDS_CTL0 bit, and then runs the
 * common DCN1.0 AUX engine initialization.
 */
void enc32_hw_init(struct link_encoder *enc)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);

/*
	00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
	01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
	02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
	03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
	04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
	05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
	06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
	07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
*/

/*
	AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
	AUX_RX_START_WINDOW = 1 [6:4]
	AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
	AUX_RX_HALF_SYM_DETECT_LEN  = 1 [13:12] default is 1
	AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
	AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0  default is 0
	AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1  default is 1
	AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1  default is 1
	AUX_RX_PHASE_DETECT_LEN,  [21,20] = 0x3 default is 3
	AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
	AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);

	AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);

	//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
	// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
	// 27MHz -> 0xd
	// 100MHz -> 0x32
	// 48MHz -> 0x18

	// Set TMDS_CTL0 to 1.  This is a legacy setting.
	REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);

	dcn10_aux_initialize(enc10);
}

/*
 * Enable DP output on this link encoder.
 *
 * When the avoid_vbios_exec_table debug option is NOT set, defer to the
 * common DCN1.0 path (which goes through the VBIOS exec table). When it IS
 * set, this is intentionally a no-op here: the driver-side programming is
 * handled elsewhere for DCN3.2.
 */
void dcn32_link_encoder_enable_dp_output(
	struct link_encoder *enc,
	const struct dc_link_settings *link_settings,
	enum clock_source_id clock_source)
{
	if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
		dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
		return;
	}
}

/*
 * Report whether a USB-C connector is currently in DP alt mode.
 * Reads RDPCS_PHY_DPALT_DISABLE: value 1 means alt mode is disabled,
 * so alt mode is active when the field reads 0. Always false for
 * connectors that are not USB-C.
 */
static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
	uint32_t dp_alt_mode_disable = 0;
	bool is_usb_c_alt_mode = false;

	if (enc->features.flags.bits.DP_IS_USB_C) {
		/* if value == 1 alt mode is disabled, otherwise it is enabled */
		REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
		is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
	}

	return is_usb_c_alt_mode;
}

/*
 * Get the maximum link capabilities for this encoder, then clamp the lane
 * count to 2 when the PHY is in USB-C DP2 (2-lane) alt mode rather than
 * DP4 (4-lane) mode.
 */
static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
	struct dc_link_settings *link_settings)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
	uint32_t is_in_usb_c_dp4_mode = 0;

	dcn10_link_encoder_get_max_link_cap(enc, link_settings);

	/* in usb c dp2 mode, max lane count is 2 */
	if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
		REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
		if (!is_in_usb_c_dp4_mode)
			link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
	}
}

static const struct link_encoder_funcs dcn32_link_enc_funcs = {
	.read_state = link_enc2_read_state,
	.validate_output_with_stream =
			dcn30_link_encoder_validate_output_with_stream,
	.hw_init = enc32_hw_init,
	.setup = dcn10_link_encoder_setup,
	.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
	.enable_dp_output = dcn32_link_encoder_enable_dp_output,
	.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
	.disable_output = dcn10_link_encoder_disable_output,
	.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
	.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
	.update_mst_stream_allocation_table =
			dcn10_link_encoder_update_mst_stream_allocation_table,
	.psr_program_dp_dphy_fast_training =
			dcn10_psr_program_dp_dphy_fast_training,
	.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
	.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
	.enable_hpd = dcn10_link_encoder_enable_hpd,
	.disable_hpd = dcn10_link_encoder_disable_hpd,
	.is_dig_enabled = dcn10_is_dig_enabled,
	.destroy = dcn10_link_encoder_destroy,
	.fec_set_enable = enc2_fec_set_enable,
	.fec_set_ready = enc2_fec_set_ready,
	.fec_is_active = enc2_fec_is_active,
	.get_dig_frontend = dcn10_get_dig_frontend,
	.get_dig_mode = dcn10_get_dig_mode,
	.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
	.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
	.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
};

/*
 * Construct a DCN3.2 DIO link encoder: wires up the function table, register
 * maps, preferred DIG engine for the given transmitter, and capability flags
 * (overridden from VBIOS connector speed-cap info when available).
 */
void dcn32_link_encoder_construct(
	struct dcn20_link_encoder *enc20,
	const struct encoder_init_data *init_data,
	const struct encoder_feature_support *enc_features,
	const struct dcn10_link_enc_registers *link_regs,
	const struct dcn10_link_enc_aux_registers *aux_regs,
	const struct dcn10_link_enc_hpd_registers *hpd_regs,
	const struct dcn10_link_enc_shift *link_shift,
	const struct dcn10_link_enc_mask *link_mask)
{
	struct bp_connector_speed_cap_info bp_cap_info = {0};
	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
	enum bp_result result = BP_RESULT_OK;
	struct dcn10_link_encoder *enc10 = &enc20->enc10;

	enc10->base.funcs = &dcn32_link_enc_funcs;
	enc10->base.ctx = init_data->ctx;
	enc10->base.id = init_data->encoder;

	enc10->base.hpd_source = init_data->hpd_source;
	enc10->base.connector = init_data->connector;

	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

	enc10->base.features = *enc_features;
	/* DP_IS_USB_C must be set AFTER the whole-struct copy of *enc_features
	 * above; setting it earlier gets clobbered by the assignment, which
	 * breaks USB-C alt-mode detection (is_in_alt_mode / lane clamping).
	 */
	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
		enc10->base.features.flags.bits.DP_IS_USB_C = 1;

	enc10->base.transmitter = init_data->transmitter;

	/* set the flag to indicate whether driver poll the I2C data pin
	 * while doing the DP sink detect
	 */

/*	if (dal_adapter_service_is_feature_supported(as,
		FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
		enc10->base.features.flags.bits.
			DP_SINK_DETECT_POLL_DATA_PIN = true;*/

	enc10->base.output_signals =
		SIGNAL_TYPE_DVI_SINGLE_LINK |
		SIGNAL_TYPE_DVI_DUAL_LINK |
		SIGNAL_TYPE_LVDS |
		SIGNAL_TYPE_DISPLAY_PORT |
		SIGNAL_TYPE_DISPLAY_PORT_MST |
		SIGNAL_TYPE_EDP |
		SIGNAL_TYPE_HDMI_TYPE_A;

	enc10->link_regs = link_regs;
	enc10->aux_regs = aux_regs;
	enc10->hpd_regs = hpd_regs;
	enc10->link_shift = link_shift;
	enc10->link_mask = link_mask;

	switch (enc10->base.transmitter) {
	case TRANSMITTER_UNIPHY_A:
		enc10->base.preferred_engine = ENGINE_ID_DIGA;
		break;
	case TRANSMITTER_UNIPHY_B:
		enc10->base.preferred_engine = ENGINE_ID_DIGB;
		break;
	case TRANSMITTER_UNIPHY_C:
		enc10->base.preferred_engine = ENGINE_ID_DIGC;
		break;
	case TRANSMITTER_UNIPHY_D:
		enc10->base.preferred_engine = ENGINE_ID_DIGD;
		break;
	case TRANSMITTER_UNIPHY_E:
		enc10->base.preferred_engine = ENGINE_ID_DIGE;
		break;
	default:
		ASSERT_CRITICAL(false);
		enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
	}

	/* default to one to mirror Windows behavior */
	enc10->base.features.flags.bits.HDMI_6GB_EN = 1;

	if (bp_funcs->get_connector_speed_cap_info)
		result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
						enc10->base.connector, &bp_cap_info);

	/* Override features with DCE-specific values */
	if (result == BP_RESULT_OK) {
		enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
				bp_cap_info.DP_HBR2_EN;
		enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
				bp_cap_info.DP_HBR3_EN;
		enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
		enc10->base.features.flags.bits.IS_DP2_CAPABLE = 1;
		enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
		enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
		enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
	} else {
		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
				__func__,
				result);
	}
	if (enc10->base.ctx->dc->debug.hdmi20_disable) {
		enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
	}
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32_hwseq.h"

#include "dcn32_init.h"

/*
 * Public hardware-sequencer vtable for DCN3.2.
 *
 * Entries reuse implementations from earlier DCE/DCN generations (dce110_*,
 * dcn10_*, dcn20_*, ...) where the behavior is unchanged on this ASIC, and
 * point at dcn32_* overrides where it is not.  NULL entries are hooks this
 * generation does not implement.
 */
static const struct hw_sequencer_funcs dcn32_funcs = {
	.program_gamut_remap = dcn10_program_gamut_remap,
	.init_hw = dcn32_init_hw,
	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
	.apply_ctx_for_surface = NULL,
	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
	.update_plane_addr = dcn20_update_plane_addr,
	.update_dchub = dcn10_update_dchub,
	.update_pending_status = dcn10_update_pending_status,
	.program_output_csc = dcn20_program_output_csc,
	.enable_accelerated_mode = dce110_enable_accelerated_mode,
	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
	.update_info_frame = dcn31_update_info_frame,
	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
	.enable_stream = dcn20_enable_stream,
	.disable_stream = dce110_disable_stream,
	.unblank_stream = dcn32_unblank_stream,
	.blank_stream = dce110_blank_stream,
	.enable_audio_stream = dce110_enable_audio_stream,
	.disable_audio_stream = dce110_disable_audio_stream,
	.disable_plane = dcn20_disable_plane,
	.disable_pixel_data = dcn20_disable_pixel_data,
	.pipe_control_lock = dcn20_pipe_control_lock,
	.interdependent_update_lock = dcn10_lock_all_pipes,
	.cursor_lock = dcn10_cursor_lock,
	.prepare_bandwidth = dcn30_prepare_bandwidth,
	.optimize_bandwidth = dcn20_optimize_bandwidth,
	.update_bandwidth = dcn20_update_bandwidth,
	.set_drr = dcn10_set_drr,
	.get_position = dcn10_get_position,
	.set_static_screen_control = dcn30_set_static_screen_control,
	.setup_stereo = dcn10_setup_stereo,
	.set_avmute = dcn30_set_avmute,
	.log_hw_state = dcn10_log_hw_state,
	.get_hw_state = dcn10_get_hw_state,
	.clear_status_bits = dcn10_clear_status_bits,
	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
	.edp_backlight_control = dce110_edp_backlight_control,
	.edp_power_control = dce110_edp_power_control,
	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
	.edp_wait_for_T12 = dce110_edp_wait_for_T12,
	.set_cursor_position = dcn10_set_cursor_position,
	.set_cursor_attribute = dcn10_set_cursor_attribute,
	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
	.set_clock = dcn10_set_clock,
	.get_clock = dcn10_get_clock,
	.program_triplebuffer = dcn20_program_triple_buffer,
	.enable_writeback = dcn30_enable_writeback,
	.disable_writeback = dcn30_disable_writeback,
	.update_writeback = dcn30_update_writeback,
	.mmhubbub_warmup = dcn30_mmhubbub_warmup,
	.dmdata_status_done = dcn20_dmdata_status_done,
	.program_dmdata_engine = dcn30_program_dmdata_engine,
	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
	.init_sys_ctx = dcn20_init_sys_ctx,
	.init_vm_ctx = dcn20_init_vm_ctx,
	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
	.calc_vupdate_position = dcn10_calc_vupdate_position,
	.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
	.does_plane_fit_in_mall = NULL,
	.set_backlight_level = dcn21_set_backlight_level,
	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
	.hardware_release = dcn30_hardware_release,
	.set_pipe = dcn21_set_pipe,
	.enable_lvds_link_output = dce110_enable_lvds_link_output,
	.enable_tmds_link_output = dce110_enable_tmds_link_output,
	.enable_dp_link_output = dce110_enable_dp_link_output,
	.disable_link_output = dcn32_disable_link_output,
	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
	.get_dcc_en_bits = dcn10_get_dcc_en_bits,
	.commit_subvp_config = dcn32_commit_subvp_config,
	.enable_phantom_streams = dcn32_enable_phantom_streams,
	.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
	.update_visual_confirm_color = dcn10_update_visual_confirm_color,
	.subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
	.update_phantom_vp_position = dcn32_update_phantom_vp_position,
	.update_dsc_pg = dcn32_update_dsc_pg,
	.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
};

/*
 * Private (driver-internal) sequencer hooks for DCN3.2; same reuse pattern
 * as dcn32_funcs above.
 */
static const struct hwseq_private_funcs dcn32_private_funcs = {
	.init_pipes = dcn10_init_pipes,
	.update_plane_addr = dcn20_update_plane_addr,
	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
	.update_mpcc = dcn20_update_mpcc,
	.set_input_transfer_func = dcn32_set_input_transfer_func,
	.set_output_transfer_func = dcn32_set_output_transfer_func,
	.power_down = dce110_power_down,
	.enable_display_power_gating = dcn10_dummy_display_power_gating,
	.blank_pixel_data = dcn20_blank_pixel_data,
	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
	.enable_stream_timing = dcn20_enable_stream_timing,
	.edp_backlight_control = dce110_edp_backlight_control,
	.disable_stream_gating = dcn20_disable_stream_gating,
	.enable_stream_gating = dcn20_enable_stream_gating,
	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
	.did_underflow_occur = dcn10_did_underflow_occur,
	.init_blank = dcn32_init_blank,
	.disable_vga = dcn20_disable_vga,
	.bios_golden_init = dcn10_bios_golden_init,
	.plane_atomic_disable = dcn20_plane_atomic_disable,
	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
	.enable_power_gating_plane = dcn32_enable_power_gating_plane,
	.hubp_pg_control = dcn32_hubp_pg_control,
	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
	.update_odm = dcn32_update_odm,
	.dsc_pg_control = dcn32_dsc_pg_control,
	.dsc_pg_status = dcn32_dsc_pg_status,
	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
	.wait_for_blank_complete = dcn20_wait_for_blank_complete,
	.dccg_init = dcn20_dccg_init,
	.set_mcm_luts = dcn32_set_mcm_luts,
	.program_mall_pipe_config = dcn32_program_mall_pipe_config,
	.update_force_pstate = dcn32_update_force_pstate,
	.update_mall_sel = dcn32_update_mall_sel,
	.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
	.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
	.resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
	.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
};

/* Install the DCN3.2 sequencer vtables on a newly constructed dc instance. */
void dcn32_hw_sequencer_init_functions(struct dc *dc)
{
	dc->hwss = dcn32_funcs;
	dc->hwseq->funcs = dcn32_private_funcs;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dc_bios_types.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32_hpo_dp_link_encoder.h"
#include "reg_helper.h"
#include "stream_encoder.h"

#define DC_LOGGER \
		enc3->base.ctx->logger

#define REG(reg)\
	(enc3->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name

#define CTX \
	enc3->base.ctx

/*
 * Report whether the PHY behind this HPO DP link encoder currently has
 * DP alt mode enabled, by reading the RDPCS_PHY_DPALT_DISABLE field of
 * the transmitter's RDPCSTX_PHY_CNTL6 register.
 *
 * Only transmitters UNIPHY_A..UNIPHY_E have an RDPCSTX instance to read.
 */
static bool dcn32_hpo_dp_link_enc_is_in_alt_mode(
		struct hpo_dp_link_encoder *enc)
{
	struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
	uint32_t dp_alt_mode_disable = 0;

	ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E));

	/* if value == 1 alt mode is disabled, otherwise it is enabled */
	REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);

	return (dp_alt_mode_disable == 0);
}

/*
 * DCN3.2 HPO DP link encoder vtable: everything is inherited from DCN3.1
 * except the alt-mode query above.
 */
static struct hpo_dp_link_encoder_funcs dcn32_hpo_dp_link_encoder_funcs = {
	.enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output,
	.disable_link_phy = dcn31_hpo_dp_link_enc_disable_output,
	.link_enable = dcn31_hpo_dp_link_enc_enable,
	.link_disable = dcn31_hpo_dp_link_enc_disable,
	.set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern,
	.update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table,
	.set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size,
	.is_in_alt_mode = dcn32_hpo_dp_link_enc_is_in_alt_mode,
	.read_state = dcn31_hpo_dp_link_enc_read_state,
	.set_ffe = dcn31_hpo_dp_link_enc_set_ffe,
};

/*
 * Construct a DCN3.2 HPO DP link encoder on top of the DCN3.1 struct.
 * Wires in the dcn32 vtable and the caller-provided register/shift/mask
 * tables; hpd_source and transmitter start out unknown until link
 * assignment.
 */
void hpo_dp_link_encoder32_construct(struct dcn31_hpo_dp_link_encoder *enc31,
	struct dc_context *ctx,
	uint32_t inst,
	const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs,
	const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
	const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask)
{
	enc31->base.ctx = ctx;

	enc31->base.inst = inst;
	enc31->base.funcs = &dcn32_hpo_dp_link_encoder_funcs;
	enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN;
	enc31->base.transmitter = TRANSMITTER_UNKNOWN;

	enc31->regs = hpo_le_regs;
	enc31->hpo_le_shift = hpo_le_shift;
	enc31->hpo_le_mask = hpo_le_mask;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn32_dpp.h"
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"

/*
 * Compute how many display lines fit in the DSCL line buffer for the given
 * memory configuration.
 *
 * Results are written to *num_part_y (luma, capped by alpha when alpha is
 * enabled) and *num_part_c (chroma); both are clamped to 32.
 */
static void dscl32_calc_lb_num_partitions(
		const struct scaler_data *scl_data,
		enum lb_memory_config lb_config,
		int *num_part_y,
		int *num_part_c)
{
	int words_per_line_y, words_per_line_c, words_per_line_a;
	int mem_size_y, mem_size_c, mem_size_a;
	int parts_a;
	/* Scaler is bypassed when the viewport matches the active timing. */
	bool scaler_bypassed = scl_data->viewport.width == scl_data->h_active &&
			scl_data->viewport.height == scl_data->v_active;
	int line_size = scl_data->viewport.width < scl_data->recout.width ?
			scl_data->viewport.width : scl_data->recout.width;
	int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
			scl_data->viewport_c.width : scl_data->recout.width;

	/* Never divide by zero below. */
	if (line_size == 0)
		line_size = 1;
	if (line_size_c == 0)
		line_size_c = 1;

	/* One memory word holds 6 pixels; round up to whole words. */
	words_per_line_y = (line_size + 5) / 6;
	words_per_line_c = (line_size_c + 5) / 6;
	words_per_line_a = (line_size + 5) / 6;

	switch (lb_config) {
	case LB_MEMORY_CONFIG_1:
		mem_size_y = 970;
		mem_size_c = 970;
		mem_size_a = 970;
		break;
	case LB_MEMORY_CONFIG_2:
		mem_size_y = 1290;
		mem_size_c = 1290;
		mem_size_a = 1290;
		break;
	case LB_MEMORY_CONFIG_3:
		/* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */
		if (scaler_bypassed) {
			/* use increased LB size for calculation only if Scaler not enabled */
			mem_size_y = 970 + 1290 + 1170 + 1170 + 1170;
			mem_size_c = 970 + 1290;
			mem_size_a = 970 + 1290 + 1170;
		} else {
			mem_size_y = 970 + 1290 + 484 + 484 + 484;
			mem_size_c = 970 + 1290;
			mem_size_a = 970 + 1290 + 484;
		}
		break;
	default:
		if (scaler_bypassed) {
			/* use increased LB size for calculation only if Scaler not enabled */
			mem_size_y = 970 + 1290 + 1170;
			mem_size_c = 970 + 1290 + 1170;
			mem_size_a = 970 + 1290 + 1170;
		} else {
			mem_size_y = 970 + 1290 + 484;
			mem_size_c = 970 + 1290 + 484;
			mem_size_a = 970 + 1290 + 484;
		}
		break;
	}

	*num_part_y = mem_size_y / words_per_line_y;
	*num_part_c = mem_size_c / words_per_line_c;
	parts_a = mem_size_a / words_per_line_a;

	/* With alpha enabled, luma cannot use more partitions than alpha has. */
	if (scl_data->lb_params.alpha_en && parts_a < *num_part_y)
		*num_part_y = parts_a;

	if (*num_part_y > 32)
		*num_part_y = 32;
	if (*num_part_c > 32)
		*num_part_c = 32;
}

/* DCN3.2 DPP vtable; NULL entries are blocks removed from the DCN3.2 DPP. */
static struct dpp_funcs dcn32_dpp_funcs = {
	.dpp_program_gamcor_lut		= dpp3_program_gamcor_lut,
	.dpp_read_state			= dpp30_read_state,
	.dpp_reset			= dpp_reset,
	.dpp_set_scaler			= dpp1_dscl_set_scaler_manual_scale,
	.dpp_get_optimal_number_of_taps	= dpp3_get_optimal_number_of_taps,
	.dpp_set_gamut_remap		= dpp3_cm_set_gamut_remap,
	.dpp_set_csc_adjustment		= NULL,
	.dpp_set_csc_default		= NULL,
	.dpp_program_regamma_pwl	= NULL,
	.dpp_set_pre_degam		= dpp3_set_pre_degam,
	.dpp_program_input_lut		= NULL,
	.dpp_full_bypass		= dpp1_full_bypass,
	.dpp_setup			= dpp3_cnv_setup,
	.dpp_program_degamma_pwl	= NULL,
	.dpp_program_cm_dealpha		= dpp3_program_cm_dealpha,
	.dpp_program_cm_bias		= dpp3_program_cm_bias,

	/* BLNDGAM is removed completely in DCN3.2 DPP */
	.dpp_program_blnd_lut		= NULL,
	/* CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) */
	.dpp_program_shaper_lut		= NULL,
	/* CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) */
	.dpp_program_3dlut		= NULL,

	.dpp_program_bias_and_scale	= NULL,
	.dpp_cnv_set_alpha_keyer	= dpp2_cnv_set_alpha_keyer,
	.set_cursor_attributes		= dpp3_set_cursor_attributes,
	.set_cursor_position		= dpp1_set_cursor_position,
	.set_optional_cursor_attributes	= dpp1_cnv_set_optional_cursor_attributes,
	.dpp_dppclk_control		= dpp1_dppclk_control,
	.dpp_set_hdr_multiplier		= dpp3_set_hdr_multiplier,
};

static struct dpp_caps dcn32_dpp_cap = {
	.dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
	.max_lb_partitions = 31,
	.dscl_calc_lb_num_partitions = dscl32_calc_lb_num_partitions,
};

/*
 * Construct a DCN3.2 DPP instance: install the dcn32 vtable/caps and the
 * caller-provided register/shift/mask tables.  Always succeeds.
 */
bool dpp32_construct(
	struct dcn3_dpp *dpp,
	struct dc_context *ctx,
	uint32_t inst,
	const struct dcn3_dpp_registers *tf_regs,
	const struct dcn3_dpp_shift *tf_shift,
	const struct dcn3_dpp_mask *tf_mask)
{
	dpp->base.ctx = ctx;
	dpp->base.inst = inst;

	dpp->tf_regs = tf_regs;
	dpp->tf_shift = tf_shift;
	dpp->tf_mask = tf_mask;

	dpp->base.funcs = &dcn32_dpp_funcs;
	dpp->base.caps = &dcn32_dpp_cap;

	return true;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "core_types.h"
#include "dcn32_dccg.h"

#define TO_DCN_DCCG(dccg)\
	container_of(dccg, struct dcn_dccg, base)

#define REG(reg) \
	(dccg_dcn->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name

#define CTX \
	dccg_dcn->base.ctx

#define DC_LOGGER \
	dccg->ctx->logger

/*
 * Resync the DIO FIFO by reading the current DISPCLK RDIVIDER value and
 * writing the same value back into the WDIVIDER field.
 */
static void dccg32_trigger_dio_fifo_resync(
	struct dccg *dccg)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
	uint32_t dispclk_rdivider_value = 0;

	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);

	/* Not valid for the WDIVIDER to be set to 0 */
	if (dispclk_rdivider_value != 0)
		REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}

/*
 * Read back the current K1/K2 pixel-rate divider fields for the given OTG
 * instance.  Outputs are PIXEL_RATE_DIV_NA when the instance is invalid.
 */
static void dccg32_get_pixel_rate_div(
		struct dccg *dccg,
		uint32_t otg_inst,
		enum pixel_rate_div *k1,
		enum pixel_rate_div *k2)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
	uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;

	*k1 = PIXEL_RATE_DIV_NA;
	*k2 = PIXEL_RATE_DIV_NA;

	switch (otg_inst) {
	case 0:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG0_PIXEL_RATE_DIVK1, &val_k1,
			OTG0_PIXEL_RATE_DIVK2, &val_k2);
		break;
	case 1:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG1_PIXEL_RATE_DIVK1, &val_k1,
			OTG1_PIXEL_RATE_DIVK2, &val_k2);
		break;
	case 2:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG2_PIXEL_RATE_DIVK1, &val_k1,
			OTG2_PIXEL_RATE_DIVK2, &val_k2);
		break;
	case 3:
		REG_GET_2(OTG_PIXEL_RATE_DIV,
			OTG3_PIXEL_RATE_DIVK1, &val_k1,
			OTG3_PIXEL_RATE_DIVK2, &val_k2);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}

	*k1 = (enum pixel_rate_div)val_k1;
	*k2 = (enum pixel_rate_div)val_k2;
}

/*
 * Program the K1/K2 pixel-rate divider fields for the given OTG instance.
 * Skips the register write when the requested values are already in effect.
 */
static void dccg32_set_pixel_rate_div(
		struct dccg *dccg,
		uint32_t otg_inst,
		enum pixel_rate_div k1,
		enum pixel_rate_div k2)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
	enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;

	// Don't program 0xF into the register field. Not valid since
	// K1 / K2 field is only 1 / 2 bits wide
	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
		BREAK_TO_DEBUGGER();
		return;
	}

	dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
	if (k1 == cur_k1 && k2 == cur_k2)
		return;

	switch (otg_inst) {
	case 0:
		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
				OTG0_PIXEL_RATE_DIVK1, k1,
				OTG0_PIXEL_RATE_DIVK2, k2);
		break;
	case 1:
		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
				OTG1_PIXEL_RATE_DIVK1, k1,
				OTG1_PIXEL_RATE_DIVK2, k2);
		break;
	case 2:
		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
				OTG2_PIXEL_RATE_DIVK1, k1,
				OTG2_PIXEL_RATE_DIVK2, k2);
		break;
	case 3:
		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
				OTG3_PIXEL_RATE_DIVK1, k1,
				OTG3_PIXEL_RATE_DIVK2, k2);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}

/*
 * Select the DTBCLK_P source for an OTG instance.  src == REFCLK disables
 * the P clock; otherwise SRC_SEL is 0 (dprefclk) or 2 (dtbclk0) and the
 * P clock is enabled.
 */
static void dccg32_set_dtbclk_p_src(
		struct dccg *dccg,
		enum streamclk_source src,
		uint32_t otg_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	uint32_t p_src_sel = 0; /* selects dprefclk */
	if (src == DTBCLK0)
		p_src_sel = 2;  /* selects dtbclk0 */

	switch (otg_inst) {
	case 0:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P0_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P0_SRC_SEL, p_src_sel,
					DTBCLK_P0_EN, 1);
		break;
	case 1:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P1_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P1_SRC_SEL, p_src_sel,
					DTBCLK_P1_EN, 1);
		break;
	case 2:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P2_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P2_SRC_SEL, p_src_sel,
					DTBCLK_P2_EN, 1);
		break;
	case 3:
		if (src == REFCLK)
			REG_UPDATE(DTBCLK_P_CNTL,
					DTBCLK_P3_EN, 0);
		else
			REG_UPDATE_2(DTBCLK_P_CNTL,
					DTBCLK_P3_SRC_SEL, p_src_sel,
					DTBCLK_P3_EN, 1);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}

/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
static void dccg32_set_dtbclk_dto(
		struct dccg *dccg,
		const struct dtbclk_dto_params *params)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
	/* DTO Output Rate / Pixel Rate = 1/4 */
	int req_dtbclk_khz = params->pixclk_khz / 4;

	if (params->ref_dtbclk_khz && req_dtbclk_khz) {
		uint32_t modulo, phase;

		// phase / modulo = dtbclk / dtbclk ref
		modulo = params->ref_dtbclk_khz * 1000;
		phase = req_dtbclk_khz * 1000;

		REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
		REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);

		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
				DTBCLK_DTO_ENABLE[params->otg_inst], 1);

		/* Wait for the DTO enable to take effect before switching sources. */
		REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
				DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
				1, 100);

		/* program OTG_PIXEL_RATE_DIV for DIVK1 and DIVK2 fields */
		dccg32_set_pixel_rate_div(dccg, params->otg_inst, PIXEL_RATE_DIV_BY_1, PIXEL_RATE_DIV_BY_1);

		/* The recommended programming sequence to enable DTBCLK DTO to generate
		 * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should
		 * be set only after DTO is enabled
		 */
		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
				PIPE_DTO_SRC_SEL[params->otg_inst], 2);
	} else {
		/* Disable the DTO and fall back to SRC_SEL 0 (HDMI) or 1. */
		REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
				DTBCLK_DTO_ENABLE[params->otg_inst], 0,
				PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);

		REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
		REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
	}
}

/* Convenience wrapper: program the DTBCLK DTO for an HDMI pipe. */
static void dccg32_set_valid_pixel_rate(
		struct dccg *dccg,
		int ref_dtbclk_khz,
		int otg_inst,
		int pixclk_khz)
{
	struct dtbclk_dto_params dto_params = {0};

	dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
	dto_params.otg_inst = otg_inst;
	dto_params.pixclk_khz = pixclk_khz;
	dto_params.is_hdmi = true;

	dccg32_set_dtbclk_dto(dccg, &dto_params);
}

/* DCCG refclk equals xtalin on this ASIC; report it unchanged. */
static void dccg32_get_dccg_ref_freq(struct dccg *dccg,
		unsigned int xtalin_freq_inKhz,
		unsigned int *dccg_ref_freq_inKhz)
{
	/*
	 * Assume refclk is sourced from xtalin
	 * expect 100MHz
	 */
	*dccg_ref_freq_inKhz = xtalin_freq_inKhz;
	return;
}

/*
 * Route a DP stream clock: point the OTG's DTBCLK_P at DTBCLK0, then
 * enable the requested DPSTREAMCLK instance (disabled when src == REFCLK)
 * with its source select set to the OTG instance.
 */
static void dccg32_set_dpstreamclk(
		struct dccg *dccg,
		enum streamclk_source src,
		int otg_inst,
		int dp_hpo_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	/* set the dtbclk_p source */
	/* always program refclk as DTBCLK. No use-case expected to require DPREFCLK as refclk */
	dccg32_set_dtbclk_p_src(dccg, DTBCLK0, otg_inst);

	/* enabled to select one of the DTBCLKs for pipe */
	switch (dp_hpo_inst) {
	case 0:
		REG_UPDATE_2(DPSTREAMCLK_CNTL,
			     DPSTREAMCLK0_EN,
			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
		break;
	case 1:
		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
		break;
	case 2:
		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
		break;
	case 3:
		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
		break;
	default:
		BREAK_TO_DEBUGGER();
		return;
	}
}

/* Pulse OTG_ADD_PIXEL for the given OTG instance. */
static void dccg32_otg_add_pixel(struct dccg *dccg,
		uint32_t otg_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
			OTG_ADD_PIXEL[otg_inst], 1);
}

/* Pulse OTG_DROP_PIXEL for the given OTG instance. */
static void dccg32_otg_drop_pixel(struct dccg *dccg,
		uint32_t otg_inst)
{
	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

	REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
			OTG_DROP_PIXEL[otg_inst], 1);
}

/* DCCG vtable for DCN3.2; mixes dcn2/dcn31 implementations with dcn32 overrides. */
static const struct dccg_funcs dccg32_funcs = {
	.update_dpp_dto = dccg2_update_dpp_dto,
	.get_dccg_ref_freq = dccg32_get_dccg_ref_freq,
	.dccg_init = dccg31_init,
	.set_dpstreamclk = dccg32_set_dpstreamclk,
	.enable_symclk32_se = dccg31_enable_symclk32_se,
	.disable_symclk32_se = dccg31_disable_symclk32_se,
	.enable_symclk32_le = dccg31_enable_symclk32_le,
	.disable_symclk32_le = dccg31_disable_symclk32_le,
	.set_physymclk = dccg31_set_physymclk,
	.set_dtbclk_dto = dccg32_set_dtbclk_dto,
	.set_valid_pixel_rate = dccg32_set_valid_pixel_rate,
	.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
	.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
	.otg_add_pixel = dccg32_otg_add_pixel,
	.otg_drop_pixel = dccg32_otg_drop_pixel,
	.set_pixel_rate_div = dccg32_set_pixel_rate_div,
	.trigger_dio_fifo_resync = dccg32_trigger_dio_fifo_resync,
};

/*
 * Allocate and initialize a DCN3.2 DCCG object.  Returns NULL on
 * allocation failure; caller owns the returned object.
 */
struct dccg *dccg32_create(
	struct dc_context *ctx,
	const struct dccg_registers *regs,
	const struct dccg_shift *dccg_shift,
	const struct dccg_mask *dccg_mask)
{
	struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
	struct dccg *base;

	if (dccg_dcn == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	base = &dccg_dcn->base;
	base->ctx = ctx;
	base->funcs = &dccg32_funcs;

	dccg_dcn->regs = regs;
	dccg_dcn->dccg_shift = dccg_shift;
	dccg_dcn->dccg_mask = dccg_mask;

	return &dccg_dcn->base;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dcn32_optc.h"

#include "dcn30/dcn30_optc.h"
#include "dcn31/dcn31_optc.h"
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"

#define REG(reg)\
	optc1->tg_regs->reg

#define CTX \
	optc1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	optc1->tg_shift->field_name, optc1->tg_mask->field_name

/*
 * Configure ODM combine: select the OPTC memory instances and input
 * segments for the given OPP pipes, set the per-segment width, and put
 * the H-timing divider into (opp_cnt)-way mode.
 */
static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
		struct dc_crtc_timing *timing)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	uint32_t memory_mask = 0;
	int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
	int mpcc_hactive = h_active / opp_cnt;
	/* Each memory instance is 2048x(32x2) bits to support half line of 4096 */
	int odm_mem_count = (h_active + 2047) / 2048;

	/*
	 * display <= 4k : 2 memories + 2 pipes
	 * 4k < display <= 8k : 4 memories + 2 pipes
	 * 8k < display <= 12k : 6 memories + 4 pipes
	 */
	if (opp_cnt == 4) {
		if (odm_mem_count <= 2)
			memory_mask = 0x3;
		else if (odm_mem_count <= 4)
			memory_mask = 0xf;
		else
			memory_mask = 0x3f;
	} else {
		if (odm_mem_count <= 2)
			memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2);
		else if (odm_mem_count <= 4)
			memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
		else
			memory_mask = 0x77;
	}

	REG_SET(OPTC_MEMORY_CONFIG, 0,
		OPTC_MEM_SEL, memory_mask);

	if (opp_cnt == 2) {
		REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
				OPTC_NUM_OF_INPUT_SEGMENT, 1,
				OPTC_SEG0_SRC_SEL, opp_id[0],
				OPTC_SEG1_SRC_SEL, opp_id[1]);
	} else if (opp_cnt == 4) {
		REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
				OPTC_NUM_OF_INPUT_SEGMENT, 3,
				OPTC_SEG0_SRC_SEL, opp_id[0],
				OPTC_SEG1_SRC_SEL, opp_id[1],
				OPTC_SEG2_SRC_SEL, opp_id[2],
				OPTC_SEG3_SRC_SEL, opp_id[3]);
	}

	REG_UPDATE(OPTC_WIDTH_CONTROL,
			OPTC_SEGMENT_WIDTH, mpcc_hactive);

	REG_UPDATE(OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
	optc1->opp_count = opp_cnt;
}

/* Toggle manual control of the H-timing divider mode. */
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE(OTG_H_TIMING_CNTL,
			OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0);
}

/**
 * optc32_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator.
 *
 * @optc: timing_generator instance.
 *
 * Return: If CRTC is enabled, return true.
 */
static bool optc32_enable_crtc(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	/* opp instance for OTG, 1 to 1 mapping and odm will adjust */
	REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
			OPTC_SEG0_SRC_SEL, optc->inst);

	/* VTG enable first is for HW workaround */
	REG_UPDATE(CONTROL,
			VTG0_ENABLE, 1);

	REG_SEQ_START();

	/* Enable CRTC */
	REG_UPDATE_2(OTG_CONTROL,
			OTG_DISABLE_POINT_CNTL, 2,
			OTG_MASTER_EN, 1);

	REG_SEQ_SUBMIT();
	REG_SEQ_WAIT_DONE();

	return true;
}

/* disable_crtc */
static bool optc32_disable_crtc(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	/* disable otg request until end of the first line
	 * in the vertical blank region
	 */
	REG_UPDATE(OTG_CONTROL,
			OTG_MASTER_EN, 0);

	REG_UPDATE(CONTROL,
			VTG0_ENABLE, 0);

	/* CRTC disabled, so disable clock. */
	REG_WAIT(OTG_CLOCK_CONTROL,
			OTG_BUSY, 0,
			1, 150000);

	return true;
}

/* Immediately disable a phantom pipe's OTG after it has been enabled. */
static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	/* Disable immediately. */
	REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0);

	/* CRTC disabled, so disable clock. */
	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}

/* Turn off the OTG master enable for a phantom pipe; no busy wait. */
static void optc32_disable_phantom_otg(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}

/*
 * Undo ODM combine: single input segment sourced from this OTG's own OPP,
 * divider mode derived from the timing, no OPTC memory instances reserved.
 */
static void optc32_set_odm_bypass(struct timing_generator *optc,
		const struct dc_crtc_timing *dc_crtc_timing)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	enum h_timing_div_mode h_div = H_TIMING_NO_DIV;

	REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
			OPTC_NUM_OF_INPUT_SEGMENT, 0,
			OPTC_SEG0_SRC_SEL, optc->inst,
			OPTC_SEG1_SRC_SEL, 0xf,
			OPTC_SEG2_SRC_SEL, 0xf,
			OPTC_SEG3_SRC_SEL, 0xf
			);

	h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing);
	REG_UPDATE(OTG_H_TIMING_CNTL,
			OTG_H_TIMING_DIV_MODE, h_div);

	REG_SET(OPTC_MEMORY_CONFIG, 0,
			OPTC_MEM_SEL, 0);
	optc1->opp_count = 1;
}

/*
 * Arm the DRR manual trigger: via DMUB when FAMS mclk switching is
 * available, otherwise by programming TRIG_A locally.
 */
static void optc32_setup_manual_trigger(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	struct dc *dc = optc->ctx->dc;

	if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
		dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst);
	else {
		/*
		 * MIN_MASK_EN is gone and MASK is now always enabled.
		 *
		 * To get it to it work with manual trigger we need to make sure
		 * we program the correct bit.
		 */
		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
				OTG_V_TOTAL_MIN_SEL, 1,
				OTG_V_TOTAL_MAX_SEL, 1,
				OTG_FORCE_LOCK_ON_EVENT, 0,
				OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */

		// Setup manual flow control for EOF via TRIG_A
		optc->funcs->setup_manual_trigger(optc);
	}
}

/*
 * Program DRR limits (V_TOTAL min/mid/max, converted to 0-based register
 * values) when params are valid, then arm the manual trigger.
 */
static void optc32_set_drr(
	struct timing_generator *optc,
	const struct drr_params *params)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	if (params != NULL &&
		params->vertical_total_max > 0 &&
		params->vertical_total_min > 0) {

		if (params->vertical_total_mid != 0) {

			REG_SET(OTG_V_TOTAL_MID, 0,
				OTG_V_TOTAL_MID, params->vertical_total_mid - 1);

			REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
					OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
					OTG_VTOTAL_MID_FRAME_NUM,
					(uint8_t)params->vertical_total_mid_frame_num);

		}

		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
	}

	optc32_setup_manual_trigger(optc);
}

/* Timing-generator vtable for DCN3.2; dcn32_* entries override earlier generations. */
static struct timing_generator_funcs dcn32_tg_funcs = {
		.validate_timing = optc1_validate_timing,
		.program_timing = optc1_program_timing,
		.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
		.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
		.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
		.program_global_sync = optc1_program_global_sync,
		.enable_crtc = optc32_enable_crtc,
		.disable_crtc = optc32_disable_crtc,
		.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable,
		.disable_phantom_crtc = optc32_disable_phantom_otg,
		/* used by enable_timing_synchronization. Not need for FPGA */
		.is_counter_moving = optc1_is_counter_moving,
		.get_position = optc1_get_position,
		.get_frame_count = optc1_get_vblank_counter,
		.get_scanoutpos = optc1_get_crtc_scanoutpos,
		.get_otg_active_size = optc1_get_otg_active_size,
		.set_early_control = optc1_set_early_control,
		/* used by enable_timing_synchronization. Not need for FPGA */
		.wait_for_state = optc1_wait_for_state,
		.set_blank_color = optc3_program_blank_color,
		.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
		.triplebuffer_lock = optc3_triplebuffer_lock,
		.triplebuffer_unlock = optc2_triplebuffer_unlock,
		.enable_reset_trigger = optc1_enable_reset_trigger,
		.enable_crtc_reset = optc1_enable_crtc_reset,
		.disable_reset_trigger = optc1_disable_reset_trigger,
		.lock = optc3_lock,
		.unlock = optc1_unlock,
		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
		.enable_optc_clock = optc1_enable_optc_clock,
		.set_drr = optc32_set_drr,
		.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
		.set_vtotal_min_max = optc3_set_vtotal_min_max,
		.set_static_screen_control = optc1_set_static_screen_control,
		.program_stereo = optc1_program_stereo,
		.is_stereo_left_eye = optc1_is_stereo_left_eye,
		.tg_init = optc3_tg_init,
		.is_tg_enabled = optc1_is_tg_enabled,
		.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
		.clear_optc_underflow = optc1_clear_optc_underflow,
		.setup_global_swap_lock = NULL,
		.get_crc = optc1_get_crc,
		.configure_crc = optc1_configure_crc,
		.set_dsc_config = optc3_set_dsc_config,
		.get_dsc_status = optc2_get_dsc_status,
		.set_dwb_source = NULL,
		.set_odm_bypass = optc32_set_odm_bypass,
		.set_odm_combine = optc32_set_odm_combine,
		.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
		.get_optc_source = optc2_get_optc_source,
		.set_out_mux = optc3_set_out_mux,
		.set_drr_trigger_window = optc3_set_drr_trigger_window,
		.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
		.set_gsl = optc2_set_gsl,
		.set_gsl_source_select = optc2_set_gsl_source_select,
		.set_vtg_params = optc1_set_vtg_params,
		.program_manual_trigger = optc2_program_manual_trigger,
		.setup_manual_trigger = optc2_setup_manual_trigger,
		.get_hw_timing = optc1_get_hw_timing,
};

/*
 * Install the DCN3.2 vtable and hardware limits on an OPTC instance.
 * max_h_total/max_v_total are derived from the register field masks
 * (mask value is the largest programmable value, so +1 gives the count).
 */
void dcn32_timing_generator_init(struct optc *optc1)
{
	optc1->base.funcs = &dcn32_tg_funcs;

	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;

	optc1->min_h_blank = 32;
	optc1->min_v_blank = 3;
	optc1->min_v_blank_interlace = 5;
	optc1->min_h_sync_width = 4;
	optc1->min_v_sync_width = 1;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/*
 * DCN3.2 MPC (Multiple Pipe/Plane Combiner) programming.
 *
 * This file programs the per-MPCC color-management (MCM) blocks:
 *  - post-blend 1D LUT ("post1dlut")
 *  - shaper LUT (pre-3DLUT piecewise-linear shaper)
 *  - 3D LUT (17x17x17 or 9x9x9 tetrahedral)
 * All LUTs are double-buffered in RAM A / RAM B; the programming pattern is
 * always: pick the RAM not currently in use, fill it, then flip the mux.
 */
#include "reg_helper.h"
#include "dcn30/dcn30_mpc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32_mpc.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"

/* Register-access helpers used by the REG_SET/REG_UPDATE/REG_GET macros;
 * they resolve through the per-instance regs/shift/mask tables. */
#define REG(reg)\
	mpc30->mpc_regs->reg

#define CTX \
	mpc30->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name

/*
 * mpc32_mpc_init() - base MPC init plus DCN3.2 memory low-power setup.
 *
 * After the common mpc1 init, optionally puts the MCM shaper/3DLUT/1DLUT
 * and OGAM memories into their deepest low-power mode (field value 3) for
 * every MPCC instance, gated on the debug option and on the fields actually
 * existing for this ASIC (non-zero mask).
 */
void mpc32_mpc_init(struct mpc *mpc)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
	int mpcc_id;

	mpc1_mpc_init(mpc);

	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
		if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
			for (mpcc_id = 0; mpcc_id < mpc30->num_mpcc; mpcc_id++) {
				REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE, 3);
				REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE, 3);
				REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE, 3);
			}
		}
		if (mpc30->mpc_mask->MPCC_OGAM_MEM_LOW_PWR_MODE) {
			for (mpcc_id = 0; mpcc_id < mpc30->num_mpcc; mpcc_id++)
				REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_LOW_PWR_MODE, 3);
		}
	}
}

/*
 * mpc32_power_on_blnd_lut() - power the post-blend 1D LUT memory up or down.
 *
 * With the CM low-power debug option enabled, power-up releases the force-off
 * bit and polls the power state (up to 5 polls of 1 us) until the memory is
 * on; power-down is currently unimplemented (deliberate ASSERT) unless
 * low-power handling is globally disabled.  Without the debug option the
 * force bit is driven directly from @power_on.
 */
void mpc32_power_on_blnd_lut(
	struct mpc *mpc,
	uint32_t mpcc_id,
	bool power_on)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) {
		if (power_on) {
			REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
			REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
		} else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
			/* Deferred power-down path not implemented yet for MPC. */
			ASSERT(false);
			/* TODO: change to mpc
			 *  dpp_base->ctx->dc->optimized_required = true;
			 *  dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
			 */
		}
	} else {
		REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0,
				MPCC_MCM_1DLUT_MEM_PWR_FORCE, power_on == true ? 0 : 1);
	}
}

/*
 * mpc32_get_post1dlut_current() - report which post-blend 1D LUT RAM the
 * hardware is currently reading from (or bypass).
 *
 * MODE_CURRENT: 0/1 = bypass, 2 = LUT active; SELECT_CURRENT then picks
 * RAM A (0) or RAM B (1).
 */
static enum dc_lut_mode mpc32_get_post1dlut_current(struct mpc *mpc, uint32_t mpcc_id)
{
	enum dc_lut_mode mode;
	uint32_t mode_current = 0;
	uint32_t in_use = 0;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_GET(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
			MPCC_MCM_1DLUT_MODE_CURRENT, &mode_current);
	REG_GET(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
			MPCC_MCM_1DLUT_SELECT_CURRENT, &in_use);

	switch (mode_current) {
	case 0:
	case 1:
		mode = LUT_BYPASS;
		break;
	case 2:
		if (in_use == 0)
			mode = LUT_RAM_A;
		else
			mode = LUT_RAM_B;
		break;
	default:
		mode = LUT_BYPASS;
		break;
	}

	return mode;
}

/*
 * mpc32_configure_post1dlut() - select which 1D LUT RAM host writes target
 * and reset the write index to entry 0.  Write mask 7 enables all three
 * color channels.
 */
void mpc32_configure_post1dlut(
		struct mpc *mpc,
		uint32_t mpcc_id,
		bool is_ram_a)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	//TODO: this
	REG_UPDATE_2(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id],
			MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 7,
			MPCC_MCM_1DLUT_LUT_HOST_SEL, is_ram_a == true ? 0 : 1);

	REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
}

/*
 * mpc32_post1dlut_get_reg_field() - fill the generic transfer-function
 * shift/mask descriptor with the 1DLUT RAMA field definitions.
 *
 * Only RAMA field macros exist in the register headers; they describe the
 * identical bit layout for RAM B as well, so the same descriptor is used
 * for both RAM A and RAM B programming (only the register addresses differ).
 * Note: "exp_resion_start_segment" is a pre-existing typo in the shared
 * struct dcn3_xfer_func_reg definition and must be kept as-is.
 */
static void mpc32_post1dlut_get_reg_field(
		struct dcn30_mpc *mpc,
		struct dcn3_xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B;
	reg->masks.field_region_end = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B;
	reg->masks.field_region_linear_slope = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B;
	reg->shifts.exp_region_start = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B;
	reg->masks.exp_region_start = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = mpc->mpc_shift->MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = mpc->mpc_mask->MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B;
}

/*program blnd lut RAM A*/
/* Program the post-blend 1D LUT curve segmentation into the RAM A register
 * set via the shared gamcor helper. */
void mpc32_program_post1dluta_settings(
		struct mpc *mpc,
		uint32_t mpcc_id,
		const struct pwl_params *params)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
	struct dcn3_xfer_func_reg gam_regs;

	mpc32_post1dlut_get_reg_field(mpc30, &gam_regs);

	gam_regs.start_cntl_b = REG(MPCC_MCM_1DLUT_RAMA_START_CNTL_B[mpcc_id]);
	gam_regs.start_cntl_g = REG(MPCC_MCM_1DLUT_RAMA_START_CNTL_G[mpcc_id]);
	gam_regs.start_cntl_r = REG(MPCC_MCM_1DLUT_RAMA_START_CNTL_R[mpcc_id]);
	gam_regs.start_slope_cntl_b = REG(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B[mpcc_id]);
	gam_regs.start_slope_cntl_g = REG(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G[mpcc_id]);
	gam_regs.start_slope_cntl_r = REG(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R[mpcc_id]);
	gam_regs.start_end_cntl1_b = REG(MPCC_MCM_1DLUT_RAMA_END_CNTL1_B[mpcc_id]);
	gam_regs.start_end_cntl2_b = REG(MPCC_MCM_1DLUT_RAMA_END_CNTL2_B[mpcc_id]);
	gam_regs.start_end_cntl1_g = REG(MPCC_MCM_1DLUT_RAMA_END_CNTL1_G[mpcc_id]);
	gam_regs.start_end_cntl2_g = REG(MPCC_MCM_1DLUT_RAMA_END_CNTL2_G[mpcc_id]);
	gam_regs.start_end_cntl1_r = REG(MPCC_MCM_1DLUT_RAMA_END_CNTL1_R[mpcc_id]);
	gam_regs.start_end_cntl2_r = REG(MPCC_MCM_1DLUT_RAMA_END_CNTL2_R[mpcc_id]);
	gam_regs.region_start = REG(MPCC_MCM_1DLUT_RAMA_REGION_0_1[mpcc_id]);
	gam_regs.region_end = REG(MPCC_MCM_1DLUT_RAMA_REGION_32_33[mpcc_id]);

	cm_helper_program_gamcor_xfer_func(mpc->ctx, params, &gam_regs);
}

/*program blnd lut RAM B*/
/* Same as the RAM A variant above, but targeting the RAMB register set. */
void mpc32_program_post1dlutb_settings(
		struct mpc *mpc,
		uint32_t mpcc_id,
		const struct pwl_params *params)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
	struct dcn3_xfer_func_reg gam_regs;

	mpc32_post1dlut_get_reg_field(mpc30, &gam_regs);

	gam_regs.start_cntl_b = REG(MPCC_MCM_1DLUT_RAMB_START_CNTL_B[mpcc_id]);
	gam_regs.start_cntl_g = REG(MPCC_MCM_1DLUT_RAMB_START_CNTL_G[mpcc_id]);
	gam_regs.start_cntl_r = REG(MPCC_MCM_1DLUT_RAMB_START_CNTL_R[mpcc_id]);
	gam_regs.start_slope_cntl_b = REG(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B[mpcc_id]);
	gam_regs.start_slope_cntl_g = REG(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G[mpcc_id]);
	gam_regs.start_slope_cntl_r = REG(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R[mpcc_id]);
	gam_regs.start_end_cntl1_b = REG(MPCC_MCM_1DLUT_RAMB_END_CNTL1_B[mpcc_id]);
	gam_regs.start_end_cntl2_b = REG(MPCC_MCM_1DLUT_RAMB_END_CNTL2_B[mpcc_id]);
	gam_regs.start_end_cntl1_g = REG(MPCC_MCM_1DLUT_RAMB_END_CNTL1_G[mpcc_id]);
	gam_regs.start_end_cntl2_g = REG(MPCC_MCM_1DLUT_RAMB_END_CNTL2_G[mpcc_id]);
	gam_regs.start_end_cntl1_r = REG(MPCC_MCM_1DLUT_RAMB_END_CNTL1_R[mpcc_id]);
	gam_regs.start_end_cntl2_r = REG(MPCC_MCM_1DLUT_RAMB_END_CNTL2_R[mpcc_id]);
	gam_regs.region_start = REG(MPCC_MCM_1DLUT_RAMB_REGION_0_1[mpcc_id]);
	gam_regs.region_end = REG(MPCC_MCM_1DLUT_RAMB_REGION_32_33[mpcc_id]);

	cm_helper_program_gamcor_xfer_func(mpc->ctx, params, &gam_regs);
}

/*
 * mpc32_program_post1dlut_pwl() - stream the PWL data points into the 1D LUT.
 *
 * The LUT_DATA register auto-increments the write index (reset to 0 by
 * mpc32_configure_post1dlut()).  The final base entry is the last point plus
 * its delta.  If R==G==B for every point, one pass with all channels enabled
 * suffices; otherwise each channel is written in its own pass by switching
 * WRITE_COLOR_MASK (4 = red, 2 = green, 1 = blue).
 */
void mpc32_program_post1dlut_pwl(
		struct mpc *mpc,
		uint32_t mpcc_id,
		const struct pwl_result_data *rgb,
		uint32_t num)
{
	uint32_t i;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
	uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
	uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
	uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;

	if (is_rgb_equal(rgb, num)) {
		for (i = 0 ; i < num; i++)
			REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
		REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
	} else {
		REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 4);
		for (i = 0 ; i < num; i++)
			REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
		REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
		REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 2);
		for (i = 0 ; i < num; i++)
			REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].green_reg);
		REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_green);
		REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 1);
		for (i = 0 ; i < num; i++)
			REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].blue_reg);
		REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_blue);
	}
}

/*
 * mpc32_program_post1dlut() - top-level post-blend 1D LUT programming.
 *
 * NULL @params puts the LUT in bypass (and powers the memory down when the
 * CM low-power option is set) and returns false.  Otherwise: power up the
 * RAM, program the RAM that is NOT currently feeding the pipe, stream the
 * PWL data, then atomically flip MODE/SELECT to the freshly written RAM.
 * Returns true when a LUT was programmed.
 */
bool mpc32_program_post1dlut(
		struct mpc *mpc,
		const struct pwl_params *params,
		uint32_t mpcc_id)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	if (params == NULL) {
		REG_SET(MPCC_MCM_1DLUT_CONTROL[mpcc_id], 0, MPCC_MCM_1DLUT_MODE, 0);
		if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm)
			mpc32_power_on_blnd_lut(mpc, mpcc_id, false);
		return false;
	}

	current_mode = mpc32_get_post1dlut_current(mpc, mpcc_id);
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B)
		next_mode = LUT_RAM_A;
	else
		next_mode = LUT_RAM_B;

	mpc32_power_on_blnd_lut(mpc, mpcc_id, true);
	mpc32_configure_post1dlut(mpc, mpcc_id, next_mode == LUT_RAM_A);

	if (next_mode == LUT_RAM_A)
		mpc32_program_post1dluta_settings(mpc, mpcc_id, params);
	else
		mpc32_program_post1dlutb_settings(mpc, mpcc_id, params);

	mpc32_program_post1dlut_pwl(
			mpc, mpcc_id, params->rgb_resulted, params->hw_points_num);

	REG_UPDATE_2(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
			MPCC_MCM_1DLUT_MODE, 2,
			MPCC_MCM_1DLUT_SELECT, next_mode == LUT_RAM_A ? 0 : 1);

	return true;
}

/*
 * mpc32_get_shaper_current() - report the shaper LUT RAM currently in use.
 * Encoding differs from the 1D LUT: 0 = bypass, 1 = RAM A, 2 = RAM B.
 */
static enum dc_lut_mode mpc32_get_shaper_current(struct mpc *mpc, uint32_t mpcc_id)
{
	enum dc_lut_mode mode;
	uint32_t state_mode;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_GET(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_MODE_CURRENT, &state_mode);

	switch (state_mode) {
	case 0:
		mode = LUT_BYPASS;
		break;
	case 1:
		mode = LUT_RAM_A;
		break;
	case 2:
		mode = LUT_RAM_B;
		break;
	default:
		mode = LUT_BYPASS;
		break;
	}

	return mode;
}

/*
 * mpc32_configure_shaper_lut() - select the shaper write target RAM, enable
 * all color channels for writing (mask 7), and reset the write index.
 */
void mpc32_configure_shaper_lut(
		struct mpc *mpc,
		bool is_ram_a,
		uint32_t mpcc_id)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_UPDATE(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[mpcc_id],
			MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[mpcc_id],
			MPCC_MCM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
	REG_SET(MPCC_MCM_SHAPER_LUT_INDEX[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_INDEX, 0);
}

/*
 * mpc32_program_shaper_luta_settings() - program the shaper PWL segmentation
 * (start/end corner points and the 34 region descriptors, two regions per
 * register) into the RAM A register set.
 */
void mpc32_program_shaper_luta_settings(
		struct mpc *mpc,
		const struct pwl_params *params,
		uint32_t mpcc_id)
{
	const struct gamma_curve *curve;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_SET_2(MPCC_MCM_SHAPER_RAMA_START_CNTL_B[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
	REG_SET_2(MPCC_MCM_SHAPER_RAMA_START_CNTL_G[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
	REG_SET_2(MPCC_MCM_SHAPER_RAMA_START_CNTL_R[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);

	REG_SET_2(MPCC_MCM_SHAPER_RAMA_END_CNTL_B[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
	REG_SET_2(MPCC_MCM_SHAPER_RAMA_END_CNTL_G[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
	REG_SET_2(MPCC_MCM_SHAPER_RAMA_END_CNTL_R[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);

	curve = params->arr_curve_points;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
}

/*
 * mpc32_program_shaper_lutb_settings() - RAM B counterpart of the function
 * above.  The RAMB registers intentionally reuse the RAMA field macros:
 * the register headers only define the field layout once, and it is
 * identical for both RAMs.
 */
void mpc32_program_shaper_lutb_settings(
		struct mpc *mpc,
		const struct pwl_params *params,
		uint32_t mpcc_id)
{
	const struct gamma_curve *curve;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
	REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
	REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);

	REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
	REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
	REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);

	curve = params->arr_curve_points;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);

	curve += 2;
	REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
		MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
}

/*
 * mpc32_program_shaper_lut() - stream the shaper PWL points.
 *
 * Each LUT_DATA write packs one channel entry as
 * (10-bit delta << 14) | 14-bit base; the index auto-increments, cycling
 * R, G, B per point since all channels are write-enabled.
 */
void mpc32_program_shaper_lut(
		struct mpc *mpc,
		const struct pwl_result_data *rgb,
		uint32_t num,
		uint32_t mpcc_id)
{
	uint32_t i, red, green, blue;
	uint32_t red_delta, green_delta, blue_delta;
	uint32_t red_value, green_value, blue_value;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	for (i = 0 ; i < num; i++) {
		red = rgb[i].red_reg;
		green = rgb[i].green_reg;
		blue = rgb[i].blue_reg;
		red_delta = rgb[i].delta_red_reg;
		green_delta = rgb[i].delta_green_reg;
		blue_delta = rgb[i].delta_blue_reg;

		red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff);
		green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff);
		blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff);

		REG_SET(MPCC_MCM_SHAPER_LUT_DATA[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_DATA, red_value);
		REG_SET(MPCC_MCM_SHAPER_LUT_DATA[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_DATA, green_value);
		REG_SET(MPCC_MCM_SHAPER_LUT_DATA[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_DATA, blue_value);
	}
}

/*
 * mpc32_power_on_shaper_3dlut() - gate/ungate the shared shaper + 3DLUT
 * memory.  MEM_PWR_DIS=1 disables power gating (i.e. keeps the RAM on).
 * On power-up with the MPC low-power option set, polls both power states
 * (up to 10 polls of 1 us each) before returning; the trailing REG_GETs
 * are diagnostics only.
 */
void mpc32_power_on_shaper_3dlut(
		struct mpc *mpc,
		uint32_t mpcc_id,
		bool power_on)
{
	uint32_t power_status_shaper = 2;
	uint32_t power_status_3dlut = 2;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
	int max_retries = 10;

	REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0,
		MPCC_MCM_3DLUT_MEM_PWR_DIS, power_on == true ? 1:0);
	/* wait for memory to fully power up */
	if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
		REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_SHAPER_MEM_PWR_STATE, 0, 1, max_retries);
		REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_3DLUT_MEM_PWR_STATE, 0, 1, max_retries);
	}

	/*read status is not mandatory, it is just for debugging*/
	REG_GET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_SHAPER_MEM_PWR_STATE, &power_status_shaper);
	REG_GET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_3DLUT_MEM_PWR_STATE, &power_status_3dlut);

	if (power_status_shaper != 0 && power_on == true)
		BREAK_TO_DEBUGGER();

	if (power_status_3dlut != 0 && power_on == true)
		BREAK_TO_DEBUGGER();
}

/*
 * mpc32_program_shaper() - top-level shaper LUT programming (RAM A/B
 * double-buffered, same flow as mpc32_program_post1dlut).  NULL @params
 * sets bypass and returns false; otherwise programs the idle RAM, flips
 * the mux (1 = RAM A, 2 = RAM B), re-enables power gating, returns true.
 */
bool mpc32_program_shaper(
		struct mpc *mpc,
		const struct pwl_params *params,
		uint32_t mpcc_id)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	if (params == NULL) {
		REG_SET(MPCC_MCM_SHAPER_CONTROL[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_MODE, 0);
		return false;
	}

	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
		mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true);

	current_mode = mpc32_get_shaper_current(mpc, mpcc_id);

	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		next_mode = LUT_RAM_B;
	else
		next_mode = LUT_RAM_A;

	mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, mpcc_id);

	if (next_mode == LUT_RAM_A)
		mpc32_program_shaper_luta_settings(mpc, params, mpcc_id);
	else
		mpc32_program_shaper_lutb_settings(mpc, params, mpcc_id);

	mpc32_program_shaper_lut(
			mpc, params->rgb_resulted, params->hw_points_num, mpcc_id);

	REG_SET(MPCC_MCM_SHAPER_CONTROL[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2);
	mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);

	return true;
}

/*
 * get3dlut_config() - read back the 3D LUT's current RAM selection, color
 * depth (30BIT_EN set means 10-bit channels, i.e. NOT 12-bit) and size
 * (0 = 17x17x17, else 9x9x9).
 */
static enum dc_lut_mode get3dlut_config(
			struct mpc *mpc,
			bool *is_17x17x17,
			bool *is_12bits_color_channel,
			int mpcc_id)
{
	uint32_t i_mode, i_enable_10bits, lut_size;
	enum dc_lut_mode mode;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id],
			MPCC_MCM_3DLUT_MODE_CURRENT, &i_mode);

	REG_GET(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
			MPCC_MCM_3DLUT_30BIT_EN, &i_enable_10bits);

	switch (i_mode) {
	case 0:
		mode = LUT_BYPASS;
		break;
	case 1:
		mode = LUT_RAM_A;
		break;
	case 2:
		mode = LUT_RAM_B;
		break;
	default:
		mode = LUT_BYPASS;
		break;
	}
	if (i_enable_10bits > 0)
		*is_12bits_color_channel = false;
	else
		*is_12bits_color_channel = true;

	REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &lut_size);

	if (lut_size == 0)
		*is_17x17x17 = true;
	else
		*is_17x17x17 = false;

	return mode;
}

/*
 * mpc32_select_3dlut_ram() - select the 3D LUT write-target RAM and its
 * data width (30BIT_EN=1 selects the packed 10-bit format).
 */
void mpc32_select_3dlut_ram(
		struct mpc *mpc,
		enum dc_lut_mode mode,
		bool is_color_channel_12bits,
		uint32_t mpcc_id)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_UPDATE_2(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
		MPCC_MCM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1,
		MPCC_MCM_3DLUT_30BIT_EN, is_color_channel_12bits == true ? 0:1);
}

/*
 * mpc32_select_3dlut_ram_mask() - enable one of the four interleaved 3D LUT
 * sub-RAMs for writing (bit mask 0x1/0x2/0x4/0x8) and reset the write index.
 */
void mpc32_select_3dlut_ram_mask(
		struct mpc *mpc,
		uint32_t ram_selection_mask,
		uint32_t mpcc_id)
{
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_WRITE_EN_MASK,
			ram_selection_mask);
	REG_SET(MPCC_MCM_3DLUT_INDEX[mpcc_id], 0, MPCC_MCM_3DLUT_INDEX, 0);
}

/*
 * mpc32_set3dlut_ram12() - write 3D LUT entries in the 12-bit format, two
 * entries per channel register write (the stored values are 8/10-bit
 * left-shifted into the 12-bit field).
 *
 * NOTE(review): the loop consumes entries in pairs and reads lut[i+1];
 * it assumes @entries is even — confirm against the tetrahedral lut
 * partition sizes used by the caller.
 */
void mpc32_set3dlut_ram12(
		struct mpc *mpc,
		const struct dc_rgb *lut,
		uint32_t entries,
		uint32_t mpcc_id)
{
	uint32_t i, red, green, blue, red1, green1, blue1;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	for (i = 0 ; i < entries; i += 2) {
		red   = lut[i].red<<4;
		green = lut[i].green<<4;
		blue  = lut[i].blue<<4;
		red1   = lut[i+1].red<<4;
		green1 = lut[i+1].green<<4;
		blue1  = lut[i+1].blue<<4;

		REG_SET_2(MPCC_MCM_3DLUT_DATA[mpcc_id], 0,
				MPCC_MCM_3DLUT_DATA0, red,
				MPCC_MCM_3DLUT_DATA1, red1);

		REG_SET_2(MPCC_MCM_3DLUT_DATA[mpcc_id], 0,
				MPCC_MCM_3DLUT_DATA0, green,
				MPCC_MCM_3DLUT_DATA1, green1);

		REG_SET_2(MPCC_MCM_3DLUT_DATA[mpcc_id], 0,
				MPCC_MCM_3DLUT_DATA0, blue,
				MPCC_MCM_3DLUT_DATA1, blue1);
	}
}

/*
 * mpc32_set3dlut_ram10() - write 3D LUT entries in the packed 30-bit format:
 * one register write per entry, 10 bits per channel (R<<20 | G<<10 | B).
 */
void mpc32_set3dlut_ram10(
		struct mpc *mpc,
		const struct dc_rgb *lut,
		uint32_t entries,
		uint32_t mpcc_id)
{
	uint32_t i, red, green, blue, value;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	for (i = 0; i < entries; i++) {
		red   = lut[i].red;
		green = lut[i].green;
		blue  = lut[i].blue;
		//should we shift red 22bit and green 12?
		value = (red<<20) | (green<<10) | blue;

		REG_SET(MPCC_MCM_3DLUT_DATA_30BIT[mpcc_id], 0, MPCC_MCM_3DLUT_DATA_30BIT, value);
	}
}

/*
 * mpc32_set_3dlut_mode() - commit 3D LUT mode/size.  Also forces the
 * movable CM block to the pre-blend position (location control 0).
 * Mode encoding: 0 = bypass, 1 = RAM A, 2 = RAM B; size 0 = 17^3, 1 = 9^3.
 */
static void mpc32_set_3dlut_mode(
		struct mpc *mpc,
		enum dc_lut_mode mode,
		bool is_color_channel_12bits,
		bool is_lut_size17x17x17,
		uint32_t mpcc_id)
{
	uint32_t lut_mode;
	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

	// set default 3DLUT to pre-blend
	// TODO: implement movable CM location
	REG_UPDATE(MPCC_MOVABLE_CM_LOCATION_CONTROL[mpcc_id], MPCC_MOVABLE_CM_LOCATION_CNTL, 0);

	if (mode == LUT_BYPASS)
		lut_mode = 0;
	else if (mode == LUT_RAM_A)
		lut_mode = 1;
	else
		lut_mode = 2;

	REG_UPDATE_2(MPCC_MCM_3DLUT_MODE[mpcc_id],
			MPCC_MCM_3DLUT_MODE, lut_mode,
			MPCC_MCM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 0 : 1);
}

/*
 * mpc32_program_3dlut() - top-level 3D LUT programming.
 *
 * NULL @params bypasses the LUT and returns false.  Otherwise powers the
 * RAM up, picks the idle RAM (A/B double buffering), writes the four
 * interleaved sub-LUTs (lut0..lut3, selected via write-enable masks
 * 0x1/0x2/0x4/0x8) in either 12-bit or packed 10-bit format, commits the
 * mode/size, and re-enables power gating if requested.  Returns true on
 * successful programming.
 */
bool mpc32_program_3dlut(
		struct mpc *mpc,
		const struct tetrahedral_params *params,
		int mpcc_id)
{
	enum dc_lut_mode mode;
	bool is_17x17x17;
	bool is_12bits_color_channel;
	const struct dc_rgb *lut0;
	const struct dc_rgb *lut1;
	const struct dc_rgb *lut2;
	const struct dc_rgb *lut3;
	int lut_size0;
	int lut_size;

	if (params == NULL) {
		mpc32_set_3dlut_mode(mpc, LUT_BYPASS, false, false, mpcc_id);
		return false;
	}
	mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true);

	mode = get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, mpcc_id);

	if (mode == LUT_BYPASS || mode == LUT_RAM_B)
		mode = LUT_RAM_A;
	else
		mode = LUT_RAM_B;

	is_17x17x17 = !params->use_tetrahedral_9;
	is_12bits_color_channel = params->use_12bits;
	if (is_17x17x17) {
		lut0 = params->tetrahedral_17.lut0;
		lut1 = params->tetrahedral_17.lut1;
		lut2 = params->tetrahedral_17.lut2;
		lut3 = params->tetrahedral_17.lut3;
		lut_size0 = sizeof(params->tetrahedral_17.lut0)/
					sizeof(params->tetrahedral_17.lut0[0]);
		lut_size  = sizeof(params->tetrahedral_17.lut1)/
					sizeof(params->tetrahedral_17.lut1[0]);
	} else {
		lut0 = params->tetrahedral_9.lut0;
		lut1 = params->tetrahedral_9.lut1;
		lut2 = params->tetrahedral_9.lut2;
		lut3 = params->tetrahedral_9.lut3;
		lut_size0 = sizeof(params->tetrahedral_9.lut0)/
				sizeof(params->tetrahedral_9.lut0[0]);
		lut_size  = sizeof(params->tetrahedral_9.lut1)/
				sizeof(params->tetrahedral_9.lut1[0]);
	}

	mpc32_select_3dlut_ram(mpc, mode,
				is_12bits_color_channel, mpcc_id);
	mpc32_select_3dlut_ram_mask(mpc, 0x1, mpcc_id);
	if (is_12bits_color_channel)
		mpc32_set3dlut_ram12(mpc, lut0, lut_size0, mpcc_id);
	else
		mpc32_set3dlut_ram10(mpc, lut0, lut_size0, mpcc_id);

	mpc32_select_3dlut_ram_mask(mpc, 0x2, mpcc_id);
	if (is_12bits_color_channel)
		mpc32_set3dlut_ram12(mpc, lut1, lut_size, mpcc_id);
	else
		mpc32_set3dlut_ram10(mpc, lut1, lut_size, mpcc_id);

	mpc32_select_3dlut_ram_mask(mpc, 0x4, mpcc_id);
	if (is_12bits_color_channel)
		mpc32_set3dlut_ram12(mpc, lut2, lut_size, mpcc_id);
	else
		mpc32_set3dlut_ram10(mpc, lut2, lut_size, mpcc_id);

	mpc32_select_3dlut_ram_mask(mpc, 0x8, mpcc_id);
	if (is_12bits_color_channel)
		mpc32_set3dlut_ram12(mpc, lut3, lut_size, mpcc_id);
	else
		mpc32_set3dlut_ram10(mpc, lut3, lut_size, mpcc_id);

	mpc32_set_3dlut_mode(mpc, mode, is_12bits_color_channel,
					is_17x17x17, mpcc_id);

	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
		mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);

	return true;
}

/* DCN3.2 MPC vtable: mpc32_* entries are new for this generation, the rest
 * are inherited from the DCN1/2/3 implementations.  RMU hooks are NULL
 * because DCN3.2 moved the shaper/3DLUT into the per-MPCC MCM block. */
static const struct mpc_funcs dcn32_mpc_funcs = {
	.read_mpcc_state = mpc1_read_mpcc_state,
	.insert_plane = mpc1_insert_plane,
	.remove_mpcc = mpc1_remove_mpcc,
	.mpc_init = mpc32_mpc_init,
	.mpc_init_single_inst = mpc1_mpc_init_single_inst,
	.update_blending = mpc2_update_blending,
	.cursor_lock = mpc1_cursor_lock,
	.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
	.wait_for_idle = mpc2_assert_idle_mpcc,
	.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
	.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
	.set_denorm = mpc3_set_denorm,
	.set_denorm_clamp = mpc3_set_denorm_clamp,
	.set_output_csc = mpc3_set_output_csc,
	.set_ocsc_default = mpc3_set_ocsc_default,
	.set_output_gamma = mpc3_set_output_gamma,
	.insert_plane_to_secondary = NULL,
	.remove_mpcc_from_secondary = NULL,
	.set_dwb_mux = mpc3_set_dwb_mux,
	.disable_dwb_mux = mpc3_disable_dwb_mux,
	.is_dwb_idle = mpc3_is_dwb_idle,
	.set_out_rate_control = mpc3_set_out_rate_control,
	.set_gamut_remap = mpc3_set_gamut_remap,
	.program_shaper = mpc32_program_shaper,
	.program_3dlut = mpc32_program_3dlut,
	.program_1dlut = mpc32_program_post1dlut,
	.acquire_rmu = NULL,
	.release_rmu = NULL,
	.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
	.get_mpc_out_mux = mpc1_get_mpc_out_mux,
	.set_bg_color = mpc1_set_bg_color,
};

/*
 * dcn32_mpc_construct() - initialize a DCN3.2 MPC instance: wire the vtable,
 * attach the register/shift/mask tables, and init every MPCC slot.
 *
 * NOTE(review): the init loop runs over MAX_MPCC rather than @num_mpcc —
 * presumably intentional so unused slots are also placed in a known state;
 * confirm against the dcn30 constructor this mirrors.
 */
void dcn32_mpc_construct(struct dcn30_mpc *mpc30,
	struct dc_context *ctx,
	const struct dcn30_mpc_registers *mpc_regs,
	const struct dcn30_mpc_shift *mpc_shift,
	const struct dcn30_mpc_mask *mpc_mask,
	int num_mpcc,
	int num_rmu)
{
	int i;

	mpc30->base.ctx = ctx;

	mpc30->base.funcs = &dcn32_mpc_funcs;

	mpc30->mpc_regs = mpc_regs;
	mpc30->mpc_shift = mpc_shift;
	mpc30->mpc_mask = mpc_mask;

	mpc30->mpcc_in_use_mask = 0;
	mpc30->num_mpcc = num_mpcc;
	mpc30->num_rmu = num_rmu;

	for (i = 0; i < MAX_MPCC; i++)
		mpc3_init_mpcc(&mpc30->base.mpcc_array[i], i);
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
/* * Copyright 2012-20 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dce_calcs.h"
#include "reg_helper.h"
#include "basics/conversion.h"
#include "dcn32_hubp.h"

#define REG(reg)\
	hubp2->hubp_regs->reg

#define CTX \
	hubp2->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name

/* Force (or release) a UCLK pstate disallow for data requests from this HUBP. */
void hubp32_update_force_pstate_disallow(struct hubp *hubp, bool pstate_disallow)
{
	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);

	REG_UPDATE_2(UCLK_PSTATE_FORCE,
			DATA_UCLK_PSTATE_FORCE_EN, pstate_disallow,
			DATA_UCLK_PSTATE_FORCE_VALUE, 0);
}

/* Same as above but for the cursor request path. */
void hubp32_update_force_cursor_pstate_disallow(struct hubp *hubp, bool pstate_disallow)
{
	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);

	REG_UPDATE_2(UCLK_PSTATE_FORCE,
			CURSOR_UCLK_PSTATE_FORCE_EN, pstate_disallow,
			CURSOR_UCLK_PSTATE_FORCE_VALUE, 0);
}

/* Select the MALL usage mode for this HUBP and whether cursor data is also
 * cached in MALL. */
void hubp32_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);

	// Also cache cursor in MALL if using MALL for SS
	REG_UPDATE_2(DCHUBP_MALL_CONFIG, USE_MALL_SEL, mall_sel,
			USE_MALL_FOR_CURSOR, c_cursor);
}

/* Configure request behavior for sub-viewport (SubVP) MALL buffering. */
void hubp32_prepare_subvp_buffering(struct hubp *hubp, bool enable)
{
	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);

	REG_UPDATE(DCHUBP_VMPG_CONFIG, FORCE_ONE_ROW_FOR_FRAME, enable);

	/* Programming guide suggests CURSOR_REQ_MODE = 1 for SubVP:
	 * For Pstate change using the MALL with sub-viewport buffering,
	 * the cursor does not use the MALL (USE_MALL_FOR_CURSOR is ignored)
	 * and sub-viewport positioning by Display FW has to avoid the cursor
	 * requests to DRAM (set CURSOR_REQ_MODE = 1 to minimize this exclusion).
	 *
	 * CURSOR_REQ_MODE = 1 begins fetching cursor data at the beginning of display prefetch.
	 * Setting this should allow the sub-viewport position to always avoid the cursor because
	 * we do not allow the sub-viewport region to overlap with display prefetch (i.e. during blank).
	 */
	REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable);
}

/* Post-enable programming for a phantom (SubVP) pipe's HUBP: blank it and
 * wait for outstanding requests to drain, unless the HUBP is power gated. */
void hubp32_phantom_hubp_post_enable(struct hubp *hubp)
{
	uint32_t reg_val;
	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);

	/* For phantom pipe enable, disable GSL */
	REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, 0);
	REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, 1);
	reg_val = REG_READ(DCHUBP_CNTL);
	if (reg_val) {
		/* init sequence workaround: in case HUBP is
		 * power gated, this wait would timeout.
		 *
		 * we just wrote reg_val to non-0, if it stay 0
		 * it means HUBP is gated
		 */
		REG_WAIT(DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, 1, 1, 200);
	}
}

/* Program cursor surface address, size, format, and decide whether the cursor
 * is large enough to be fetched through MALL. */
void hubp32_cursor_set_attributes(
		struct hubp *hubp,
		const struct dc_cursor_attributes *attr)
{
	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
	enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
	enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
			attr->width, attr->color_format);

	//Round cursor width up to next multiple of 64
	uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
	uint32_t cursor_height = attr->height;
	uint32_t cursor_size = cursor_width * cursor_height;

	hubp->curs_attr = *attr;

	REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
			CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
	REG_UPDATE(CURSOR_SURFACE_ADDRESS,
			CURSOR_SURFACE_ADDRESS, attr->address.low_part);

	REG_UPDATE_2(CURSOR_SIZE,
			CURSOR_WIDTH, attr->width,
			CURSOR_HEIGHT, attr->height);

	REG_UPDATE_4(CURSOR_CONTROL,
			CURSOR_MODE, attr->color_format,
			CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
			CURSOR_PITCH, hw_pitch,
			CURSOR_LINES_PER_CHUNK, lpc);

	REG_SET_2(CURSOR_SETTINGS, 0,
			/* no shift of the cursor HDL schedule */
			CURSOR0_DST_Y_OFFSET, 0,
			/* used to shift the cursor chunk request deadline */
			CURSOR0_CHUNK_HDL_ADJUST, 3);

	/* scale the pixel count to a byte count per format */
	switch (attr->color_format) {
	case CURSOR_MODE_MONO:
		cursor_size /= 2;
		break;
	case CURSOR_MODE_COLOR_1BIT_AND:
	case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
	case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
		cursor_size *= 4;
		break;

	case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
	case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
	default:
		cursor_size *= 8;
		break;
	}

	/* NOTE(review): 16384 appears to be the byte threshold above which the
	 * cursor must go through MALL — confirm against the MALL cursor cache
	 * size for this ASIC. */
	if (cursor_size > 16384)
		REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
	else
		REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
}

/* One-time HUBP init: sets bit 8 of HUBPREQ_DEBUG_DB.
 * NOTE(review): the meaning of this debug bit is not visible here — it is a
 * hardware-recommended init setting; confirm against the register spec. */
void hubp32_init(struct hubp *hubp)
{
	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);

	REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
}

static struct hubp_funcs dcn32_hubp_funcs = {
	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
	.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
	.hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
	.hubp_program_surface_config = hubp3_program_surface_config,
	.hubp_is_flip_pending = hubp2_is_flip_pending,
	.hubp_setup = hubp3_setup,
	.hubp_setup_interdependent = hubp2_setup_interdependent,
	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
	.set_blank = hubp2_set_blank,
	.set_blank_regs = hubp2_set_blank_regs,
	.dcc_control = hubp3_dcc_control,
	.mem_program_viewport = min_set_viewport,
	.set_cursor_attributes = hubp32_cursor_set_attributes,
	.set_cursor_position = hubp2_cursor_set_position,
	.hubp_clk_cntl = hubp2_clk_cntl,
	.hubp_vtg_sel = hubp2_vtg_sel,
	.dmdata_set_attributes = hubp3_dmdata_set_attributes,
	.dmdata_load = hubp2_dmdata_load,
	.dmdata_status_done = hubp2_dmdata_status_done,
	.hubp_read_state = hubp3_read_state,
	.hubp_clear_underflow = hubp2_clear_underflow,
	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
	.hubp_init = hubp3_init,
	.set_unbounded_requesting = hubp31_set_unbounded_requesting,
	.hubp_soft_reset = hubp31_soft_reset,
	.hubp_set_flip_int = hubp1_set_flip_int,
	.hubp_in_blank = hubp1_in_blank,
	.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
	.hubp_update_force_cursor_pstate_disallow = hubp32_update_force_cursor_pstate_disallow,
	.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
	.hubp_update_mall_sel = hubp32_update_mall_sel,
	.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
};

/* Wire up a DCN32 HUBP instance: vtable, register/shift/mask tables, and
 * default pipe bookkeeping. Always returns true. */
bool hubp32_construct(
	struct dcn20_hubp *hubp2,
	struct dc_context *ctx,
	uint32_t inst,
	const struct dcn_hubp2_registers *hubp_regs,
	const struct dcn_hubp2_shift *hubp_shift,
	const struct dcn_hubp2_mask *hubp_mask)
{
	hubp2->base.funcs = &dcn32_hubp_funcs;
	hubp2->base.ctx = ctx;
	hubp2->hubp_regs = hubp_regs;
	hubp2->hubp_shift = hubp_shift;
	hubp2->hubp_mask = hubp_mask;
	hubp2->base.inst = inst;
	hubp2->base.opp_id = OPP_ID_INVALID;
	hubp2->base.mpcc_id = 0xf;

	return true;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dc_bios_types.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
#include "link.h"
#include "dpcd_defs.h"

#define DC_LOGGER \
		enc1->base.ctx->logger

#define REG(reg)\
	(enc1->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc1->se_shift->field_name, enc1->se_mask->field_name

#define VBI_LINE_0 0
/* TMDS character rate (kHz) above which HDMI scrambling is mandatory */
#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000

#define CTX \
	enc1->base.ctx

/* Toggle two-pixel-per-cycle processing in the DP encoder for ODM combine. */
static void enc32_dp_set_odm_combine(
	struct stream_encoder *enc,
	bool odm_combine)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, odm_combine ? 1 : 0);
}

/* setup stream encoder in dvi mode */
static void enc32_stream_encoder_dvi_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	bool is_dual_link)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
		/* legacy path: let the VBIOS exec table do the setup */
		struct bp_encoder_control cntl = {0};

		cntl.action = ENCODER_CONTROL_SETUP;
		cntl.engine_id = enc1->base.id;
		cntl.signal = is_dual_link ?
			SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
		cntl.enable_dp_audio = false;
		cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
		cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;

		if (enc1->base.bp->funcs->encoder_control(
				enc1->base.bp, &cntl) != BP_RESULT_OK)
			return;

	} else {

		//Set pattern for clock channel, default value 0x63 does not work
		REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);

		//DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup

		//DIG_SOURCE_SELECT is already set in dig_connect_to_otg

		/* DIG_START is removed from the register spec */
	}

	/* DVI only supports RGB 8bpc */
	ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
	ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
	enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
}

/* setup stream encoder in hdmi mode */
static void enc32_stream_encoder_hdmi_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	int actual_pix_clk_khz,
	bool enable_audio)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
		struct bp_encoder_control cntl = {0};

		cntl.action = ENCODER_CONTROL_SETUP;
		cntl.engine_id = enc1->base.id;
		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		cntl.enable_dp_audio = enable_audio;
		cntl.pixel_clock = actual_pix_clk_khz;
		cntl.lanes_number = LANE_COUNT_FOUR;

		if (enc1->base.bp->funcs->encoder_control(
				enc1->base.bp, &cntl) != BP_RESULT_OK)
			return;

	} else {

		//Set pattern for clock channel, default value 0x63 does not work
		REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);

		//DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup

		//DIG_SOURCE_SELECT is already set in dig_connect_to_otg

		/* DIG_START is removed from the register spec */
	}

	/* Configure pixel encoding */
	enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);

	/* setup HDMI engine */
	REG_UPDATE_6(HDMI_CONTROL,
		HDMI_PACKET_GEN_VERSION, 1,
		HDMI_KEEPOUT_MODE, 1,
		HDMI_DEEP_COLOR_ENABLE, 0,
		HDMI_DATA_SCRAMBLE_EN, 0,
		HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1,
		HDMI_CLOCK_CHANNEL_RATE, 0);

	/* Configure color depth */
	switch (crtc_timing->display_color_depth) {
	case COLOR_DEPTH_888:
		REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
		break;
	case COLOR_DEPTH_101010:
		/* YCbCr 4:2:2 carries deep color in-band, so the deep color
		 * engine stays off for it */
		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 1,
					HDMI_DEEP_COLOR_ENABLE, 0);
		} else {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 1,
					HDMI_DEEP_COLOR_ENABLE, 1);
		}
		break;
	case COLOR_DEPTH_121212:
		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 2,
					HDMI_DEEP_COLOR_ENABLE, 0);
		} else {
			REG_UPDATE_2(HDMI_CONTROL,
					HDMI_DEEP_COLOR_DEPTH, 2,
					HDMI_DEEP_COLOR_ENABLE, 1);
		}
		break;
	case COLOR_DEPTH_161616:
		REG_UPDATE_2(HDMI_CONTROL,
				HDMI_DEEP_COLOR_DEPTH, 3,
				HDMI_DEEP_COLOR_ENABLE, 1);
		break;
	default:
		break;
	}

	if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
		/* enable HDMI data scrambler
		 * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
		 * Clock channel frequency is 1/4 of character rate.
		 */
		REG_UPDATE_2(HDMI_CONTROL,
			HDMI_DATA_SCRAMBLE_EN, 1,
			HDMI_CLOCK_CHANNEL_RATE, 1);
	} else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {

		/* TODO: New feature for DCE11, still need to implement */

		/* enable HDMI data scrambler
		 * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
		 * Clock channel frequency is the same
		 * as character rate
		 */
		REG_UPDATE_2(HDMI_CONTROL,
			HDMI_DATA_SCRAMBLE_EN, 1,
			HDMI_CLOCK_CHANNEL_RATE, 0);
	}

	/* Enable transmission of General Control packet on every frame */
	REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
		HDMI_GC_CONT, 1,
		HDMI_GC_SEND, 1,
		HDMI_NULL_SEND, 1);

	/* Disable Audio Content Protection packet transmission */
	REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);

	/* following belongs to audio */
	/* Enable Audio InfoFrame packet transmission. */
	REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);

	/* update double-buffered AUDIO_INFO registers immediately */
	ASSERT(enc->afmt);
	enc->afmt->funcs->audio_info_immediate_update(enc->afmt);

	/* Select line number on which to send Audio InfoFrame packets */
	REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
				VBI_LINE_0 + 2);

	/* set HDMI GC AVMUTE */
	REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
}

/* True when the timing packs two pixels per container: YCbCr 4:2:0 always,
 * and DSC'd YCbCr 4:2:2 unless the simple 4:2:2 DSC mode is used. */
static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
	bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;

	two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
			&& !timing->dsc_cfg.ycbcr422_simple);
	return two_pix;
}

static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
{
	/* math borrowed from function of same name in inc/resource
	 * checks if h_timing is divisible by 2
	 */
	bool divisible = false;
	uint16_t h_blank_start = 0;
	uint16_t h_blank_end = 0;

	if (timing) {
		h_blank_start = timing->h_total - timing->h_front_porch;
		h_blank_end = h_blank_start - timing->h_addressable;

		/* HTOTAL, Hblank start/end, and Hsync start/end all must be
		 * divisible by 2 in order for the horizontal timing params
		 * to be considered divisible by 2. Hsync start is always 0.
		 */
		divisible = (timing->h_total % 2 == 0) &&
				(h_blank_start % 2 == 0) &&
				(h_blank_end % 2 == 0) &&
				(timing->h_sync_width % 2 == 0);
	}
	return divisible;
}

static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
{
	/* should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy for DP encoders*/
	return is_h_timing_divisible_by_2(timing) &&
		dc->debug.enable_dp_dig_pixel_rate_div_policy;
}

/* Unblank the DP stream: program VID M/N timing generation, reset the steer
 * and resync FIFOs, then enable VID_STREAM. The enable takes effect at the
 * start of the next frame (hardware behavior, not programmable). */
void enc32_stream_encoder_dp_unblank(
		struct dc_link *link,
		struct stream_encoder *enc,
		const struct encoder_unblank_param *param)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	struct dc *dc = enc->ctx->dc;

	if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
		uint32_t n_vid = 0x8000;
		uint32_t m_vid;
		uint32_t n_multiply = 0;
		uint32_t pix_per_cycle = 0;
		uint64_t m_vid_l = n_vid;

		/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
		if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
			|| is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
			/*this logic should be the same in get_pixel_clock_parameters() */
			n_multiply = 1;
			pix_per_cycle = 1;
		}
		/* M / N = Fstream / Flink
		 * m_vid / n_vid = pixel rate / link rate
		 */
		m_vid_l *= param->timing.pix_clk_100hz / 10;
		m_vid_l = div_u64(m_vid_l,
			param->link_settings.link_rate
				* LINK_RATE_REF_FREQ_IN_KHZ);

		m_vid = (uint32_t) m_vid_l;

		/* enable auto measurement */

		REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);

		/* auto measurement need 1 full 0x8000 symbol cycle to kick in,
		 * therefore program initial value for Mvid and Nvid
		 */

		REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);

		REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);

		REG_UPDATE_2(DP_VID_TIMING,
				DP_VID_M_N_GEN_EN, 1,
				DP_VID_N_MUL, n_multiply);

		REG_UPDATE(DP_PIXEL_FORMAT,
				DP_PIXEL_PER_CYCLE_PROCESSING_MODE,
				pix_per_cycle);
	}

	/* make sure stream is disabled before resetting steer fifo */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);

	/* DIG_START is removed from the register spec */

	/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
	 * that it overflows during mode transition, and sometimes doesn't recover.
	 */
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
	udelay(10);

	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);

	/* DIG Resync FIFO now needs to be explicitly enabled */
	// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
	REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);

	/* read start level = 0 will bring underflow / overflow and DIG_FIFO_ERROR = 1
	 * so set it to 1/2 full = 7 before reset as suggested by hardware team.
	 */
	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);

	REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);

	REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);

	/* wait 100us for DIG/DP logic to prime
	 * (i.e. a few video lines)
	 */
	udelay(100);

	/* the hardware would start sending video at the start of the next DP
	 * frame (i.e. rising edge of the vblank).
	 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
	 * register has no effect on enable transition! HW always guarantees
	 * VID_STREAM enable at start of next frame, and this is not
	 * programmable
	 */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);

	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}

/* Set DSC-related configuration.
 * dsc_mode: 0 disables DSC, other values enable DSC in specified format
 * sc_bytes_per_pixel: DP_DSC_BYTES_PER_PIXEL removed in DCN32
 * dsc_slice_width: DP_DSC_SLICE_WIDTH removed in DCN32
 */
static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
					enum optc_dsc_mode dsc_mode,
					uint32_t dsc_bytes_per_pixel,
					uint32_t dsc_slice_width)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	/* only on/off is programmable on DCN32; the per-pixel/slice fields
	 * were removed from the register spec (see note above) */
	REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}

/* this function read dsc related register fields to be logged later in dcn10_log_hw_state
 * into a dcn_dsc_state struct.
 */
static void enc32_read_state(struct stream_encoder *enc, struct enc_state *s)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	//if dsc is enabled, continue to read
	REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
	if (s->dsc_mode) {
		REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num);

		REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
		REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);

		REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable);
		REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
	}
}

static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	/* The naming of this field is confusing, what it means is the output mode of otg, which
	 * is the input mode of the dig
	 */
	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0);
}

/* Assert or deassert the DIG FIFO reset; only waits for RESET_DONE when the
 * front-end symbol clock is running (otherwise the wait would time out). */
static void enc32_reset_fifo(struct stream_encoder *enc, bool reset)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	uint32_t reset_val = reset ? 1 : 0;
	uint32_t is_symclk_on;

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
	REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);

	if (is_symclk_on)
		REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
	else
		udelay(10);
}

/* Prime and enable the DIG FIFO: set read start level to half full, pulse a
 * reset, then enable. */
void enc32_enable_fifo(struct stream_encoder *enc)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);

	enc32_reset_fifo(enc, true);
	enc32_reset_fifo(enc, false);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
}

static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
	.dp_set_odm_combine =
		enc32_dp_set_odm_combine,
	.dp_set_stream_attribute =
		enc2_stream_encoder_dp_set_stream_attribute,
	.hdmi_set_stream_attribute =
		enc32_stream_encoder_hdmi_set_stream_attribute,
	.dvi_set_stream_attribute =
		enc32_stream_encoder_dvi_set_stream_attribute,
	.set_throttled_vcp_size =
		enc1_stream_encoder_set_throttled_vcp_size,
	.update_hdmi_info_packets =
		enc3_stream_encoder_update_hdmi_info_packets,
	.stop_hdmi_info_packets =
		enc3_stream_encoder_stop_hdmi_info_packets,
	.update_dp_info_packets_sdp_line_num =
		enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
	.update_dp_info_packets =
		enc3_stream_encoder_update_dp_info_packets,
	.stop_dp_info_packets =
		enc1_stream_encoder_stop_dp_info_packets,
	.dp_blank =
		enc1_stream_encoder_dp_blank,
	.dp_unblank =
		enc32_stream_encoder_dp_unblank,
	.audio_mute_control = enc3_audio_mute_control,

	.dp_audio_setup = enc3_se_dp_audio_setup,
	.dp_audio_enable = enc3_se_dp_audio_enable,
	.dp_audio_disable = enc1_se_dp_audio_disable,

	.hdmi_audio_setup = enc3_se_hdmi_audio_setup,
	.hdmi_audio_disable = enc1_se_hdmi_audio_disable,
	.setup_stereo_sync  = enc1_setup_stereo_sync,
	.set_avmute = enc1_stream_encoder_set_avmute,
	.dig_connect_to_otg = enc1_dig_connect_to_otg,
	.dig_source_otg = enc1_dig_source_otg,

	.dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,

	.enc_read_state = enc32_read_state,
	.dp_set_dsc_config = enc32_dp_set_dsc_config,
	.dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet,
	.set_dynamic_metadata = enc2_set_dynamic_metadata,
	.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,

	.set_input_mode = enc32_set_dig_input_mode,
	.enable_fifo = enc32_enable_fifo,
};

/* Wire up a DCN32 DIO stream encoder instance: vtable, BIOS handle, VPG/AFMT
 * blocks, and register/shift/mask tables. */
void dcn32_dio_stream_encoder_construct(
	struct dcn10_stream_encoder *enc1,
	struct dc_context *ctx,
	struct dc_bios *bp,
	enum engine_id eng_id,
	struct vpg *vpg,
	struct afmt *afmt,
	const struct dcn10_stream_enc_registers *regs,
	const struct dcn10_stream_encoder_shift *se_shift,
	const struct dcn10_stream_encoder_mask *se_mask)
{
	enc1->base.funcs = &dcn32_str_enc_funcs;
	enc1->base.ctx = ctx;
	enc1->base.id = eng_id;
	enc1->base.bp = bp;
	enc1->base.vpg = vpg;
	enc1->base.afmt = afmt;
	enc1->regs = regs;
	enc1->se_shift = se_shift;
	enc1->se_mask = se_mask;
	enc1->base.stream_enc_inst = vpg->inst;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "resource.h"
#include "mcif_wb.h"
#include "dcn32_mmhubbub.h"

#define REG(reg)\
	mcif_wb30->mcif_wb_regs->reg

#define CTX \
	mcif_wb30->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	mcif_wb30->mcif_wb_shift->field_name, mcif_wb30->mcif_wb_mask->field_name

/* Pack a 40-bit address into the register's 256B-granular low field.
 * NOTE(review): the +0xFE before >>8 looks like a round-up bias — confirm
 * against the MCIF_WB register spec. */
#define MCIF_ADDR(addr) (((unsigned long long)addr & 0xffffffffff) + 0xFE) >> 8
#define MCIF_ADDR_HIGH(addr) (unsigned long long)addr >> 40

/* wbif programming guide:
 * 1. set up wbif parameter:
 *    unsigned long long   luma_address[4];       //4 frame buffer
 *    unsigned long long   chroma_address[4];
 *    unsigned int	   luma_pitch;
 *    unsigned int	   chroma_pitch;
 *    unsigned int         warmup_pitch=0x10;     //256B align, the page size is 4KB when it is 0x10
 *    unsigned int	   slice_lines;           //slice size
 *    unsigned int         time_per_pixel;        // time per pixel, in ns
 *    unsigned int         arbitration_slice;     // 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes
 *    unsigned int         max_scaled_time;       // used for QOS generation
 *    unsigned int         swlock=0x0;
 *    unsigned int         cli_watermark[4];      //4 group urgent watermark
 *    unsigned int         pstate_watermark[4];   //4 group pstate watermark
 *    unsigned int         sw_int_en;             // Software interrupt enable, frame end and overflow
 *    unsigned int         sw_slice_int_en;       // slice end interrupt enable
 *    unsigned int         sw_overrun_int_en;     // overrun error interrupt enable
 *    unsigned int         vce_int_en;            // VCE interrupt enable, frame end and overflow
 *    unsigned int         vce_slice_int_en;      // VCE slice end interrupt enable, frame end and overflow
 *
 * 2. configure wbif register
 *    a. call mmhubbub_config_wbif()
 *
 * 3. Enable wbif
 *    call set_wbif_bufmgr_enable();
 *
 * 4. wbif_dump_status(), option, for debug purpose
 *    the bufmgr status can show the progress of write back, can be used for debug purpose
 */

/* Warm up (pre-touch) a memory region through the MMHUBBUB warmup engine and
 * block until the engine's software interrupt signals completion.
 * Addresses/sizes are programmed at 32B (>>5) granularity. */
static void mmhubbub32_warmup_mcif(struct mcif_wb *mcif_wb,
	struct mcif_warmup_params *params)
{
	struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
	union large_integer start_address_shift = {.quad_part = params->start_address.quad_part >> 5};

	/* Set base address and region size for warmup */
	REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, 0, MMHUBBUB_WARMUP_BASE_ADDR_HIGH, start_address_shift.high_part);
	REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_LOW, 0, MMHUBBUB_WARMUP_BASE_ADDR_LOW, start_address_shift.low_part);
	REG_SET(MMHUBBUB_WARMUP_ADDR_REGION, 0, MMHUBBUB_WARMUP_ADDR_REGION, params->region_size >> 5);
//	REG_SET(MMHUBBUB_WARMUP_P_VMID, 0, MMHUBBUB_WARMUP_P_VMID, params->p_vmid);

	/* Set address increment and enable warmup */
	REG_SET_3(MMHUBBUB_WARMUP_CONTROL_STATUS, 0, MMHUBBUB_WARMUP_EN, true,
			MMHUBBUB_WARMUP_SW_INT_EN, true,
			MMHUBBUB_WARMUP_INC_ADDR, params->address_increment >> 5);

	/* Wait for an interrupt to signal warmup is completed */
	REG_WAIT(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_STATUS, 1, 20, 100);

	/* Acknowledge interrupt */
	REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_ACK, 1);

	/* Disable warmup */
	REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}

/* Program the four writeback frame buffers (luma + chroma addresses), the
 * per-plane buffer sizes, and the pitches. All address/pitch/size fields are
 * 256-byte aligned in the registers. */
static void mmhubbub32_config_mcif_buf(struct mcif_wb *mcif_wb,
	struct mcif_buf_params *params,
	unsigned int dest_height)
{
	struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);

	/* buffer address for packing mode or Luma in planar mode */
	REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0]));
	REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0]));

	/* buffer address for Chroma in planar mode (unused in packing mode) */
	REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0]));
	REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0]));

	/* buffer address for packing mode or Luma in planar mode */
	REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1]));
	REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1]));

	/* buffer address for Chroma in planar mode (unused in packing mode) */
	REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1]));
	REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1]));

	/* buffer address for packing mode or Luma in planar mode */
	REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2]));
	REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2]));

	/* buffer address for Chroma in planar mode (unused in packing mode) */
	REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2]));
	REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2]));

	/* buffer address for packing mode or Luma in planar mode */
	REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3]));
	REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3]));

	/* buffer address for Chroma in planar mode (unused in packing mode) */
	REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3]));
	REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3]));

	/* setup luma & chroma size
	 * should be enough to contain a whole frame Luma data,
	 * the programmed value is frame buffer size [27:8], 256-byte aligned
	 */
	REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * dest_height);
	REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height);

	/* enable address fence */
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1);

	/* setup pitch, the programmed value is [15:8], 256B align */
	REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8,
			MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8);
}

/* Program MCIF writeback arbitration: per-pixel timing, the four urgent and
 * four pstate watermark groups (selected via the *_MASK index fields), QOS
 * scaling, slice size, and the arbitration slice granularity. */
static void mmhubbub32_config_mcif_arb(struct mcif_wb *mcif_wb,
	struct mcif_arb_params *params)
{
	struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);

	/* Programmed by the video driver based on the CRTC timing (for DWB) */
	REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel);

	/* Programming dwb watermark */
	/* Watermark to generate urgent in MCIF_WB_CLI, value is determined by MCIF_WB_CLI_WATERMARK_MASK. */
	/* Program in ns. A formula will be provided in the pseudo code to calculate the value. */
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x0);
	/* urgent_watermarkA */
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]);
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x1);
	/* urgent_watermarkB */
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]);
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x2);
	/* urgent_watermarkC */
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]);
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x3);
	/* urgent_watermarkD */
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]);

	/* Programming nb pstate watermark */
	/* nbp_state_change_watermarkA */
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0);
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
			NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]);
	/* nbp_state_change_watermarkB */
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1);
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
			NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]);
	/* nbp_state_change_watermarkC */
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2);
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
			NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]);
	/* nbp_state_change_watermarkD */
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3);
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
			NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]);

	/* dram_speed_change_duration - register removed */
	//REG_UPDATE(MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI,
	//		MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, params->dram_speed_change_duration);

	/* max_scaled_time */
	REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, params->max_scaled_time);

	/* slice_lines */
	REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1);

	/* Set arbitration unit for Luma/Chroma */
	/* arb_unit=2 should be chosen for more efficiency */
	/* Arbitration size, 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes */
	REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE,  params->arbitration_slice);
}

static const struct mcif_wb_funcs dcn32_mmhubbub_funcs = {
	.warmup_mcif		= mmhubbub32_warmup_mcif,
	.enable_mcif		= mmhubbub2_enable_mcif,
	.disable_mcif		= mmhubbub2_disable_mcif,
	.config_mcif_buf	= mmhubbub32_config_mcif_buf,
	.config_mcif_arb	= mmhubbub32_config_mcif_arb,
	.config_mcif_irq	= mmhubbub2_config_mcif_irq,
	.dump_frame		= mcifwb2_dump_frame,
};

/* Wire up a DCN32 MMHUBBUB (writeback) instance.
 * NOTE(review): the remainder of this function continues past the end of this
 * chunk and is not visible here. */
void dcn32_mmhubbub_construct(struct dcn30_mmhubbub *mcif_wb30,
	struct dc_context *ctx,
	const struct dcn30_mmhubbub_registers *mcif_wb_regs,
	const struct dcn30_mmhubbub_shift *mcif_wb_shift,
	const struct dcn30_mmhubbub_mask *mcif_wb_mask,
	int inst)
{
	mcif_wb30->base.ctx = ctx;
	mcif_wb30->base.inst = inst;
	mcif_wb30->base.funcs = &dcn32_mmhubbub_funcs;
mcif_wb30->mcif_wb_regs = mcif_wb_regs; mcif_wb30->mcif_wb_shift = mcif_wb_shift; mcif_wb30->mcif_wb_mask = mcif_wb_mask; }
/* source: linux-master — drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
 * (end of file; the following content belongs to dcn32_resource.c)
 */
// SPDX-License-Identifier: MIT /* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "dcn32_init.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn32_resource.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" #include "dcn10/dcn10_ipp.h" #include "dcn30/dcn30_hubbub.h" #include "dcn31/dcn31_hubbub.h" #include "dcn32/dcn32_hubbub.h" #include "dcn32/dcn32_mpc.h" #include "dcn32_hubp.h" #include "irq/dcn32/irq_service_dcn32.h" #include "dcn32/dcn32_dpp.h" #include "dcn32/dcn32_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn30/dcn30_opp.h" #include "dcn20/dcn20_dsc.h" #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn32/dcn32_dio_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" #include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" #include "virtual/virtual_stream_encoder.h" #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" #include "link.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" #include "dcn32/dcn32_mmhubbub.h" #include "dcn/dcn_3_2_0_offset.h" #include "dcn/dcn_3_2_0_sh_mask.h" #include "nbio/nbio_4_3_0_offset.h" #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" #include "dml/dcn32/dcn32_fpu.h" #define DC_LOGGER_INIT(logger) enum dcn32_clk_src_array_id { DCN32_CLK_SRC_PLL0, DCN32_CLK_SRC_PLL1, DCN32_CLK_SRC_PLL2, DCN32_CLK_SRC_PLL3, DCN32_CLK_SRC_PLL4, DCN32_CLK_SRC_TOTAL }; /* 
begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ #define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SR_ARR(reg_name, id) \ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name #define SR_ARR_INIT(reg_name, id, value) \ REG_STRUCT[id].reg_name = value #define SRI(reg_name, block, id)\ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI_ARR(reg_name, block, id)\ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SR_ARR_I2C(reg_name, id) \ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name #define SRI_ARR_I2C(reg_name, block, id)\ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI_ARR_ALPHABET(reg_name, block, index, id)\ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI2(reg_name, block, id)\ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SRI2_ARR(reg_name, block, id)\ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SRIR(var_name, reg_name, block, id)\ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_ARR_2(reg_name, block, id, inst)\ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_MPC_RMU(reg_name, block, id)\ 
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_DWB(reg_name, temp_name, block, id)\ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## temp_name #define SF_DWB2(reg_name, block, id, field_name, post_fix) \ .field_name = reg_name ## __ ## field_name ## post_fix #define DCCG_SRII(reg_name, block, id)\ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define VUPDATE_SRII(reg_name, block, id)\ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ reg ## reg_name ## _ ## block ## id /* NBIO */ #define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ regBIF_BX0_ ## reg_name #define NBIO_SR_ARR(reg_name, id)\ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ regBIF_BX0_ ## reg_name #undef CTX #define CTX ctx #define REG(reg_name) \ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) static struct bios_registers bios_regs; #define bios_regs_init() \ ( \ NBIO_SR(BIOS_SCRATCH_3),\ NBIO_SR(BIOS_SCRATCH_6)\ ) #define clk_src_regs_init(index, pllid)\ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) static struct dce110_clk_src_regs clk_src_regs[5]; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK) }; #define abm_regs_init(id)\ ABM_DCN32_REG_LIST_RI(id) static struct dce_abm_registers abm_regs[4]; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN32(_MASK) }; #define audio_regs_init(id)\ 
AUD_COMMON_REG_LIST_RI(id) static struct dce_audio_registers audio_regs[5]; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define vpg_regs_init(id)\ VPG_DCN3_REG_LIST_RI(id) static struct dcn30_vpg_registers vpg_regs[10]; static const struct dcn30_vpg_shift vpg_shift = { DCN3_VPG_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_vpg_mask vpg_mask = { DCN3_VPG_MASK_SH_LIST(_MASK) }; #define afmt_regs_init(id)\ AFMT_DCN3_REG_LIST_RI(id) static struct dcn30_afmt_registers afmt_regs[6]; static const struct dcn30_afmt_shift afmt_shift = { DCN3_AFMT_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_afmt_mask afmt_mask = { DCN3_AFMT_MASK_SH_LIST(_MASK) }; #define apg_regs_init(id)\ APG_DCN31_REG_LIST_RI(id) static struct dcn31_apg_registers apg_regs[4]; static const struct dcn31_apg_shift apg_shift = { DCN31_APG_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_apg_mask apg_mask = { DCN31_APG_MASK_SH_LIST(_MASK) }; #define stream_enc_regs_init(id)\ SE_DCN32_REG_LIST_RI(id) static struct dcn10_stream_enc_registers stream_enc_regs[5]; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN32(_MASK) }; #define aux_regs_init(id)\ DCN2_AUX_REG_LIST_RI(id) static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; #define hpd_regs_init(id)\ HPD_REG_LIST_RI(id) static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; #define link_regs_init(id, phyid)\ ( \ LE_DCN31_REG_LIST_RI(id), \ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ ) 
/*DPCS_DCN31_REG_LIST(id),*/ \ static struct dcn10_link_enc_registers link_enc_regs[5]; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ //DPCS_DCN31_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ //DPCS_DCN31_MASK_SH_LIST(_MASK) }; #define hpo_dp_stream_encoder_reg_init(id)\ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) }; #define hpo_dp_link_encoder_reg_init(id)\ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) /*DCN3_1_RDPCSTX_REG_LIST(0),*/ /*DCN3_1_RDPCSTX_REG_LIST(1),*/ /*DCN3_1_RDPCSTX_REG_LIST(2),*/ /*DCN3_1_RDPCSTX_REG_LIST(3),*/ static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) }; #define dpp_regs_init(id)\ DPP_REG_LIST_DCN30_COMMON_RI(id) static struct dcn3_dpp_registers dpp_regs[4]; static const struct dcn3_dpp_shift tf_shift = { DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT) }; static const struct dcn3_dpp_mask tf_mask = { DPP_REG_LIST_SH_MASK_DCN30_COMMON(_MASK) }; #define opp_regs_init(id)\ OPP_REG_LIST_DCN30_RI(id) static struct dcn20_opp_registers opp_regs[4]; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; #define aux_engine_regs_init(id)\ ( \ AUX_COMMON_REG_LIST0_RI(id), \ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ 
SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\ ) static struct dce110_aux_registers aux_engine_regs[5]; static const struct dce110_aux_registers_shift aux_shift = { DCN_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCN_AUX_MASK_SH_LIST(_MASK) }; #define dwbc_regs_dcn3_init(id)\ DWBC_COMMON_REG_LIST_DCN30_RI(id) static struct dcn30_dwbc_registers dwbc30_regs[1]; static const struct dcn30_dwbc_shift dwbc30_shift = { DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_dwbc_mask dwbc30_mask = { DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define mcif_wb_regs_dcn3_init(id)\ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id) static struct dcn30_mmhubbub_registers mcif_wb30_regs[1]; static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK) }; #define dsc_regsDCN20_init(id)\ DSC_REG_LIST_DCN20_RI(id) static struct dcn20_dsc_registers dsc_regs[4]; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; static struct dcn30_mpc_registers mpc_regs; #define dcn_mpc_regs_init() \ MPC_REG_LIST_DCN3_2_RI(0),\ MPC_REG_LIST_DCN3_2_RI(1),\ MPC_REG_LIST_DCN3_2_RI(2),\ MPC_REG_LIST_DCN3_2_RI(3),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) static const struct dcn30_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn30_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) }; #define optc_regs_init(id)\ OPTC_COMMON_REG_LIST_DCN3_2_RI(id) static struct dcn_optc_registers optc_regs[4]; static const struct dcn_optc_shift 
optc_shift = { OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) }; static const struct dcn_optc_mask optc_mask = { OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK) }; #define hubp_regs_init(id)\ HUBP_REG_LIST_DCN32_RI(id) static struct dcn_hubp2_registers hubp_regs[4]; static const struct dcn_hubp2_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn_hubp2_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN32(_MASK) }; static struct dcn_hubbub_registers hubbub_reg; #define hubbub_reg_init()\ HUBBUB_REG_LIST_DCN32_RI(0) static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN32(_MASK) }; static struct dccg_registers dccg_regs; #define dccg_regs_init()\ DCCG_REG_LIST_DCN32_RI() static const struct dccg_shift dccg_shift = { DCCG_MASK_SH_LIST_DCN32(__SHIFT) }; static const struct dccg_mask dccg_mask = { DCCG_MASK_SH_LIST_DCN32(_MASK) }; #define SRII2(reg_name_pre, reg_name_post, id)\ .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ ## id ## _ ## reg_name_post ## _BASE_IDX) + \ reg ## reg_name_pre ## id ## _ ## reg_name_post #define HWSEQ_DCN32_REG_LIST()\ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ SR(DIO_MEM_PWR_CTRL), \ SR(ODM_MEM_PWR_CTRL3), \ SR(MMHUBBUB_MEM_PWR_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL2), \ SR(DCFCLK_CNTL),\ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ SRII(PIXEL_RATE_CNTL, OTG, 0), \ SRII(PIXEL_RATE_CNTL, OTG, 1),\ SRII(PIXEL_RATE_CNTL, OTG, 2),\ SRII(PIXEL_RATE_CNTL, OTG, 3),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ SR(MICROSECOND_TIME_BASE_DIV), \ SR(MILLISECOND_TIME_BASE_DIV), \ SR(DISPCLK_FREQ_CHANGE_CNTL), \ SR(RBBMIF_TIMEOUT_DIS), \ SR(RBBMIF_TIMEOUT_DIS_2), \ SR(DCHUBBUB_CRC_CTRL), \ SR(DPP_TOP0_DPP_CRC_CTRL), \ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ SR(MPC_CRC_CTRL), \ 
SR(MPC_CRC_RESULT_GB), \ SR(MPC_CRC_RESULT_C), \ SR(MPC_CRC_RESULT_AR), \ SR(DOMAIN0_PG_CONFIG), \ SR(DOMAIN1_PG_CONFIG), \ SR(DOMAIN2_PG_CONFIG), \ SR(DOMAIN3_PG_CONFIG), \ SR(DOMAIN16_PG_CONFIG), \ SR(DOMAIN17_PG_CONFIG), \ SR(DOMAIN18_PG_CONFIG), \ SR(DOMAIN19_PG_CONFIG), \ SR(DOMAIN0_PG_STATUS), \ SR(DOMAIN1_PG_STATUS), \ SR(DOMAIN2_PG_STATUS), \ SR(DOMAIN3_PG_STATUS), \ SR(DOMAIN16_PG_STATUS), \ SR(DOMAIN17_PG_STATUS), \ SR(DOMAIN18_PG_STATUS), \ SR(DOMAIN19_PG_STATUS), \ SR(D1VGA_CONTROL), \ SR(D2VGA_CONTROL), \ SR(D3VGA_CONTROL), \ SR(D4VGA_CONTROL), \ SR(D5VGA_CONTROL), \ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ SR(AZALIA_AUDIO_DTO), \ SR(AZALIA_CONTROLLER_CLOCK_GATING) static struct dce_hwseq_registers hwseq_reg; #define hwseq_reg_init()\ HWSEQ_DCN32_REG_LIST() #define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, 
DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN32_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN32_MASK_SH_LIST(_MASK) }; #define vmid_regs_init(id)\ DCN20_VMID_REG_LIST_RI(id) static struct dcn_vmid_registers vmid_regs[16]; static const struct dcn20_vmid_shift vmid_shifts = { DCN20_VMID_MASK_SH_LIST(__SHIFT) }; static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; static const struct resource_caps res_cap_dcn32 = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, .num_audio = 5, .num_stream_encoder = 5, .num_hpo_dp_stream_encoder = 4, .num_hpo_dp_link_encoder = 2, .num_pll = 5, .num_dwb = 1, .num_ddc = 5, .num_vmid = 16, .num_mpc_3dlut = 4, .num_dsc = 4, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true, .p010 = true, .ayuv = false, }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 16000 }, // 6:1 downscaling ratio: 1000/6 = 166.666 .max_downscale_factor = { .argb8888 = 167, .nv12 = 167, .fp16 = 
167 }, 64, 64 }; static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, no need to MPC split anymore .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, .max_downscale_src_width = 7680,/*upto 8K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .enable_mem_low_power = { .bits = { .vga = false, .i2c = false, .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled .dscl = false, .cm = false, .mpc = false, .optc = true, } }, .use_max_lb = true, .force_disable_subvp = false, .exit_idle_opt_for_cursor_updates = true, .enable_single_display_2to1_odm_policy = true, /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/ .enable_double_buffered_dsc_pg_support = true, .enable_dp_dig_pixel_rate_div_policy = 1, .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" .alloc_extra_way_for_cursor = true, .min_prefetch_in_strobe_ns = 60000, // 60us .disable_unbounded_requesting = false, .override_dispclk_programming = true, .disable_fpo_optimizations = false, .fpo_vactive_margin_us = 2000, // 2000us .disable_fpo_vactive = false, .disable_boot_optimizations = false, .disable_subvp_high_refresh = false, .disable_dp_plus_plus_wa = true, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, .enable_legacy_fast_update = false, }; static struct dce_aux *dcn32_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; #undef REG_STRUCT #define REG_STRUCT aux_engine_regs 
aux_engine_regs_init(0), aux_engine_regs_init(1), aux_engine_regs_init(2), aux_engine_regs_init(3), aux_engine_regs_init(4); dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs_init(id)\ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) static struct dce_i2c_registers i2c_hw_regs[5]; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static struct dce_i2c_hw *dcn32_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; #undef REG_STRUCT #define REG_STRUCT i2c_hw_regs i2c_inst_regs_init(1), i2c_inst_regs_init(2), i2c_inst_regs_init(3), i2c_inst_regs_init(4), i2c_inst_regs_init(5); dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct clock_source *dcn32_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn31_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx) { int i; struct dcn20_hubbub *hubbub2 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub2) return NULL; #undef REG_STRUCT #define REG_STRUCT hubbub_reg hubbub_reg_init(); #undef REG_STRUCT #define REG_STRUCT vmid_regs vmid_regs_init(0), vmid_regs_init(1), vmid_regs_init(2), vmid_regs_init(3), 
vmid_regs_init(4), vmid_regs_init(5), vmid_regs_init(6), vmid_regs_init(7), vmid_regs_init(8), vmid_regs_init(9), vmid_regs_init(10), vmid_regs_init(11), vmid_regs_init(12), vmid_regs_init(13), vmid_regs_init(14), vmid_regs_init(15); hubbub32_construct(hubbub2, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask, ctx->dc->dml.ip.det_buffer_size_kbytes, ctx->dc->dml.ip.pixel_chunk_size_kbytes, ctx->dc->dml.ip.config_return_buffer_size_in_kbytes); for (i = 0; i < res_cap_dcn32.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub2->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } return &hubbub2->base; } static struct hubp *dcn32_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; #undef REG_STRUCT #define REG_STRUCT hubp_regs hubp_regs_init(0), hubp_regs_init(1), hubp_regs_init(2), hubp_regs_init(3); if (hubp32_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } static void dcn32_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN30_DPP(*dpp)); *dpp = NULL; } static struct dpp *dcn32_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp3 = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); if (!dpp3) return NULL; #undef REG_STRUCT #define REG_STRUCT dpp_regs dpp_regs_init(0), dpp_regs_init(1), dpp_regs_init(2), dpp_regs_init(3); if (dpp32_construct(dpp3, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) return &dpp3->base; BREAK_TO_DEBUGGER(); kfree(dpp3); return NULL; } static struct mpc *dcn32_mpc_create( struct dc_context *ctx, int num_mpcc, int num_rmu) { struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); if (!mpc30) return NULL; #undef REG_STRUCT #define REG_STRUCT mpc_regs dcn_mpc_regs_init(); dcn32_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); return 
&mpc30->base; } static struct output_pixel_processor *dcn32_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp2 = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp2) { BREAK_TO_DEBUGGER(); return NULL; } #undef REG_STRUCT #define REG_STRUCT opp_regs opp_regs_init(0), opp_regs_init(1), opp_regs_init(2), opp_regs_init(3); dcn20_opp_construct(opp2, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp2->base; } static struct timing_generator *dcn32_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; #undef REG_STRUCT #define REG_STRUCT optc_regs optc_regs_init(0), optc_regs_init(1), optc_regs_init(2), optc_regs_init(3); tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &optc_regs[instance]; tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; dcn32_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; static struct link_encoder *dcn32_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; #undef REG_STRUCT #define REG_STRUCT link_enc_aux_regs aux_regs_init(0), aux_regs_init(1), aux_regs_init(2), aux_regs_init(3), aux_regs_init(4); #undef REG_STRUCT #define REG_STRUCT link_enc_hpd_regs hpd_regs_init(0), hpd_regs_init(1), hpd_regs_init(2), hpd_regs_init(3), hpd_regs_init(4); #undef REG_STRUCT #define REG_STRUCT link_enc_regs link_regs_init(0, A), link_regs_init(1, B), 
link_regs_init(2, C), link_regs_init(3, D), link_regs_init(4, E); dcn32_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc20->enc10.base; } struct panel_cntl *dcn32_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dcn31_panel_cntl *panel_cntl = kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dcn31_panel_cntl_construct(panel_cntl, init_data); return &panel_cntl->base; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS, FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *dcn32_create_audio( struct dc_context *ctx, unsigned int inst) { #undef REG_STRUCT #define REG_STRUCT audio_regs audio_regs_init(0), audio_regs_init(1), audio_regs_init(2), audio_regs_init(3), audio_regs_init(4); return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct vpg *dcn32_vpg_create( struct dc_context *ctx, uint32_t inst) { struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); if (!vpg3) return NULL; #undef REG_STRUCT #define REG_STRUCT vpg_regs vpg_regs_init(0), vpg_regs_init(1), vpg_regs_init(2), vpg_regs_init(3), vpg_regs_init(4), vpg_regs_init(5), vpg_regs_init(6), vpg_regs_init(7), vpg_regs_init(8), vpg_regs_init(9); vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); return &vpg3->base; } static struct afmt *dcn32_afmt_create( struct dc_context *ctx, uint32_t inst) { struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); if (!afmt3) return NULL; #undef REG_STRUCT #define REG_STRUCT afmt_regs afmt_regs_init(0), afmt_regs_init(1), afmt_regs_init(2), afmt_regs_init(3), afmt_regs_init(4), 
afmt_regs_init(5); afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); return &afmt3->base; } static struct apg *dcn31_apg_create( struct dc_context *ctx, uint32_t inst) { struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); if (!apg31) return NULL; #undef REG_STRUCT #define REG_STRUCT apg_regs apg_regs_init(0), apg_regs_init(1), apg_regs_init(2), apg_regs_init(3); apg31_construct(apg31, ctx, inst, &apg_regs[inst], &apg_shift, &apg_mask); return &apg31->base; } static struct stream_encoder *dcn32_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; struct afmt *afmt; int vpg_inst; int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ if (eng_id <= ENGINE_ID_DIGF) { vpg_inst = eng_id; afmt_inst = eng_id; } else return NULL; enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn32_vpg_create(ctx, vpg_inst); afmt = dcn32_afmt_create(ctx, afmt_inst); if (!enc1 || !vpg || !afmt) { kfree(enc1); kfree(vpg); kfree(afmt); return NULL; } #undef REG_STRUCT #define REG_STRUCT stream_enc_regs stream_enc_regs_init(0), stream_enc_regs_init(1), stream_enc_regs_init(2), stream_enc_regs_init(3), stream_enc_regs_init(4); dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static struct hpo_dp_stream_encoder *dcn32_hpo_dp_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; struct vpg *vpg; struct apg *apg; uint32_t hpo_dp_inst; uint32_t vpg_inst; uint32_t apg_inst; ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; /* Mapping of VPG register blocks to HPO DP block instance: * VPG[6] -> HPO_DP[0] * VPG[7] -> HPO_DP[1] * VPG[8] -> HPO_DP[2] * VPG[9] -> HPO_DP[3] */ vpg_inst = hpo_dp_inst + 6; 
/* Mapping of APG register blocks to HPO DP block instance: * APG[0] -> HPO_DP[0] * APG[1] -> HPO_DP[1] * APG[2] -> HPO_DP[2] * APG[3] -> HPO_DP[3] */ apg_inst = hpo_dp_inst; /* allocate HPO stream encoder and create VPG sub-block */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); vpg = dcn32_vpg_create(ctx, vpg_inst); apg = dcn31_apg_create(ctx, apg_inst); if (!hpo_dp_enc31 || !vpg || !apg) { kfree(hpo_dp_enc31); kfree(vpg); kfree(apg); return NULL; } #undef REG_STRUCT #define REG_STRUCT hpo_dp_stream_enc_regs hpo_dp_stream_encoder_reg_init(0), hpo_dp_stream_encoder_reg_init(1), hpo_dp_stream_encoder_reg_init(2), hpo_dp_stream_encoder_reg_init(3); dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, hpo_dp_inst, eng_id, vpg, apg, &hpo_dp_stream_enc_regs[hpo_dp_inst], &hpo_dp_se_shift, &hpo_dp_se_mask); return &hpo_dp_enc31->base; } static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create( uint8_t inst, struct dc_context *ctx) { struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); #undef REG_STRUCT #define REG_STRUCT hpo_dp_link_enc_regs hpo_dp_link_encoder_reg_init(0), hpo_dp_link_encoder_reg_init(1); hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], &hpo_dp_le_shift, &hpo_dp_le_mask); return &hpo_dp_enc31->base; } static struct dce_hwseq *dcn32_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); #undef REG_STRUCT #define REG_STRUCT hwseq_reg hwseq_reg_init(); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn32_create_audio, .create_stream_encoder = dcn32_stream_encoder_create, .create_hpo_dp_stream_encoder = 
dcn32_hpo_dp_stream_encoder_create, .create_hpo_dp_link_encoder = dcn32_hpo_dp_link_encoder_create, .create_hwseq = dcn32_hwseq_create, }; static void dcn32_resource_destruct(struct dcn32_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { if (pool->base.stream_enc[i]->vpg != NULL) { kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); pool->base.stream_enc[i]->vpg = NULL; } if (pool->base.stream_enc[i]->afmt != NULL) { kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); pool->base.stream_enc[i]->afmt = NULL; } kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); pool->base.stream_enc[i] = NULL; } } for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { if (pool->base.hpo_dp_stream_enc[i] != NULL) { if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); pool->base.hpo_dp_stream_enc[i]->vpg = NULL; } if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); pool->base.hpo_dp_stream_enc[i]->apg = NULL; } kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); pool->base.hpo_dp_stream_enc[i] = NULL; } } for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { if (pool->base.hpo_dp_link_enc[i] != NULL) { kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); pool->base.hpo_dp_link_enc[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(TO_DCN20_HUBBUB(pool->base.hubbub)); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.dpps[i] != NULL) dcn32_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) 
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN20_HUBP(pool->base.hubps[i])); pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_opp; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dwb; i++) { if (pool->base.dwbc[i] != NULL) { kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); pool->base.dwbc[i] = NULL; } if (pool->base.mcif_wb[i] != NULL) { kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); pool->base.mcif_wb[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn20_clock_source_destroy(&pool->base.clock_sources[i]); pool->base.clock_sources[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { if (pool->base.mpc_lut[i] != NULL) { dc_3dlut_func_release(pool->base.mpc_lut[i]); pool->base.mpc_lut[i] = NULL; } if (pool->base.mpc_shaper[i] != NULL) { dc_transfer_func_release(pool->base.mpc_shaper[i]); pool->base.mpc_shaper[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn20_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } for (i = 0; i < 
pool->base.res_cap->num_timing_generator; i++) { if (pool->base.multiple_abms[i] != NULL) dce_abm_destroy(&pool->base.multiple_abms[i]); } if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.oem_device != NULL) { struct dc *dc = pool->base.oem_device->ctx->dc; dc->link_srv->destroy_ddc_service(&pool->base.oem_device); } } static bool dcn32_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t dwb_count = pool->res_cap->num_dwb; for (i = 0; i < dwb_count; i++) { struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); if (!dwbc30) { dm_error("DC: failed to create dwbc30!\n"); return false; } #undef REG_STRUCT #define REG_STRUCT dwbc30_regs dwbc_regs_dcn3_init(0); dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); pool->dwbc[i] = &dwbc30->base; } return true; } static bool dcn32_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t dwb_count = pool->res_cap->num_dwb; for (i = 0; i < dwb_count; i++) { struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); if (!mcif_wb30) { dm_error("DC: failed to create mcif_wb30!\n"); return false; } #undef REG_STRUCT #define REG_STRUCT mcif_wb30_regs mcif_wb_regs_dcn3_init(0); dcn32_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); pool->mcif_wb[i] = &mcif_wb30->base; } return true; } static struct display_stream_compressor *dcn32_dsc_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); return NULL; } #undef REG_STRUCT #define REG_STRUCT dsc_regs dsc_regsDCN20_init(0), dsc_regsDCN20_init(1), dsc_regsDCN20_init(2), dsc_regsDCN20_init(3); dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); dsc->max_image_width = 6016; return &dsc->base; } 
static void dcn32_destroy_resource_pool(struct resource_pool **pool) { struct dcn32_resource_pool *dcn32_pool = TO_DCN32_RES_POOL(*pool); dcn32_resource_destruct(dcn32_pool); kfree(dcn32_pool); *pool = NULL; } bool dcn32_acquire_post_bldn_3dlut( struct resource_context *res_ctx, const struct resource_pool *pool, int mpcc_id, struct dc_3dlut **lut, struct dc_transfer_func **shaper) { bool ret = false; ASSERT(*lut == NULL && *shaper == NULL); *lut = NULL; *shaper = NULL; if (!res_ctx->is_mpc_3dlut_acquired[mpcc_id]) { *lut = pool->mpc_lut[mpcc_id]; *shaper = pool->mpc_shaper[mpcc_id]; res_ctx->is_mpc_3dlut_acquired[mpcc_id] = true; ret = true; } return ret; } bool dcn32_release_post_bldn_3dlut( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_3dlut **lut, struct dc_transfer_func **shaper) { int i; bool ret = false; for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) { res_ctx->is_mpc_3dlut_acquired[i] = false; pool->mpc_lut[i]->state.raw = 0; *lut = NULL; *shaper = NULL; ret = true; break; } } return ret; } static void dcn32_enable_phantom_plane(struct dc *dc, struct dc_state *context, struct dc_stream_state *phantom_stream, unsigned int dc_pipe_idx) { struct dc_plane_state *phantom_plane = NULL; struct dc_plane_state *prev_phantom_plane = NULL; struct pipe_ctx *curr_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; while (curr_pipe) { if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) phantom_plane = prev_phantom_plane; else phantom_plane = dc_create_plane_state(dc); memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, sizeof(phantom_plane->scaling_quality)); memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect)); memcpy(&phantom_plane->dst_rect, 
&curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect)); memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect)); memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size, sizeof(phantom_plane->plane_size)); memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info, sizeof(phantom_plane->tiling_info)); memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc)); phantom_plane->format = curr_pipe->plane_state->format; phantom_plane->rotation = curr_pipe->plane_state->rotation; phantom_plane->visible = curr_pipe->plane_state->visible; /* Shadow pipe has small viewport. */ phantom_plane->clip_rect.y = 0; phantom_plane->clip_rect.height = phantom_stream->src.height; phantom_plane->is_phantom = true; dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); curr_pipe = curr_pipe->bottom_pipe; prev_phantom_plane = phantom_plane; } } static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, unsigned int pipe_cnt, unsigned int dc_pipe_idx) { struct dc_stream_state *phantom_stream = NULL; struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; phantom_stream->dpms_off = true; phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; /* stream has limited viewport and small timing */ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src)); memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); DC_FP_START(); 
dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx); DC_FP_END(); dc_add_stream_to_ctx(dc, context, phantom_stream); return phantom_stream; } void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) { int i; struct dc_plane_state *phantom_plane = NULL; struct dc_stream_state *phantom_stream = NULL; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { phantom_plane = pipe->plane_state; phantom_stream = pipe->stream; dc_plane_state_retain(phantom_plane); dc_stream_retain(phantom_stream); } } } // return true if removed piped from ctx, false otherwise bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update) { int i; bool removed_pipe = false; struct dc_plane_state *phantom_plane = NULL; struct dc_stream_state *phantom_stream = NULL; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // build scaling params for phantom pipes if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { phantom_plane = pipe->plane_state; phantom_stream = pipe->stream; dc_rem_all_planes_for_stream(dc, pipe->stream, context); dc_remove_stream_from_ctx(dc, context, pipe->stream); /* Ref count is incremented on allocation and also when added to the context. * Therefore we must call release for the the phantom plane and stream once * they are removed from the ctx to finally decrement the refcount to 0 to free. */ dc_plane_state_release(phantom_plane); dc_stream_release(phantom_stream); removed_pipe = true; } /* For non-full updates, a shallow copy of the current state * is created. 
In this case we don't want to erase the current * state (there can be 2 HIRQL threads, one in flip, and one in * checkMPO) that can cause a race condition. * * This is just a workaround, needs a proper fix. */ if (!fast_update) { // Clear all phantom stream info if (pipe->stream) { pipe->stream->mall_stream_config.type = SUBVP_NONE; pipe->stream->mall_stream_config.paired_stream = NULL; } if (pipe->plane_state) { pipe->plane_state->is_phantom = false; } } } return removed_pipe; } /* TODO: Input to this function should indicate which pipe indexes (or streams) * require a phantom pipe / stream */ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, unsigned int pipe_cnt, unsigned int index) { struct dc_stream_state *phantom_stream = NULL; unsigned int i; // The index of the DC pipe passed into this function is guarenteed to // be a valid candidate for SubVP (i.e. has a plane, stream, doesn't // already have phantom pipe assigned, etc.) by previous checks. phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index); dcn32_enable_phantom_plane(dc, context, phantom_stream, index); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // Build scaling params for phantom pipes which were newly added. // We determine which phantom pipes were added by comparing with // the phantom stream. 
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { pipe->stream->use_dynamic_meta = false; pipe->plane_state->flip_immediate = false; if (!resource_build_scaling_params(pipe)) { // Log / remove phantom pipes since failed to build scaling params } } } } bool dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool out = false; BW_VAL_TRACE_SETUP(); int vlevel = 0; int pipe_cnt = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); struct mall_temp_config mall_temp_config; /* To handle Freesync properly, setting FreeSync DML parameters * to its default state for the first stage of validation */ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true; DC_LOGGER_INIT(dc->ctx->logger); /* For fast validation, there are situations where a shallow copy of * of the dc->current_state is created for the validation. In this case * we want to save and restore the mall config because we always * teardown subvp at the beginning of validation (and don't attempt * to add it back if it's fast validation). If we don't restore the * subvp config in cases of fast validation + shallow copy of the * dc->current_state, the dc->current_state will have a partially * removed subvp state when we did not intend to remove it. 
*/ if (fast_validate) { memset(&mall_temp_config, 0, sizeof(mall_temp_config)); dcn32_save_mall_state(dc, context, &mall_temp_config); } BW_VAL_TRACE_COUNT(); DC_FP_START(); out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); DC_FP_END(); if (fast_validate) dcn32_restore_mall_state(dc, context, &mall_temp_config); if (pipe_cnt == 0) goto validate_out; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); if (fast_validate) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); dcn32_override_min_req_memclk(dc, context); BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; validate_fail: DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); BW_VAL_TRACE_SKIP(fail); out = false; validate_out: kfree(pipes); BW_VAL_TRACE_FINISH(); return out; } int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; bool vsr_odm_support = false; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); /* Determine whether we will apply ODM 2to1 policy: * Applies to single display and where the number of planes is less than 3. * For 3 plane case ( 2 MPO planes ), we will not set the policy for the MPO pipes. * * Apply pipe split policy first so we can predict the pipe split correctly * (dcn32_predict_pipe_split). 
*/ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) continue; pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 && res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width); if (context->stream_count == 1 && context->stream_status[0].plane_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) && is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) && pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ && dc->debug.enable_single_display_2to1_odm_policy && !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; } pipe_cnt++; } for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) continue; pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; pipes[pipe_cnt].pipe.src.gpuvm = true; DC_FP_START(); dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); DC_FP_END(); pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; /* Only populate DML input with subvp info for full updates. * This is just a workaround -- needs a proper fix. 
*/ if (!fast_validate) { switch (pipe->stream->mall_stream_config.type) { case SUBVP_MAIN: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; subvp_in_use = true; break; case SUBVP_PHANTOM: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_phantom_pipe; pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; // Disallow unbounded req for SubVP according to DCHUB programming guide pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; break; case SUBVP_NONE: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_disable; pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; break; default: break; } } pipes[pipe_cnt].dout.dsc_input_bpc = 0; if (pipes[pipe_cnt].dout.dsc_enable) { switch (timing->display_color_depth) { case COLOR_DEPTH_888: pipes[pipe_cnt].dout.dsc_input_bpc = 8; break; case COLOR_DEPTH_101010: pipes[pipe_cnt].dout.dsc_input_bpc = 10; break; case COLOR_DEPTH_121212: pipes[pipe_cnt].dout.dsc_input_bpc = 12; break; default: ASSERT(0); break; } } DC_FP_START(); dcn32_predict_pipe_split(context, &pipes[pipe_cnt]); DC_FP_END(); pipe_cnt++; } /* For DET allocation, we don't want to use DML policy (not optimal for utilizing all * the DET available for each pipe). Use the DET override input to maintain our driver * policy. */ dcn32_set_det_allocations(dc, context, pipes); // In general cases we want to keep the dram clock change requirement // (prefer configs that support MCLK switch). 
Only override to false // for SubVP if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use) context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false; else context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true; return pipe_cnt; } static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel) { DC_FP_START(); dcn32_calculate_wm_and_dlg_fpu(dc, context, pipes, pipe_cnt, vlevel); DC_FP_END(); } static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); DC_FP_END(); } static struct resource_funcs dcn32_res_pool_funcs = { .destroy = dcn32_destroy_resource_pool, .link_enc_create = dcn32_link_encoder_create, .link_enc_create_minimal = NULL, .panel_cntl_create = dcn32_panel_cntl_create, .validate_bandwidth = dcn32_validate_bandwidth, .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, .set_mcif_arb_params = dcn30_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut, .release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut, .update_bw_bounding_box = dcn32_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, .remove_phantom_pipes = 
dcn32_remove_phantom_pipes,
	.retain_phantom_pipes = dcn32_retain_phantom_pipes,
	.save_mall_state = dcn32_save_mall_state,
	.restore_mall_state = dcn32_restore_mall_state,
};

/* Read the pipe-disable harvesting fuses; each set bit in the low nibble
 * marks one fused-off (unusable) pipe. DCN32 supports at most 4 pipes.
 */
static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
	/* DCN32 support max 4 pipes */
	value = value & 0xf;
	return value;
}

/* Build the DCN3.2 resource pool: register-offset init, pipe-fuse
 * harvesting, then creation of all HW blocks. (Body continues beyond
 * this chunk.)
 */
static bool dcn32_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn32_resource_pool *pool)
{
	int i, j;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;
	struct ddc_service_init_data ddc_init_data = {0};
	uint32_t pipe_fuses = 0;
	uint32_t num_pipes = 4;

#undef REG_STRUCT
#define REG_STRUCT bios_regs
	bios_regs_init();

#undef REG_STRUCT
#define REG_STRUCT clk_src_regs
	clk_src_regs_init(0, A),
	clk_src_regs_init(1, B),
	clk_src_regs_init(2, C),
	clk_src_regs_init(3, D),
	clk_src_regs_init(4, E);

#undef REG_STRUCT
#define REG_STRUCT abm_regs
	abm_regs_init(0),
	abm_regs_init(1),
	abm_regs_init(2),
	abm_regs_init(3);

#undef REG_STRUCT
#define REG_STRUCT dccg_regs
	dccg_regs_init();

	DC_FP_START();

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap_dcn32;
	/* max number of pipes for ASIC before checking for pipe fuses */
	num_pipes = pool->base.res_cap->num_timing_generator;
	pipe_fuses = read_pipe_fuses(ctx);

	/* each set fuse bit removes one usable pipe */
	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
		if (pipe_fuses & 1 << i)
			num_pipes--;

	if (pipe_fuses & 1)
		ASSERT(0); //Unexpected - Pipe 0 should always be fully functional!

	if (pipe_fuses & CC_DC_PIPE_DIS__DC_FULL_DIS_MASK)
		ASSERT(0); //Entire DCN is harvested!

	/* within dml lib, initial value is hard coded, if ASIC pipe is fused,
	 * the value will be changed, update max_num_dpp and max_num_otg for dml.
*/ dcn3_2_ip.max_num_dpp = num_pipes; dcn3_2_ip.max_num_otg = num_pipes; pool->base.funcs = &dcn32_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.timing_generator_count = num_pipes; pool->base.pipe_count = num_pipes; pool->base.mpcc_count = num_pipes; dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/ dc->caps.max_cursor_size = 64; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; dc->caps.mall_size_total = 0; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; /* Calculate the available MALL space */ dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( dc, dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel * 1024 * 1024; dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; dc->caps.subvp_fw_processing_delay_us = 15; dc->caps.subvp_drr_max_vblank_margin_us = 40; dc->caps.subvp_prefetch_end_to_mall_start_us = 15; dc->caps.subvp_swath_height_margin_lines = 16; dc->caps.subvp_pstate_allow_width_us = 20; dc->caps.subvp_vertical_int_margin_us = 30; dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin dc->caps.max_slave_planes = 2; dc->caps.max_slave_yuv_planes = 2; dc->caps.max_slave_rgb_planes = 2; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->config.forceHBR2CP2520) dc->caps.force_dp_tps4_for_cp2520 = false; dc->caps.dp_hpo = true; dc->caps.dp_hdmi21_pcon_support = true; dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; 
dc->caps.dmcub_support = true; dc->caps.seamless_odm = true; dc->caps.max_v_total = (1 << 15) - 1; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; dc->caps.color.dpp.input_lut_shared = 0; dc->caps.color.dpp.icsc = 1; dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr dc->caps.color.dpp.dgam_rom_caps.srgb = 1; dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; dc->caps.color.dpp.dgam_rom_caps.pq = 1; dc->caps.color.dpp.dgam_rom_caps.hlg = 1; dc->caps.color.dpp.post_csc = 1; dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; dc->caps.color.dpp.hw_3d_lut = 1; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.dpp.ogam_rom_caps.pq = 0; dc->caps.color.dpp.ogam_rom_caps.hlg = 0; dc->caps.color.dpp.ocsc = 0; dc->caps.color.mpc.gamut_remap = 1; dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //4, configurable to be before or after BLND in MPCC dc->caps.color.mpc.ogam_ram = 1; dc->caps.color.mpc.ogam_rom_caps.srgb = 0; dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; uint8_t is_vbios_lttpr_enable = 0; bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; } /* interop bit is implicit */ { dc->caps.vbios_lttpr_aware = true; } } if (dc->ctx->dce_environment == 
DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); /************************************************* * Create resources * *************************************************/ /* Clock Sources for Pixel Clock*/ pool->base.clock_sources[DCN32_CLK_SRC_PLL0] = dcn32_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCN32_CLK_SRC_PLL1] = dcn32_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCN32_CLK_SRC_PLL2] = dcn32_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCN32_CLK_SRC_PLL3] = dcn32_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->base.clock_sources[DCN32_CLK_SRC_PLL4] = dcn32_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->base.clk_src_count = DCN32_CLK_SRC_TOTAL; /* todo: not reuse phy_pll registers */ pool->base.dp_clock_source = dcn32_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } } /* DCCG */ pool->base.dccg = dccg32_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); if (pool->base.dccg == NULL) { dm_error("DC: failed to create dccg!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* DML */ dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); /* IRQ Service */ init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dcn32_create(&init_data); if (!pool->base.irqs) goto create_fail; /* HUBBUB */ pool->base.hubbub = dcn32_hubbub_create(ctx); if (pool->base.hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create 
hubbub!\n"); goto create_fail; } /* HUBPs, DPPs, OPPs, TGs, ABMs */ for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) { /* if pipe is disabled, skip instance of HW pipe, * i.e, skip ASIC register instance */ if (pipe_fuses & 1 << i) continue; /* HUBPs */ pool->base.hubps[j] = dcn32_hubp_create(ctx, i); if (pool->base.hubps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create hubps!\n"); goto create_fail; } /* DPPs */ pool->base.dpps[j] = dcn32_dpp_create(ctx, i); if (pool->base.dpps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpps!\n"); goto create_fail; } /* OPPs */ pool->base.opps[j] = dcn32_opp_create(ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto create_fail; } /* TGs */ pool->base.timing_generators[j] = dcn32_timing_generator_create( ctx, i); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; } /* ABMs */ pool->base.multiple_abms[j] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); if (pool->base.multiple_abms[j] == NULL) { dm_error("DC: failed to create abm for pipe %d!\n", i); BREAK_TO_DEBUGGER(); goto create_fail; } /* index for resource pool arrays for next valid pipe */ j++; } /* PSR */ pool->base.psr = dmub_psr_create(ctx); if (pool->base.psr == NULL) { dm_error("DC: failed to create psr obj!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* MPCCs */ pool->base.mpc = dcn32_mpc_create(ctx, pool->base.res_cap->num_timing_generator, pool->base.res_cap->num_mpc_3dlut); if (pool->base.mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto create_fail; } /* DSCs */ for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn32_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create display stream compressor %d!\n", i); goto create_fail; } } /* DWB 
*/ if (!dcn32_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dwbc!\n"); goto create_fail; } /* MMHUBBUB */ if (!dcn32_mmhubbub_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mcif_wb!\n"); goto create_fail; } /* AUX and I2C */ for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn32_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto create_fail; } pool->base.hw_i2cs[i] = dcn32_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create hw i2c!!\n"); goto create_fail; } pool->base.sw_i2cs[i] = NULL; } /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto create_fail; /* HW Sequencer init functions and Plane caps */ dcn32_hw_sequencer_init_functions(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { ddc_init_data.ctx = dc->ctx; ddc_init_data.link = NULL; ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; DC_FP_END(); return true; create_fail: DC_FP_END(); dcn32_resource_destruct(pool); return false; } struct resource_pool *dcn32_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc) { struct dcn32_resource_pool *pool = kzalloc(sizeof(struct dcn32_resource_pool), GFP_KERNEL); if (!pool) return NULL; if 
(dcn32_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); kfree(pool); return NULL; } /* * Find the most optimal free pipe from res_ctx, which could be used as a * secondary dpp pipe for input opp head pipe. * * a free pipe - a pipe in input res_ctx not yet used for any streams or * planes. * secondary dpp pipe - a pipe gets inserted to a head OPP pipe's MPC blending * tree. This is typical used for rendering MPO planes or additional offset * areas in MPCC combine. * * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe * ------------------------------------------------------------------------- * * PROBLEM: * * 1. There is a hardware limitation that a secondary DPP pipe cannot be * transferred from one MPC blending tree to the other in a single frame. * Otherwise it could cause glitches on the screen. * * For instance, we cannot transition from state 1 to state 2 in one frame. This * is because PIPE1 is transferred from PIPE0's MPC blending tree over to * PIPE2's MPC blending tree, which is not supported by hardware. * To support this transition we need to first remove PIPE1 from PIPE0's MPC * blending tree in one frame and then insert PIPE1 to PIPE2's MPC blending tree * in the next frame. This is not optimal as it will delay the flip for two * frames. * * State 1: * PIPE0 -- secondary DPP pipe --> (PIPE1) * PIPE2 -- secondary DPP pipe --> NONE * * State 2: * PIPE0 -- secondary DPP pipe --> NONE * PIPE2 -- secondary DPP pipe --> (PIPE1) * * 2. We want to in general minimize the unnecessary changes in pipe topology. * If a pipe is already added in current blending tree and there are no changes * to plane topology, we don't want to swap it with another free pipe * unnecessarily in every update. Powering up and down a pipe would require a * full update which delays the flip for 1 frame. If we use the original pipe * we don't have to toggle its power. So we can flip faster. 
 */
/*
 * find_optimal_free_pipe_as_secondary_dpp_pipe - pick a free pipe for use as
 * a secondary DPP pipe under new_opp_head, preferring candidates that
 * minimize MPC-tree transitions (see the algorithm description above).
 *
 * Returns a pipe index, or FREE_PIPE_INDEX_NOT_FOUND if no pipe is free.
 */
static int find_optimal_free_pipe_as_secondary_dpp_pipe(
		const struct resource_context *cur_res_ctx,
		struct resource_context *new_res_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *new_opp_head)
{
	const struct pipe_ctx *cur_opp_head;
	int free_pipe_idx;

	/* Same pipe index in the current (committed) context. */
	cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx];

	/* Best case: reuse a pipe already in this OPP head's blending tree. */
	free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree(
			cur_res_ctx, new_res_ctx, cur_opp_head);

	/* Up until here if we have not found a free secondary pipe, we will
	 * need to wait for at least one frame to complete the transition
	 * sequence.
	 */
	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
		/* NOTE(review): "recource_" spelling presumably matches the
		 * declaration in the shared resource header - confirm before
		 * renaming.
		 */
		free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
				cur_res_ctx, new_res_ctx, pool);

	/* Up until here if we have not found a free secondary pipe, we will
	 * need to wait for at least two frames to complete the transition
	 * sequence. It really doesn't matter which pipe we decide take from
	 * current enabled pipes. It won't save our frame time when we swap only
	 * one pipe or more pipes.
	 */
	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
		free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
				cur_res_ctx, new_res_ctx, pool);

	/* Last resort: any free pipe at all. */
	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
		free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool);

	return free_pipe_idx;
}

/*
 * find_idle_secondary_pipe_check_mpo - find an unused pipe to serve as a
 * secondary (bottom) pipe for primary_pipe, avoiding pipes that the
 * committed state reserves for the opposite ODM half's MPO window.
 * Returns NULL if no suitable pipe is free.
 */
static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *primary_pipe)
{
	int i;
	struct pipe_ctx *secondary_pipe = NULL;
	struct pipe_ctx *next_odm_mpo_pipe = NULL;
	int primary_index, preferred_pipe_idx;
	struct pipe_ctx *old_primary_pipe = NULL;

	/*
	 * Modified from find_idle_secondary_pipe
	 * With windowed MPO and ODM, we want to avoid the case where we want a
	 *  free pipe for the left side but the free pipe is being used on the
	 *  right side.
	 * Add check on current_state if the primary_pipe is the left side,
	 *  to check the right side ( primary_pipe->next_odm_pipe ) to see if
	 *  it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe )
	 * - If so, then don't use this pipe
	 * EXCEPTION - 3 plane ( 2 MPO plane ) case
	 * - in this case, the primary pipe has already gotten a free pipe for the
	 *  MPO window in the left
	 * - when it tries to get a free pipe for the MPO window on the right,
	 *  it will see that it is already assigned to the right side
	 *  ( primary_pipe->next_odm_pipe ).  But in this case, we want this
	 *  free pipe, since it will be for the right side.  So add an
	 *  additional condition, that skipping the free pipe on the right only
	 *  applies if the primary pipe has no bottom pipe currently assigned
	 */
	if (primary_pipe) {
		primary_index = primary_pipe->pipe_idx;
		old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
		if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
			&& (!primary_pipe->bottom_pipe))
			next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;

		/* Prefer the mirror-image index so pipe usage stays balanced. */
		preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
		if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
			!(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
			secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
			secondary_pipe->pipe_idx = preferred_pipe_idx;
		}
	}

	/*
	 * search backwards for the second pipe to keep pipe
	 * assignment more consistent
	 */
	if (!secondary_pipe)
		for (i = pool->pipe_count - 1; i >= 0; i--) {
			if ((res_ctx->pipe_ctx[i].stream == NULL) &&
				!(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
				secondary_pipe = &res_ctx->pipe_ctx[i];
				secondary_pipe->pipe_idx = i;
				break;
			}
		}

	return secondary_pipe;
}

static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
		struct dc_state *state,
		const struct resource_pool *pool,
		struct dc_stream_state *stream,
		const struct pipe_ctx
		*head_pipe)
{
	struct resource_context *res_ctx = &state->res_ctx;
	struct pipe_ctx *idle_pipe, *pipe;
	struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
	int head_index;

	if (!head_pipe)
		ASSERT(0);

	/*
	 * Modified from dcn20_acquire_idle_pipe_for_layer
	 * Check if head_pipe in old_context already has bottom_pipe allocated.
	 *  - If so, check if that pipe is available in the current context.
	 *  -- If so, reuse pipe from old_context
	 */
	head_index = head_pipe->pipe_idx;
	pipe = &old_ctx->pipe_ctx[head_index];
	if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
		/* Reuse previous bottom pipe to avoid re-toggling pipe power. */
		idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
		idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
	} else {
		idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
		if (!idle_pipe)
			return NULL;
	}

	/* Inherit the stream resources from the head; plane resources are
	 * taken from the pool slot that matches the acquired pipe index.
	 */
	idle_pipe->stream = head_pipe->stream;
	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;

	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;

	return idle_pipe;
}

/*
 * dcn32_acquire_free_pipe_as_secondary_dpp_pipe - acquire a free pipe in
 * new_ctx to act as a secondary DPP pipe for opp_head_pipe. Falls back to
 * the legacy MPO-aware path when windowed MPO + ODM is disabled.
 * Returns NULL when no pipe could be acquired.
 */
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
		const struct dc_state *cur_ctx,
		struct dc_state *new_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *opp_head_pipe)
{
	int free_pipe_idx;
	struct pipe_ctx *free_pipe;

	if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm)
		return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
				new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);

	free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
					&cur_ctx->res_ctx, &new_ctx->res_ctx,
					pool, opp_head_pipe);
	if (free_pipe_idx >= 0) {
		free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
		free_pipe->pipe_idx = free_pipe_idx;
		free_pipe->stream = opp_head_pipe->stream;
		free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg;
		free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp;

		free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx];
		free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx];
		free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx];
		free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe->pipe_idx]->inst;
	} else {
		ASSERT(opp_head_pipe);
		free_pipe = NULL;
	}

	return free_pipe;
}

/*
 * dcn32_calc_num_avail_chans_for_mall - clamp num_chans to the number of
 * memory channels whose MALL space is actually accessible on this ASIC
 * revision. See the SKU table rationale below.
 */
unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
{
	/*
	 * DCN32 and DCN321 SKUs may have different sizes for MALL
	 *  but we may not be able to access all the MALL space.
	 *  If the num_chans is power of 2, then we can access all
	 *  of the available MALL space.  Otherwise, we can only
	 *  access:
	 *
	 *  max_cab_size_in_bytes = total_cache_size_in_bytes *
	 *    ((2^floor(log2(num_chans)))/num_chans)
	 *
	 * Calculating the MALL sizes for all available SKUs, we
	 *  have come up with the follow simplified check.
	 * - we have max_chans which provides the max MALL size.
	 *  Each chans supports 4MB of MALL so:
	 *
	 *  total_cache_size_in_bytes = max_chans * 4 MB
	 *
	 * - we have avail_chans which shows the number of channels
	 *  we can use if we can't access the entire MALL space.
	 *  It is generally half of max_chans
	 * - so we use the following checks:
	 *
	 *   if (num_chans == max_chans), return max_chans
	 *   if (num_chans < max_chans), return avail_chans
	 *
	 * - exception is GC_11_0_0 where we can't access max_chans,
	 *  so we define max_avail_chans as the maximum available
	 *  MALL space
	 *
	 */
	int gc_11_0_0_max_chans = 48;
	int gc_11_0_0_max_avail_chans = 32;
	int gc_11_0_0_avail_chans = 16;
	int gc_11_0_3_max_chans = 16;
	int gc_11_0_3_avail_chans = 8;
	int gc_11_0_2_max_chans = 8;
	int gc_11_0_2_avail_chans = 4;

	if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
		return (num_chans == gc_11_0_0_max_chans) ?
			gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
	} else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
		return (num_chans == gc_11_0_2_max_chans) ?
			gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
	} else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) {
		return (num_chans == gc_11_0_3_max_chans) ?
			gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
	}
}
/* repo: linux-master */
/* file: drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c */
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

// header file of functions being implemented
#include "dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
#include "dml/dcn32/dcn32_fpu.h"

/* Formats handled as dual-plane: video (separate luma/chroma) and
 * RGBE + alpha. */
static bool is_dual_plane(enum surface_pixel_format format)
{
	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}

/*
 * dcn32_helper_mall_bytes_to_ways - convert a MALL allocation in bytes to
 * the number of cache ways needed, rounding up to a whole way.
 */
uint32_t dcn32_helper_mall_bytes_to_ways(
		struct dc *dc,
		uint32_t total_size_in_mall_bytes)
{
	uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;

	/* add 2 lines for worst case alignment */
	cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;

	total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
	lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
	num_ways = cache_lines_used / lines_per_way;
	/* Round up: a partially used way still occupies the whole way. */
	if (cache_lines_used % lines_per_way > 0)
		num_ways++;

	return num_ways;
}

/*
 * dcn32_helper_calculate_mall_bytes_for_cursor - bytes of MALL needed to hold
 * the cursor surface for pipe_ctx, scaled by the cursor pixel format.
 * Returns 0 when the cursor is disabled or fits in the DCN cursor buffer
 * (unless ignore_cursor_buf forces the allocation to be counted).
 */
uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool ignore_cursor_buf)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
	uint32_t cursor_mall_size_bytes = 0;

	/* Scale pixel count to bytes per the cursor color format. */
	switch (pipe_ctx->stream->cursor_attributes.color_format) {
	case CURSOR_MODE_MONO:
		cursor_size /= 2;
		break;
	case CURSOR_MODE_COLOR_1BIT_AND:
	case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
	case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
		cursor_size *= 4;
		break;

	case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
	case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
		cursor_size *= 8;
		break;
	}

	/* only count if cursor is enabled, and if additional allocation needed outside of the
	 * DCN cursor buffer
	 */
	if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf ||
			cursor_size > 16384)) {
		/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
		 * Note: add 1 mblk in case of cursor misalignment
		 */
		cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
				DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES;
	}

	return cursor_mall_size_bytes;
}

/**
 * dcn32_helper_calculate_num_ways_for_subvp(): Calculate number of ways needed for SubVP
 *
 * Gets total allocation required for the phantom viewport calculated by DML in bytes and
 * converts to number of cache ways.
 *
 * @dc: current dc state
 * @context: new dc state
 *
 * Return: number of ways required for SubVP
 */
uint32_t dcn32_helper_calculate_num_ways_for_subvp(
		struct dc *dc,
		struct dc_state *context)
{
	if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
		/* Debug override takes precedence over the computed value. */
		if (dc->debug.force_subvp_num_ways) {
			return dc->debug.force_subvp_num_ways;
		} else {
			return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
		}
	} else {
		return 0;
	}
}

/*
 * dcn32_merge_pipes_for_subvp - collapse ODM and MPC pipe splits in context
 * so each stream is driven by a single pipe (SubVP does not support split
 * pipes yet). Freed pipes have their stream/plane state and resources
 * cleared.
 */
void dcn32_merge_pipes_for_subvp(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	/* merge pipes if necessary */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		// For now merge all pipes for SubVP since pipe split case isn't supported yet

		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
		if (pipe->prev_odm_pipe) {
			/*split off odm pipe*/
			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
			if (pipe->next_odm_pipe)
				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;

			pipe->bottom_pipe = NULL;
			pipe->next_odm_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			pipe->top_pipe = NULL;
			pipe->prev_odm_pipe = NULL;
			if (pipe->stream_res.dsc)
				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
			/* MPC split of the same plane: unlink this bottom pipe. */
			struct pipe_ctx *top_pipe = pipe->top_pipe;
			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;

			top_pipe->bottom_pipe = bottom_pipe;
			if (bottom_pipe)
				bottom_pipe->top_pipe = top_pipe;

			pipe->top_pipe = NULL;
			pipe->bottom_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		}
	}
}

/* True when every pipe that has a stream also has a plane attached. */
bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (!pipe->plane_state)
			return false;
	}
	return true;
}

/* True when any pipe in context participates in SubVP (main or phantom). */
bool dcn32_subvp_in_use(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
			return true;
	}
	return false;
}

/* True when any stream in context has more than one plane (MPO). */
bool dcn32_mpo_in_use(struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < context->stream_count; i++) {
		if (context->stream_status[i].plane_count > 1)
			return true;
	}
	return false;
}

/* True when any active plane in context is rotated away from 0 degrees. */
bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
			return true;
	}
	return false;
}

/*
 * dcn32_is_center_timing - detect centered (letterboxed) timings where the
 * addressable height does not match the stream/plane destination height.
 */
bool dcn32_is_center_timing(struct pipe_ctx *pipe)
{
	bool is_center_timing = false;

	if (pipe->stream) {
		if (pipe->stream->timing.v_addressable != pipe->stream->dst.height ||
				pipe->stream->timing.v_addressable != pipe->stream->src.height) {
			is_center_timing = true;
		}
	}

	/* NOTE(review): pipe->stream is dereferenced here guarded only by
	 * pipe->plane_state - presumably plane_state implies a non-NULL
	 * stream; confirm against callers.
	 */
	if (pipe->plane_state) {
		if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
				pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
			is_center_timing = true;
		}
	}

	return is_center_timing;
}

/* True when the pipe's stream is on a PSR-capable link. */
bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
{
	bool psr_capable = false;

	if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
		psr_capable = true;
	}
	return psr_capable;
}

/**
 * dcn32_determine_det_override(): Determine DET allocation for each pipe
 *
 * This function determines how much DET to allocate for each pipe. The total number of
 * DET segments will be split equally among each of the streams, and after that the DET
 * segments per stream will be split equally among the planes for the given stream.
 *
 * If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
 * number of DET for that given plane will be split among the pipes driving that plane.
 *
 *
 * High level algorithm:
 * 1. Split total DET among number of streams
 * 2. For each stream, split DET among the planes
 * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
 *    among those pipes.
 * 4. Assign the DET override to the DML pipes.
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @pipes: Array of DML pipes
 *
 * Return: void
 */
void dcn32_determine_det_override(struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes)
{
	uint32_t i, j, k;
	uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
	uint8_t pipe_counted[MAX_PIPES] = {0};
	uint8_t pipe_cnt = 0;
	struct dc_plane_state *current_plane = NULL;
	uint8_t stream_count = 0;

	for (i = 0; i < context->stream_count; i++) {
		/* Don't count SubVP streams for DET allocation */
		if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
			stream_count++;
	}

	if (stream_count > 0) {
		/* 18 is presumably the total number of DET segments available
		 * on DCN3.2 - confirm against DCN3_2_DET_SEG_SIZE sizing.
		 */
		stream_segments = 18 / stream_count;
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
				continue;

			if (context->stream_status[i].plane_count > 0)
				plane_segments = stream_segments / context->stream_status[i].plane_count;
			else
				plane_segments = stream_segments;
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				pipe_plane_count = 0;
				if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
						pipe_counted[j] != 1) {
					/* Note: pipe_plane_count indicates the number of pipes to be used for a
					 * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
					 * pipe_plane_count = 2 means 2:1 split, etc.
					 */
					pipe_plane_count++;
					pipe_counted[j] = 1;
					current_plane = context->res_ctx.pipe_ctx[j].plane_state;
					/* Count the other pipes driving the same plane (split). */
					for (k = 0; k < dc->res_pool->pipe_count; k++) {
						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
							pipe_plane_count++;
							pipe_counted[k] = 1;
						}
					}

					/* Divide the plane's segments evenly across its pipes. */
					pipe_segments[j] = plane_segments / pipe_plane_count;
					for (k = 0; k < dc->res_pool->pipe_count; k++) {
						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
							pipe_segments[k] = plane_segments / pipe_plane_count;
						}
					}
				}
			}
		}

		/* Copy per-pipe segment counts into the DML pipe array (which is
		 * packed: only pipes with a stream get an entry).
		 */
		for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
			if (!context->res_ctx.pipe_ctx[i].stream)
				continue;

			pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
			pipe_cnt++;
		}
	} else {
		for (i = 0; i < dc->res_pool->pipe_count; i++)
			pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
	}
}

/*
 * dcn32_set_det_allocations - choose DET sizing for the new context. The
 * single-pipe case may use the maximum DET (or enable unbounded requesting);
 * multi-pipe configs fall through to dcn32_determine_det_override().
 */
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
	display_e2e_pipe_params_st *pipes)
{
	int i, pipe_cnt;
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *pipe;
	bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;

	/* Count active pipes; after the loop, 'pipe' refers to the last
	 * active pipe, which is the only pipe when pipe_cnt == 1.
	 */
	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {

		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		pipe = &res_ctx->pipe_ctx[i];
		pipe_cnt++;
	}

	/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
	 * the DET available for each pipe). Use the DET override input to maintain our driver
	 * policy.
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @temp_config: struct used to cache the existing MALL state
 *
 * Return: void
 */
void dcn32_save_mall_state(struct dc *dc,
		struct dc_state *context,
		struct mall_temp_config *temp_config)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream)
			temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;

		if (pipe->plane_state)
			temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
	}
}

/**
 * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
 *
 * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed, restore MALL state into here
 * @temp_config: struct that has the cached MALL state
 *
 * Return: void
 */
void dcn32_restore_mall_state(struct dc *dc,
		struct dc_state *context,
		struct mall_temp_config *temp_config)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream)
			pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];

		if (pipe->plane_state)
			pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
	}
}

#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW)
/*
 * Scaling factor for v_blank stretch calculations considering timing in
 * micro-seconds and pixel clock in 100hz.
 * Note: the parenthesis are necessary to ensure the correct order of
 * operation where V_SCALE is used.
 */
#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)

/*
 * get_frame_rate_at_max_stretch_100hz - compute the effective refresh rate
 * (in 100 Hz units) if the stream's vblank is stretched by the maximum
 * allowed amount plus the FPO vactive margin.
 */
static int get_frame_rate_at_max_stretch_100hz(
		struct dc_stream_state *fpo_candidate_stream, uint32_t fpo_vactive_margin_us)
{
	struct dc_crtc_timing *timing = NULL;
	uint32_t sec_per_100_lines;
	uint32_t max_v_blank;
	uint32_t curr_v_blank;
	uint32_t v_stretch_max;
	uint32_t stretched_frame_pix_cnt;
	uint32_t scaled_stretched_frame_pix_cnt;
	uint32_t scaled_refresh_rate;
	uint32_t v_scale;

	if (fpo_candidate_stream == NULL)
		return 0;

	/* check if refresh rate at least 120hz */
	timing = &fpo_candidate_stream->timing;
	if (timing == NULL)
		return 0;

	v_scale = 10000 / (MAX_STRETCHED_V_BLANK + fpo_vactive_margin_us);

	/* NOTE(review): assumes timing->h_total is non-zero - presumably
	 * guaranteed for a validated timing; confirm.
	 */
	sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1;
	max_v_blank = sec_per_100_lines / v_scale + 1;
	curr_v_blank = timing->v_total - timing->v_addressable;
	v_stretch_max = (max_v_blank > curr_v_blank) ? (max_v_blank - curr_v_blank) : (0);
	stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total;
	scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000;
	scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1;

	return scaled_refresh_rate;
}

/*
 * True when the stream's minimum refresh rate is still reachable after
 * stretching vblank by the maximum amount (i.e. FW-based vblank stretch is
 * usable for this stream).
 */
static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(
		struct dc_stream_state *fpo_candidate_stream, uint32_t fpo_vactive_margin_us)
{
	int refresh_rate_max_stretch_100hz;
	int min_refresh_100hz;

	if (fpo_candidate_stream == NULL)
		return false;

	refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(fpo_candidate_stream, fpo_vactive_margin_us);
	min_refresh_100hz = fpo_candidate_stream->timing.min_refresh_in_uhz / 10000;

	if (refresh_rate_max_stretch_100hz < min_refresh_100hz)
		return false;

	return true;
}

/* Nominal refresh rate of the stream in Hz (rounded up), or 0 on bad input. */
static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
{
	int refresh_rate = 0;
	int h_v_total = 0;
	struct dc_crtc_timing *timing = NULL;

	if (fpo_candidate_stream == NULL)
		return 0;

	/* check if refresh rate at least 120hz */
	timing = &fpo_candidate_stream->timing;
	if (timing == NULL)
		return 0;

	h_v_total = timing->h_total * timing->v_total;
	if (h_v_total == 0)
		return 0;

	refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1;
	return refresh_rate;
}

/**
 * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch() - Determines if config can
 *								    support FPO
 *
 * @dc: current dc state
 * @context: new dc state
 *
 * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
 */
struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
{
	int refresh_rate = 0;
	const int minimum_refreshrate_supported = 120;
	struct dc_stream_state *fpo_candidate_stream = NULL;
	bool is_fpo_vactive = false;
	uint32_t fpo_vactive_margin_us = 0;

	if (context == NULL)
		return NULL;

	if (dc->debug.disable_fams)
		return NULL;

	if (!dc->caps.dmub_caps.mclk_sw)
		return NULL;

	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down)
		return NULL;

	/* For FPO we can support up to 2 display configs if:
	 * - first display uses FPO
	 * - Second display switches in VACTIVE */
	if (context->stream_count > 2)
		return NULL;
	else if (context->stream_count == 2) {
		DC_FP_START();
		dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
		DC_FP_END();

		DC_FP_START();
		is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
		DC_FP_END();
		if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
			return NULL;
	} else
		fpo_candidate_stream = context->streams[0];

	if (!fpo_candidate_stream)
		return NULL;

	if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
		return NULL;

	refresh_rate = get_refresh_rate(fpo_candidate_stream);
	if (refresh_rate < minimum_refreshrate_supported)
		return NULL;

	fpo_vactive_margin_us = is_fpo_vactive ?
		dc->debug.fpo_vactive_margin_us : 0; // For now hardcode the FPO + Vactive stretch margin to be 2000us
	if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us))
		return NULL;

	if (!fpo_candidate_stream->allow_freesync)
		return NULL;

	if (fpo_candidate_stream->vrr_active_variable && dc->debug.disable_fams_gaming)
		return NULL;

	return fpo_candidate_stream;
}

/* True when the stream timing and the plane src/dst rects all match the
 * given width x height exactly (i.e. no scaling anywhere in the path). */
bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height)
{
	bool is_native_scaling = false;

	if (pipe->stream->timing.h_addressable == width &&
			pipe->stream->timing.v_addressable == height &&
			pipe->plane_state->src_rect.width == width &&
			pipe->plane_state->src_rect.height == height &&
			pipe->plane_state->dst_rect.width == width &&
			pipe->plane_state->dst_rect.height == height)
		is_native_scaling = true;

	return is_native_scaling;
}

/**
 * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 *
 * SubVP + DRR is admissible under the following conditions:
 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
 * - One display is SubVP
 * - Other display must have Freesync enabled
 * - The potential DRR display must not be PSR capable
 *
 * Return: True if admissible, false otherwise
 */
bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
{
	bool result = false;
	uint32_t i;
	uint8_t subvp_count = 0;
	uint8_t non_subvp_pipes = 0;
	bool drr_pipe_found = false;
	bool drr_psr_capable = false;
	uint64_t refresh_rate = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* Only consider master pipes (OPP head + DPP). */
		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
				resource_is_pipe_type(pipe, DPP_PIPE)) {
			if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
				subvp_count++;

				/* Ceiling of the nominal refresh rate in Hz:
				 * pix_clk (100 Hz units) * 100 / (v_total * h_total).
				 */
				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
					pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
			}

			if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
				non_subvp_pipes++;
				drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
				if (pipe->stream->ignore_msa_timing_param &&
						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) {
					drr_pipe_found = true;
				}
			}
		}
	}

	if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
		((uint32_t)refresh_rate < 120))
		result = true;

	return result;
}

/**
 * dcn32_subvp_vblank_admissable() - Determine if SubVP + Vblank config is admissible
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @vlevel: Voltage level calculated by DML
 *
 * SubVP + Vblank is admissible under the following conditions:
 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
 * - One display is SubVP
 * - Other display must not have Freesync capability
 * - DML must have output DRAM clock change support as SubVP + Vblank
 * - The potential vblank display must not be PSR capable
 *
 * Return: True if admissible, false otherwise
 */
bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel)
{
	bool result = false;
	uint32_t i;
	uint8_t subvp_count = 0;
	uint8_t non_subvp_pipes = 0;
	bool drr_pipe_found = false;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	bool vblank_psr_capable = false;
	uint64_t refresh_rate = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* Only consider master pipes (OPP head + DPP). */
		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
				resource_is_pipe_type(pipe, DPP_PIPE)) {
			if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
				subvp_count++;

				/* Ceiling of the nominal refresh rate in Hz. */
				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
					pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
			}

			if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
				non_subvp_pipes++;
				vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
				if (pipe->stream->ignore_msa_timing_param &&
						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) {
					drr_pipe_found = true;
				}
			}
		}
	}

	if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
		((uint32_t)refresh_rate < 120) &&
		vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
		result = true;

	return result;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dcn30/dcn30_hubbub.h" #include "dcn32_hubbub.h" #include "dm_services.h" #include "reg_helper.h" #define CTX \ hubbub2->base.ctx #define DC_LOGGER \ hubbub2->base.ctx->logger #define REG(reg)\ hubbub2->regs->reg #undef FN #define FN(reg_name, field_name) \ hubbub2->shifts->field_name, hubbub2->masks->field_name /** * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for * DCN32 */ #define DCN32_CRB_SEGMENT_SIZE_KB 64 static void dcn32_init_crb(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &hubbub2->det0_size); REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &hubbub2->det1_size); REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &hubbub2->det2_size); REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &hubbub2->det3_size); REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, &hubbub2->compbuf_size_segments); REG_SET_2(COMPBUF_RESERVED_SPACE, 0, COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F); } void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4; ASSERT((request_limit & (~0xFFF)) == 0); //field is only 24 bits long ASSERT(request_limit > 0); //field is only 24 bits long if (request_limit > 0xFFF) request_limit = 0xFFF; if (request_limit > 0) REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit); } void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; switch (hubp_inst) { case 0: 
REG_UPDATE(DCHUBBUB_DET0_CTRL, DET0_SIZE, det_size_segments); hubbub2->det0_size = det_size_segments; break; case 1: REG_UPDATE(DCHUBBUB_DET1_CTRL, DET1_SIZE, det_size_segments); hubbub2->det1_size = det_size_segments; break; case 2: REG_UPDATE(DCHUBBUB_DET2_CTRL, DET2_SIZE, det_size_segments); hubbub2->det2_size = det_size_segments; break; case 3: REG_UPDATE(DCHUBBUB_DET3_CTRL, DET3_SIZE, det_size_segments); hubbub2->det3_size = det_size_segments; break; default: break; } if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) { /* This may happen during seamless transition from ODM 2:1 to ODM4:1 */ DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n", hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size, hubbub2->compbuf_size_segments, hubbub2->crb_size_segs); } } static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { if (compbuf_size_segments > hubbub2->compbuf_size_segments) { REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); } /* Should never be hit, if it is we have an erroneous hw config*/ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); hubbub2->compbuf_size_segments = 
compbuf_size_segments; ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); } } static uint32_t convert_and_clamp( uint32_t wm_ns, uint32_t refclk_mhz, uint32_t clamp_value) { uint32_t ret_val = 0; ret_val = wm_ns * refclk_mhz; ret_val /= 1000; if (ret_val > clamp_value) ret_val = clamp_value; return ret_val; } bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; bool wm_pending = false; /* Repeat for water mark set A, B, C and D. */ /* clock state A */ if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.urgent_ns, prog_wm_value); } else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->a.frac_urg_bw_flip > hubbub2->watermarks.a.frac_urg_bw_flip) { hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); } else if (watermarks->a.frac_urg_bw_flip < hubbub2->watermarks.a.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->a.frac_urg_bw_nom > hubbub2->watermarks.a.frac_urg_bw_nom) { hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); } else if 
(watermarks->a.frac_urg_bw_nom < hubbub2->watermarks.a.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.urgent_ns, prog_wm_value); } else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->b.frac_urg_bw_flip > hubbub2->watermarks.b.frac_urg_bw_flip) { hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip); } else if (watermarks->b.frac_urg_bw_flip < hubbub2->watermarks.b.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->b.frac_urg_bw_nom > hubbub2->watermarks.b.frac_urg_bw_nom) { hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom); } else if (watermarks->b.frac_urg_bw_nom < hubbub2->watermarks.b.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || 
watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.urgent_ns, prog_wm_value); } else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->c.frac_urg_bw_flip > hubbub2->watermarks.c.frac_urg_bw_flip) { hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip); } else if (watermarks->c.frac_urg_bw_flip < hubbub2->watermarks.c.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->c.frac_urg_bw_nom > hubbub2->watermarks.c.frac_urg_bw_nom) { hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom); } else if (watermarks->c.frac_urg_bw_nom < hubbub2->watermarks.c.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { 
hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.urgent_ns, prog_wm_value); } else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->d.frac_urg_bw_flip > hubbub2->watermarks.d.frac_urg_bw_flip) { hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip); } else if (watermarks->d.frac_urg_bw_flip < hubbub2->watermarks.d.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->d.frac_urg_bw_nom > hubbub2->watermarks.d.frac_urg_bw_nom) { hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom); } else if (watermarks->d.frac_urg_bw_nom < hubbub2->watermarks.d.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; prog_wm_value = 
convert_and_clamp(watermarks->d.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) wm_pending = true; return wm_pending; } bool hubbub32_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; bool wm_pending = false; /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns = watermarks->a.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if 
(watermarks->a.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns = watermarks->b.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( 
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns = watermarks->c.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns < 
hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns = watermarks->d.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) wm_pending = true; return wm_pending; } bool hubbub32_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; bool wm_pending = false; /* Section for UCLK_PSTATE_CHANGE_WATERMARKS */ /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns > hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.a.cstate_pstate.pstate_change_ns = watermarks->a.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.pstate_change_ns < hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns > 
hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.b.cstate_pstate.pstate_change_ns = watermarks->b.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.pstate_change_ns < hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns > hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.c.cstate_pstate.pstate_change_ns = watermarks->c.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.pstate_change_ns < hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns > hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.d.cstate_pstate.pstate_change_ns = watermarks->d.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n\n", 
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.pstate_change_ns < hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) wm_pending = true; /* Section for FCLK_PSTATE_CHANGE_WATERMARKS */ /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns > hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) { hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns = watermarks->a.cstate_pstate.fclk_pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.fclk_pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns < hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns > hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) { hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns = watermarks->b.cstate_pstate.fclk_pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.fclk_pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns < hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.fclk_pstate_change_ns > hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) { 
hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns = watermarks->c.cstate_pstate.fclk_pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.fclk_pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns < hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.fclk_pstate_change_ns > hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) { hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns = watermarks->d.cstate_pstate.fclk_pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.fclk_pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns < hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) wm_pending = true; return wm_pending; } bool hubbub32_program_usr_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; bool wm_pending = false; /* clock state A */ if (safe_to_lower || watermarks->a.usr_retraining_ns > hubbub2->watermarks.a.usr_retraining_ns) { hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns; prog_wm_value = convert_and_clamp( watermarks->a.usr_retraining_ns, refclk_mhz, 0x3fff); 
REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.usr_retraining_ns, prog_wm_value); } else if (watermarks->a.usr_retraining_ns < hubbub2->watermarks.a.usr_retraining_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.usr_retraining_ns > hubbub2->watermarks.b.usr_retraining_ns) { hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns; prog_wm_value = convert_and_clamp( watermarks->b.usr_retraining_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.usr_retraining_ns, prog_wm_value); } else if (watermarks->b.usr_retraining_ns < hubbub2->watermarks.b.usr_retraining_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.usr_retraining_ns > hubbub2->watermarks.c.usr_retraining_ns) { hubbub2->watermarks.c.usr_retraining_ns = watermarks->c.usr_retraining_ns; prog_wm_value = convert_and_clamp( watermarks->c.usr_retraining_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.usr_retraining_ns, prog_wm_value); } else if (watermarks->c.usr_retraining_ns < hubbub2->watermarks.c.usr_retraining_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.usr_retraining_ns > hubbub2->watermarks.d.usr_retraining_ns) { hubbub2->watermarks.d.usr_retraining_ns = watermarks->d.usr_retraining_ns; prog_wm_value = convert_and_clamp( watermarks->d.usr_retraining_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0, 
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->d.usr_retraining_ns < hubbub2->watermarks.d.usr_retraining_ns)
		/* Not safe to lower yet: remember that a (lower) value is pending. */
		wm_pending = true;

	return wm_pending;
}

/*
 * Force (or stop forcing) the "allow USR retraining" signal.
 * When @allow is true both the force-enable and the forced value are set to 1;
 * when false both are cleared, returning control to the arbiter.
 */
void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/*
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE = 1 means enabling forcing value
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE = 1 or 0, means value to be forced when force enable
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow);
}

/*
 * Program every watermark group (urgent, stutter, p-state, USR) for all four
 * clock states. Returns true if any group could not be lowered now
 * (safe_to_lower == false) and therefore still has a lower value pending.
 */
static bool hubbub32_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	bool wm_pending = false;

	if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF)
	 * to turn off it for now.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

	return wm_pending;
}

/* Copy values from WM set A to all other sets */
static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	/* Broadcast each set-A register to B/C/D so all clock states start consistent. */
	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}

/* Read back the programmed watermark registers for all four sets into @wm. */
static void hubbub32_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);

	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change);

	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change);
}

/*
 * Re-write the cached set-A urgent watermark to its register. Writing the
 * arbiter watermark causes it to propagate to the pipes.
 * refclk is converted from kHz to MHz for convert_and_clamp().
 */
void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns,
			refclk_mhz, 0x3fff);

	REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

/* One-time HUBBUB hardware init: clock gating, SDP port control and
 * request-outstanding limits. */
void hubbub32_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Enable clock gate*/
	if (hubbub->ctx->dc->debug.disable_clock_gate) {
		/*done in hwseq*/
		/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/

		REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
			DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
			DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
	}
	/*
	ignore the "df_pre_cstate_req" from the SDP port control.
	only the DCN will determine when to connect the SDP port
	*/
	REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
			SDPIF_PORT_CONTROL, 1);
	/*Set SDP's max outstanding request to 512
	must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
	REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
			SDPIF_MAX_NUM_OUTSTANDING, 1);
	/*must set the registers back to 256 in zero frame buffer mode*/
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512);
}

/* DCN3.2 HUBBUB vtable: mixes dcn1/2/3 helpers with the dcn32 overrides above. */
static const struct hubbub_funcs hubbub32_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
	.wm_read_state = hubbub32_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub32_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub32_init_watermarks,
	.program_det_size = dcn32_program_det_size,
	.program_compbuf_size = dcn32_program_compbuf_size,
	.init_crb = dcn32_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
	.set_request_limit = hubbub32_set_request_limit
};

/*
 * Constructor: wires registers/shift/mask tables into the dcn20_hubbub
 * wrapper and derives byte sizes from the KB parameters.
 */
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask,
	int det_size_kb,
	int pixel_chunk_size_kb,
	int config_return_buffer_size_kb)
{
	hubbub2->base.ctx = ctx;
	hubbub2->base.funcs = &hubbub32_funcs;
	hubbub2->regs = hubbub_regs;
	hubbub2->shifts = hubbub_shift;
	hubbub2->masks = hubbub_mask;

	/* 0xB selects the p-state debug test index used by dcn1 verify helpers
	 * — NOTE(review): value taken as-is, semantics not visible here. */
	hubbub2->debug_test_index_pstate = 0xB;
	hubbub2->detile_buf_size = det_size_kb * 1024;
	hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024;
	hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "dm_helpers.h" #include "include/hdcp_msg_types.h" #include "include/signal_types.h" #include "core_types.h" #include "link.h" #include "link_hwss.h" #include "link/protocols/link_dpcd.h" #define DC_LOGGER \ link->ctx->logger #define HDCP14_KSV_SIZE 5 #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = { [HDCP_MESSAGE_ID_READ_BKSV] = true, [HDCP_MESSAGE_ID_READ_RI_R0] = true, [HDCP_MESSAGE_ID_READ_PJ] = true, [HDCP_MESSAGE_ID_WRITE_AKSV] = false, [HDCP_MESSAGE_ID_WRITE_AINFO] = false, [HDCP_MESSAGE_ID_WRITE_AN] = false, [HDCP_MESSAGE_ID_READ_VH_X] = true, [HDCP_MESSAGE_ID_READ_VH_0] = true, [HDCP_MESSAGE_ID_READ_VH_1] = true, [HDCP_MESSAGE_ID_READ_VH_2] = true, [HDCP_MESSAGE_ID_READ_VH_3] = true, [HDCP_MESSAGE_ID_READ_VH_4] = true, [HDCP_MESSAGE_ID_READ_BCAPS] = true, [HDCP_MESSAGE_ID_READ_BSTATUS] = true, [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true, [HDCP_MESSAGE_ID_READ_BINFO] = true, [HDCP_MESSAGE_ID_HDCP2VERSION] = true, [HDCP_MESSAGE_ID_RX_CAPS] = true, [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false, [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true, [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false, [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false, [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true, [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true, [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false, [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true, [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false, [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true, [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false, [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false, [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true, [HDCP_MESSAGE_ID_READ_RXSTATUS] = true, [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false }; static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = { [HDCP_MESSAGE_ID_READ_BKSV] = 0x0, [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, 
	[HDCP_MESSAGE_ID_READ_PJ] = 0xA,
	[HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
	[HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
	[HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
	[HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
	[HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
	[HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
	[HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
	[HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
	[HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
	[HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
	[HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
	[HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
	[HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
	[HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
	[HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
	[HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
	[HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
	[HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
	[HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
	[HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
	[HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
	[HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
	[HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
	[HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
	[HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
	[HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
	[HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
};

/*
 * Per-signal HDCP transport. 'supported' gates usage; process_transaction
 * moves one HDCP message over the link and returns true on success.
 */
struct protection_properties {
	bool supported;
	bool (*process_transaction)(
		struct dc_link *link,
		struct hdcp_protection_message *message_info);
};

static const struct protection_properties non_supported_protection = {
	.supported = false
};

/*
 * Move one HDCP 1.4 message over DDC/i2c (HDMI/DVI).
 * Reads are a two-payload transaction (write the register offset, then read
 * the data); writes are a single payload with the offset prepended.
 * Returns false on allocation or i2c failure.
 */
static bool hdmi_14_process_transaction(
	struct dc_link *link,
	struct hdcp_protection_message *message_info)
{
	uint8_t *buff = NULL;
	bool result;
	const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
	const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
	struct i2c_command i2c_command;
	uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
	struct i2c_payload i2c_payloads[] = {
		{ true, 0, 1, &offset },
		/* actual hdcp payload, will be filled later, zeroed for now*/
		{ 0 }
	};

	switch (message_info->link) {
	case HDCP_LINK_SECONDARY:
		i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
		i2c_payloads[1].address = hdcp_i2c_addr_link_secondary;
		break;
	case HDCP_LINK_PRIMARY:
	default:
		i2c_payloads[0].address = hdcp_i2c_addr_link_primary;
		i2c_payloads[1].address = hdcp_i2c_addr_link_primary;
		break;
	}

	if (hdcp_cmd_is_read[message_info->msg_id]) {
		/* Read: second payload receives the data directly into the caller's buffer. */
		i2c_payloads[1].write = false;
		i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads);
		i2c_payloads[1].length = message_info->length;
		i2c_payloads[1].data = message_info->data;
	} else {
		/* Write: offset byte and message body must be one contiguous payload. */
		i2c_command.number_of_payloads = 1;
		buff = kzalloc(message_info->length + 1, GFP_KERNEL);
		if (!buff)
			return false;

		buff[0] = offset;
		memmove(&buff[1], message_info->data, message_info->length);
		i2c_payloads[0].length = message_info->length + 1;
		i2c_payloads[0].data = buff;
	}

	i2c_command.payloads = i2c_payloads;
	i2c_command.engine = I2C_COMMAND_ENGINE_HW;//only HW
	i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz;

	result = dm_helpers_submit_i2c(
			link->ctx,
			link,
			&i2c_command);
	/* kfree(NULL) is a no-op on the read path. */
	kfree(buff);

	return result;
}

static const struct protection_properties hdmi_14_protection = {
	.supported = true,
	.process_transaction = hdmi_14_process_transaction
};

/* DPCD address for each HDCP message (HDCP 1.x at 0x68xxx, HDCP 2.x at 0x69xxx). */
static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
	[HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
	[HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
	[HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
	[HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
	[HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
	[HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
	[HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
	[HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
	[HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
	[HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
	[HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
	[HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
	[HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
	[HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
	[HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
	[HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
	[HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
	[HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
	[HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
	[HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
	[HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
	[HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
	[HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
	[HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
	[HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
	[HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
	[HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
	[HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
	[HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
	[HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
};

/*
 * Chunked DPCD read/write for HDCP messages.
 * The KSV FIFO (0x6802c) is special-cased: it is re-read repeatedly at the
 * same DPCD address ('offset' stays 0 there; only 'data' advances) in
 * chunks of ksv_read_size bytes. All other addresses are transferred in
 * DEFAULT_AUX_MAX_DATA_SIZE chunks with the address advancing.
 * Returns false on the first failed DPCD access.
 */
static bool dpcd_access_helper(
	struct dc_link *link,
	uint32_t length,
	uint8_t *data,
	uint32_t dpcd_addr,
	bool is_read)
{
	enum dc_status status;
	uint32_t cur_length = 0;
	uint32_t offset = 0;
	uint32_t ksv_read_size = 0x6803b - 0x6802c; /* 15 bytes = 3 KSVs per read */

	/* Read KSV, need repeatedly handle */
	if (dpcd_addr == 0x6802c) {
		if (length % HDCP14_KSV_SIZE) {
			DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n",
				__func__,
				length,
				HDCP14_KSV_SIZE);
		}
		if (length > HDCP14_MAX_KSV_FIFO_SIZE) {
			DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n",
				__func__,
				length,
				HDCP14_MAX_KSV_FIFO_SIZE);
		}

		/* NOTE(review): informational message logged at error level. */
		DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n",
			__func__,
			length / HDCP14_KSV_SIZE);

		while (length > 0) {
			if (length > ksv_read_size) {
				status = core_link_read_dpcd(
					link,
					dpcd_addr + offset,
					data + offset,
					ksv_read_size);

				data += ksv_read_size;
				length -= ksv_read_size;
			} else {
				status = core_link_read_dpcd(
					link,
					dpcd_addr + offset,
					data + offset,
					length);

				data += length;
				length = 0;
			}

			if (status != DC_OK)
				return false;
		}
	} else {
		while (length > 0) {
			if (length > DEFAULT_AUX_MAX_DATA_SIZE)
				cur_length = DEFAULT_AUX_MAX_DATA_SIZE;
			else
				cur_length = length;

			if (is_read) {
				status = core_link_read_dpcd(
					link,
					dpcd_addr + offset,
					data + offset,
					cur_length);
			} else {
				status = core_link_write_dpcd(
					link,
					dpcd_addr + offset,
					data + offset,
					cur_length);
			}

			if (status != DC_OK)
				return false;

			length -= cur_length;
			offset += cur_length;
		}
	}
	return true;
}

/* Move one HDCP message over the DP AUX channel using the DPCD address table. */
static bool dp_11_process_transaction(
	struct dc_link *link,
	struct hdcp_protection_message *message_info)
{
	return dpcd_access_helper(
		link,
		message_info->length,
		message_info->data,
		hdcp_dpcd_addrs[message_info->msg_id],
		hdcp_cmd_is_read[message_info->msg_id]);
}

static const struct protection_properties dp_11_protection = {
	.supported = true,
	.process_transaction = dp_11_process_transaction
};

/*
 * Select the transport for a given signal type and HDCP version.
 * DP-to-VGA converters cannot carry HDCP, so they get the unsupported stub.
 * HDCP 2.2 currently reuses the 1.4 transports (see todo markers).
 */
static const struct protection_properties *get_protection_properties_by_signal(
	struct dc_link *link,
	enum signal_type st,
	enum hdcp_version version)
{
	switch (version) {
	case HDCP_VERSION_14:
		switch (st) {
		case SIGNAL_TYPE_DVI_SINGLE_LINK:
		case SIGNAL_TYPE_DVI_DUAL_LINK:
		case SIGNAL_TYPE_HDMI_TYPE_A:
			return &hdmi_14_protection;
		case SIGNAL_TYPE_DISPLAY_PORT:
			if (link &&
				(link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
				link->dpcd_caps.dongle_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)) {
				return &non_supported_protection;
			}
			return &dp_11_protection;
		case SIGNAL_TYPE_DISPLAY_PORT_MST:
		case SIGNAL_TYPE_EDP:
			return &dp_11_protection;
		default:
			return &non_supported_protection;
		}
		break;
	case HDCP_VERSION_22:
		switch (st) {
		case SIGNAL_TYPE_DVI_SINGLE_LINK:
		case SIGNAL_TYPE_DVI_DUAL_LINK:
		case SIGNAL_TYPE_HDMI_TYPE_A:
			return &hdmi_14_protection; //todo version2.2
		case SIGNAL_TYPE_DISPLAY_PORT:
		case SIGNAL_TYPE_DISPLAY_PORT_MST:
		case SIGNAL_TYPE_EDP:
			return &dp_11_protection;  //todo version2.2
		default:
			return &non_supported_protection;
		}
		break;
	default:
		return &non_supported_protection;
	}
}

/*
 * Entry point: dispatch one HDCP message for the given signal/link.
 * Validates msg_id bounds, picks a transport, and retries a failed
 * transaction up to message_info->max_retries additional times
 * (1 + max_retries attempts total).
 */
enum hdcp_message_status dc_process_hdcp_msg(
	enum signal_type signal,
	struct dc_link *link,
	struct hdcp_protection_message *message_info)
{
	enum hdcp_message_status status = HDCP_MESSAGE_FAILURE;
	uint32_t i = 0;

	const struct protection_properties *protection_props;

	if (!message_info)
		return HDCP_MESSAGE_UNSUPPORTED;

	if (message_info->msg_id < HDCP_MESSAGE_ID_READ_BKSV ||
		message_info->msg_id >= HDCP_MESSAGE_ID_MAX)
		return HDCP_MESSAGE_UNSUPPORTED;

	protection_props =
		get_protection_properties_by_signal(
			link,
			signal,
			message_info->version);

	if (!protection_props->supported)
		return HDCP_MESSAGE_UNSUPPORTED;

	if (protection_props->process_transaction(
		link,
		message_info)) {
		status = HDCP_MESSAGE_SUCCESS;
	} else {
		for (i = 0; i < message_info->max_retries; i++) {
			if (protection_props->process_transaction(
				link,
				message_info)) {
				status = HDCP_MESSAGE_SUCCESS;
				break;
			}
		}
	}

	return status;
}
linux-master
drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "include/logger_interface.h"

#include "bios_parser_interface.h"
#include "bios_parser.h"

#include "bios_parser2.h"

/*
 * dal_bios_parser_create - create a VBIOS parser instance.
 * @init: BIOS image/context needed by the parser.
 * @dce_version: target display engine version.
 *
 * Tries the atomfirmware-based parser first and falls back to the legacy
 * parser for older ASICs. Returns NULL if neither parser can be created;
 * the result must be released with dal_bios_parser_destroy().
 */
struct dc_bios *dal_bios_parser_create(
	struct bp_init_data *init,
	enum dce_version dce_version)
{
	struct dc_bios *bios = NULL;

	bios = firmware_parser_create(init, dce_version);

	/* Fall back to old bios parser for older asics */
	if (bios == NULL)
		bios = bios_parser_create(init, dce_version);

	return bios;
}

/*
 * dal_bios_parser_destroy - destroy a parser created by dal_bios_parser_create.
 * @dcb: address of the parser pointer; the underlying destroy hook is
 *       expected to NULL it out.
 *
 * Fix: guard against a NULL handle or an already-destroyed (*dcb == NULL)
 * parser — e.g. when creation failed or destroy is called twice — instead
 * of dereferencing NULL. Mirrors the NULL check firmware_parser_destroy()
 * performs internally.
 */
void dal_bios_parser_destroy(struct dc_bios **dcb)
{
	struct dc_bios *bios;

	if (!dcb || !*dcb)
		return;

	bios = *dcb;
	bios->funcs->bios_parser_destroy(dcb);
}
linux-master
drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "core_types.h" #include "ObjectID.h" #include "atomfirmware.h" #include "dc_bios_types.h" #include "include/grph_object_ctrl_defs.h" #include "include/bios_parser_interface.h" #include "include/logger_interface.h" #include "command_table2.h" #include "bios_parser_helper.h" #include "command_table_helper2.h" #include "bios_parser2.h" #include "bios_parser_types_internal2.h" #include "bios_parser_interface.h" #include "bios_parser_common.h" #define DC_LOGGER \ bp->base.ctx->logger #define LAST_RECORD_TYPE 0xff #define SMU9_SYSPLL0_ID 0 static enum bp_result get_gpio_i2c_info(struct bios_parser *bp, struct atom_i2c_record *record, struct graphics_object_i2c_info *info); static enum bp_result bios_parser_get_firmware_info( struct dc_bios *dcb, struct dc_firmware_info *info); static enum bp_result bios_parser_get_encoder_cap_info( struct dc_bios *dcb, struct graphics_object_id object_id, struct bp_encoder_cap_info *info); static enum bp_result get_firmware_info_v3_1( struct bios_parser *bp, struct dc_firmware_info *info); static enum bp_result get_firmware_info_v3_2( struct bios_parser *bp, struct dc_firmware_info *info); static enum bp_result get_firmware_info_v3_4( struct bios_parser *bp, struct dc_firmware_info *info); static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, struct atom_display_object_path_v2 *object); static struct atom_encoder_caps_record *get_encoder_cap_record( struct bios_parser *bp, struct atom_display_object_path_v2 *object); #define BIOS_IMAGE_SIZE_OFFSET 2 #define BIOS_IMAGE_SIZE_UNIT 512 #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table) static void bios_parser2_destruct(struct bios_parser *bp) { kfree(bp->base.bios_local_image); kfree(bp->base.integrated_info); } static void firmware_parser_destroy(struct dc_bios **dcb) { struct bios_parser *bp = BP_FROM_DCB(*dcb); if (!bp) { BREAK_TO_DEBUGGER(); return; } bios_parser2_destruct(bp); kfree(bp); 
*dcb = NULL; } static void get_atom_data_table_revision( struct atom_common_table_header *atom_data_tbl, struct atom_data_revision *tbl_revision) { if (!tbl_revision) return; /* initialize the revision to 0 which is invalid revision */ tbl_revision->major = 0; tbl_revision->minor = 0; if (!atom_data_tbl) return; tbl_revision->major = (uint32_t) atom_data_tbl->format_revision & 0x3f; tbl_revision->minor = (uint32_t) atom_data_tbl->content_revision & 0x3f; } /* BIOS oject table displaypath is per connector. * There is extra path not for connector. BIOS fill its encoderid as 0 */ static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb) { struct bios_parser *bp = BP_FROM_DCB(dcb); unsigned int count = 0; unsigned int i; switch (bp->object_info_tbl.revision.minor) { default: case 4: for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) if (bp->object_info_tbl.v1_4->display_path[i].encoderobjid != 0) count++; break; case 5: for (i = 0; i < bp->object_info_tbl.v1_5->number_of_path; i++) if (bp->object_info_tbl.v1_5->display_path[i].encoderobjid != 0) count++; break; } return count; } static struct graphics_object_id bios_parser_get_connector_id( struct dc_bios *dcb, uint8_t i) { struct bios_parser *bp = BP_FROM_DCB(dcb); struct graphics_object_id object_id = dal_graphics_object_id_init( 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN); struct object_info_table *tbl = &bp->object_info_tbl; struct display_object_info_table_v1_4 *v1_4 = tbl->v1_4; struct display_object_info_table_v1_5 *v1_5 = tbl->v1_5; switch (bp->object_info_tbl.revision.minor) { default: case 4: if (v1_4->number_of_path > i) { /* If display_objid is generic object id, the encoderObj * /extencoderobjId should be 0 */ if (v1_4->display_path[i].encoderobjid != 0 && v1_4->display_path[i].display_objid != 0) object_id = object_id_from_bios_object_id( v1_4->display_path[i].display_objid); } break; case 5: if (v1_5->number_of_path > i) { /* If display_objid is generic object id, the encoderObjId * 
should be 0 */ if (v1_5->display_path[i].encoderobjid != 0 && v1_5->display_path[i].display_objid != 0) object_id = object_id_from_bios_object_id( v1_5->display_path[i].display_objid); } break; } return object_id; } static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb, struct graphics_object_id object_id, uint32_t index, struct graphics_object_id *src_object_id) { struct bios_parser *bp = BP_FROM_DCB(dcb); unsigned int i; enum bp_result bp_result = BP_RESULT_BADINPUT; struct graphics_object_id obj_id = { 0 }; struct object_info_table *tbl = &bp->object_info_tbl; if (!src_object_id) return bp_result; switch (object_id.type) { /* Encoder's Source is GPU. BIOS does not provide GPU, since all * displaypaths point to same GPU (0x1100). Hardcode GPU object type */ case OBJECT_TYPE_ENCODER: /* TODO: since num of src must be less than 2. * If found in for loop, should break. * DAL2 implementation may be changed too */ switch (bp->object_info_tbl.revision.minor) { default: case 4: for (i = 0; i < tbl->v1_4->number_of_path; i++) { obj_id = object_id_from_bios_object_id( tbl->v1_4->display_path[i].encoderobjid); if (object_id.type == obj_id.type && object_id.id == obj_id.id && object_id.enum_id == obj_id.enum_id) { *src_object_id = object_id_from_bios_object_id( 0x1100); /* break; */ } } bp_result = BP_RESULT_OK; break; case 5: for (i = 0; i < tbl->v1_5->number_of_path; i++) { obj_id = object_id_from_bios_object_id( tbl->v1_5->display_path[i].encoderobjid); if (object_id.type == obj_id.type && object_id.id == obj_id.id && object_id.enum_id == obj_id.enum_id) { *src_object_id = object_id_from_bios_object_id( 0x1100); /* break; */ } } bp_result = BP_RESULT_OK; break; } break; case OBJECT_TYPE_CONNECTOR: switch (bp->object_info_tbl.revision.minor) { default: case 4: for (i = 0; i < tbl->v1_4->number_of_path; i++) { obj_id = object_id_from_bios_object_id( tbl->v1_4->display_path[i] .display_objid); if (object_id.type == obj_id.type && object_id.id == obj_id.id && 
object_id.enum_id == obj_id.enum_id) { *src_object_id = object_id_from_bios_object_id( tbl->v1_4 ->display_path[i] .encoderobjid); /* break; */ } } bp_result = BP_RESULT_OK; break; } bp_result = BP_RESULT_OK; break; case 5: for (i = 0; i < tbl->v1_5->number_of_path; i++) { obj_id = object_id_from_bios_object_id( tbl->v1_5->display_path[i].display_objid); if (object_id.type == obj_id.type && object_id.id == obj_id.id && object_id.enum_id == obj_id.enum_id) { *src_object_id = object_id_from_bios_object_id( tbl->v1_5->display_path[i].encoderobjid); /* break; */ } } bp_result = BP_RESULT_OK; break; default: bp_result = BP_RESULT_OK; break; } return bp_result; } /* from graphics_object_id, find display path which includes the object_id */ static struct atom_display_object_path_v2 *get_bios_object( struct bios_parser *bp, struct graphics_object_id id) { unsigned int i; struct graphics_object_id obj_id = {0}; switch (id.type) { case OBJECT_TYPE_ENCODER: for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) { obj_id = object_id_from_bios_object_id( bp->object_info_tbl.v1_4->display_path[i].encoderobjid); if (id.type == obj_id.type && id.id == obj_id.id && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } fallthrough; case OBJECT_TYPE_CONNECTOR: case OBJECT_TYPE_GENERIC: /* Both Generic and Connector Object ID * will be stored on display_objid */ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) { obj_id = object_id_from_bios_object_id( bp->object_info_tbl.v1_4->display_path[i].display_objid); if (id.type == obj_id.type && id.id == obj_id.id && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } fallthrough; default: return NULL; } } /* from graphics_object_id, find display path which includes the object_id */ static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp, struct graphics_object_id id) { unsigned int i; struct graphics_object_id obj_id = 
{0}; switch (id.type) { case OBJECT_TYPE_ENCODER: for (i = 0; i < bp->object_info_tbl.v1_5->number_of_path; i++) { obj_id = object_id_from_bios_object_id( bp->object_info_tbl.v1_5->display_path[i].encoderobjid); if (id.type == obj_id.type && id.id == obj_id.id && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_5->display_path[i]; } break; case OBJECT_TYPE_CONNECTOR: case OBJECT_TYPE_GENERIC: /* Both Generic and Connector Object ID * will be stored on display_objid */ for (i = 0; i < bp->object_info_tbl.v1_5->number_of_path; i++) { obj_id = object_id_from_bios_object_id( bp->object_info_tbl.v1_5->display_path[i].display_objid); if (id.type == obj_id.type && id.id == obj_id.id && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_5->display_path[i]; } break; default: return NULL; } return NULL; } static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb, struct graphics_object_id id, struct graphics_object_i2c_info *info) { uint32_t offset; struct atom_display_object_path_v2 *object; struct atom_display_object_path_v3 *object_path_v3; struct atom_common_record_header *header; struct atom_i2c_record *record; struct atom_i2c_record dummy_record = {0}; struct bios_parser *bp = BP_FROM_DCB(dcb); if (!info) return BP_RESULT_BADINPUT; if (id.type == OBJECT_TYPE_GENERIC) { dummy_record.i2c_id = id.id; if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK) return BP_RESULT_OK; else return BP_RESULT_NORECORD; } switch (bp->object_info_tbl.revision.minor) { case 4: default: object = get_bios_object(bp, id); if (!object) return BP_RESULT_BADINPUT; offset = object->disp_recordoffset + bp->object_info_tbl_offset; break; case 5: object_path_v3 = get_bios_object_from_path_v3(bp, id); if (!object_path_v3) return BP_RESULT_BADINPUT; offset = object_path_v3->disp_recordoffset + bp->object_info_tbl_offset; break; } for (;;) { header = GET_IMAGE(struct atom_common_record_header, offset); if (!header) return BP_RESULT_BADBIOSTABLE; if 
(header->record_type == LAST_RECORD_TYPE || !header->record_size) break; if (header->record_type == ATOM_I2C_RECORD_TYPE && sizeof(struct atom_i2c_record) <= header->record_size) { /* get the I2C info */ record = (struct atom_i2c_record *) header; if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK) return BP_RESULT_OK; } offset += header->record_size; } return BP_RESULT_NORECORD; } static enum bp_result get_gpio_i2c_info( struct bios_parser *bp, struct atom_i2c_record *record, struct graphics_object_i2c_info *info) { struct atom_gpio_pin_lut_v2_1 *header; uint32_t count = 0; unsigned int table_index = 0; bool find_valid = false; struct atom_gpio_pin_assignment *pin; if (!info) return BP_RESULT_BADINPUT; /* get the GPIO_I2C info */ if (!DATA_TABLES(gpio_pin_lut)) return BP_RESULT_BADBIOSTABLE; header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1, DATA_TABLES(gpio_pin_lut)); if (!header) return BP_RESULT_BADBIOSTABLE; if (sizeof(struct atom_common_table_header) + sizeof(struct atom_gpio_pin_assignment) > le16_to_cpu(header->table_header.structuresize)) return BP_RESULT_BADBIOSTABLE; /* TODO: is version change? */ if (header->table_header.content_revision != 1) return BP_RESULT_UNSUPPORTED; /* get data count */ count = (le16_to_cpu(header->table_header.structuresize) - sizeof(struct atom_common_table_header)) / sizeof(struct atom_gpio_pin_assignment); pin = (struct atom_gpio_pin_assignment *) header->gpio_pin; for (table_index = 0; table_index < count; table_index++) { if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) && ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) && ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) { /* still valid */ find_valid = true; break; } pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment)); } /* If we don't find the entry that we are looking for then * we will return BP_Result_BadBiosTable. 
	 */
	if (find_valid == false)
		return BP_RESULT_BADBIOSTABLE;

	/* get the GPIO_I2C_INFO */
	info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
	info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
	info->i2c_engine_id = (record->i2c_id & I2C_HW_ENGINE_ID_MASK) >> 4;
	info->i2c_slave_address = record->i2c_slave_addr;

	/* TODO: check how to get register offset for en, Y, etc. */
	info->gpio_info.clk_a_register_index = le16_to_cpu(pin->data_a_reg_index);
	info->gpio_info.clk_a_shift = pin->gpio_bitshift;

	return BP_RESULT_OK;
}

/* Walk a v1.5 display path's record list looking for its HPD record. */
static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp,
		struct atom_display_object_path_v3 *object)
{
	struct atom_common_record_header *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	/* NOTE(review): unlike get_hpd_record() below, disp_recordoffset is
	 * used here without le16_to_cpu() — verify on big-endian hosts.
	 */
	offset = object->disp_recordoffset + bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(struct atom_common_record_header, offset);

		if (!header)
			return NULL;

		if (header->record_type == ATOM_RECORD_END_TYPE ||
				!header->record_size)
			break;

		if (header->record_type == ATOM_HPD_INT_RECORD_TYPE &&
				sizeof(struct atom_hpd_int_record) <= header->record_size)
			return (struct atom_hpd_int_record *) header;

		offset += header->record_size;
	}

	return NULL;
}

/* Fetch HPD gpio uid and active (plugged) pin state for the given object. */
static enum bp_result bios_parser_get_hpd_info(
	struct dc_bios *dcb,
	struct graphics_object_id id,
	struct graphics_object_hpd_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_display_object_path_v2 *object;
	struct atom_display_object_path_v3 *object_path_v3;
	struct atom_hpd_int_record *record = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

	/* dispatch on object-info table minor revision */
	switch (bp->object_info_tbl.revision.minor) {
	case 4:
	default:
		object = get_bios_object(bp, id);

		if (!object)
			return BP_RESULT_BADINPUT;

		record = get_hpd_record(bp, object);

		break;
	case 5:
		object_path_v3 = get_bios_object_from_path_v3(bp, id);

		if (!object_path_v3)
			return BP_RESULT_BADINPUT;

		record = get_hpd_record_for_path_v3(bp, object_path_v3);
		break;
	}

	if (record != NULL) {
		info->hpd_int_gpio_uid = record->pin_id;
		info->hpd_active = record->plugin_pin_state;
		return BP_RESULT_OK;
	}

	return BP_RESULT_NORECORD;
}

/* Walk a v1.4 display path's record list looking for its HPD record. */
static struct atom_hpd_int_record *get_hpd_record(
	struct bios_parser *bp,
	struct atom_display_object_path_v2 *object)
{
	struct atom_common_record_header *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	/* disp_recordoffset is little-endian in the BIOS image */
	offset = le16_to_cpu(object->disp_recordoffset)
			+ bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(struct atom_common_record_header, offset);

		if (!header)
			return NULL;

		if (header->record_type == LAST_RECORD_TYPE ||
				!header->record_size)
			break;

		if (header->record_type == ATOM_HPD_INT_RECORD_TYPE &&
				sizeof(struct atom_hpd_int_record) <= header->record_size)
			return (struct atom_hpd_int_record *) header;

		offset += header->record_size;
	}

	return NULL;
}

/**
 * bios_parser_get_gpio_pin_info
 * Get GpioPin information of input gpio id
 *
 * @dcb: pointer to the DC BIOS
 * @gpio_id: GPIO ID
 * @info: GpioPin information structure
 * return: Bios parser result code
 * note:
 *  to get the GPIO PIN INFO, we need:
 *  1. get the GPIO_ID from other object table, see GetHPDInfo()
 *  2.
 * in DATA_TABLE.GPIO_Pin_LUT, search all records,
 * to get the registerA offset/mask
 */
static enum bp_result bios_parser_get_gpio_pin_info(
	struct dc_bios *dcb,
	uint32_t gpio_id,
	struct gpio_pin_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_gpio_pin_lut_v2_1 *header;
	uint32_t count = 0;
	uint32_t i = 0;

	if (!DATA_TABLES(gpio_pin_lut))
		return BP_RESULT_BADBIOSTABLE;

	header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
			DATA_TABLES(gpio_pin_lut));
	if (!header)
		return BP_RESULT_BADBIOSTABLE;

	/* reject tables too small to hold even one pin assignment */
	if (sizeof(struct atom_common_table_header) +
			sizeof(struct atom_gpio_pin_assignment)
			> le16_to_cpu(header->table_header.structuresize))
		return BP_RESULT_BADBIOSTABLE;

	if (header->table_header.content_revision != 1)
		return BP_RESULT_UNSUPPORTED;

	/* Temporary hard code gpio pin info */
	count = (le16_to_cpu(header->table_header.structuresize)
			- sizeof(struct atom_common_table_header))
				/ sizeof(struct atom_gpio_pin_assignment);
	for (i = 0; i < count; ++i) {
		if (header->gpio_pin[i].gpio_id != gpio_id)
			continue;

		/* neighbouring registers are assumed to sit at fixed offsets
		 * around the data_a register (see hard-code note above)
		 */
		info->offset =
			(uint32_t) le16_to_cpu(
					header->gpio_pin[i].data_a_reg_index);
		info->offset_y = info->offset + 2;
		info->offset_en = info->offset + 1;
		info->offset_mask = info->offset - 1;

		info->mask = (uint32_t) (1 <<
			header->gpio_pin[i].gpio_bitshift);
		/* NOTE(review): mask_y/mask_en/mask_mask derived by adding to
		 * a one-hot bit mask looks suspicious — confirm against the
		 * register layout before changing.
		 */
		info->mask_y = info->mask + 2;
		info->mask_en = info->mask + 1;
		info->mask_mask = info->mask - 1;

		return BP_RESULT_OK;
	}

	return BP_RESULT_NORECORD;
}

/* Map an ATOM_DISPLAY_*_SUPPORT device-id bit to a device type/enum pair. */
static struct device_id device_type_from_device_id(uint16_t device_id)
{

	struct device_id result_device_id;

	result_device_id.raw_device_tag = device_id;

	switch (device_id) {
	case ATOM_DISPLAY_LCD1_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_LCD;
		result_device_id.enum_id = 1;
		break;

	case ATOM_DISPLAY_LCD2_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_LCD;
		result_device_id.enum_id = 2;
		break;

	case ATOM_DISPLAY_DFP1_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 1;
		break;

	case ATOM_DISPLAY_DFP2_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 2;
		break;

	case ATOM_DISPLAY_DFP3_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 3;
		break;

	case ATOM_DISPLAY_DFP4_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 4;
		break;

	case ATOM_DISPLAY_DFP5_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 5;
		break;

	case ATOM_DISPLAY_DFP6_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 6;
		break;

	default:
		BREAK_TO_DEBUGGER(); /* Invalid device Id */
		result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
		result_device_id.enum_id = 0;
	}
	return result_device_id;
}

/* Resolve the device tag (type + enum id) stored on a connector's path. */
static enum bp_result bios_parser_get_device_tag(
	struct dc_bios *dcb,
	struct graphics_object_id connector_object_id,
	uint32_t device_tag_index,
	struct connector_device_tag_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_display_object_path_v2 *object;
	struct atom_display_object_path_v3 *object_path_v3;

	if (!info)
		return BP_RESULT_BADINPUT;

	switch (bp->object_info_tbl.revision.minor) {
	case 4:
	default:
		/* getBiosObject will return MXM object */
		object = get_bios_object(bp, connector_object_id);
		if (!object) {
			BREAK_TO_DEBUGGER(); /* Invalid object id */
			return BP_RESULT_BADINPUT;
		}

		info->acpi_device = 0; /* BIOS no longer provides this */
		info->dev_id = device_type_from_device_id(object->device_tag);
		break;
	case 5:
		object_path_v3 = get_bios_object_from_path_v3(bp, connector_object_id);
		if (!object_path_v3) {
			BREAK_TO_DEBUGGER(); /* Invalid object id */
			return BP_RESULT_BADINPUT;
		}
		info->acpi_device = 0; /* BIOS no longer provides this */
		info->dev_id = device_type_from_device_id(object_path_v3->device_tag);
		break;
	}

	return BP_RESULT_OK;
}

/* Spread-spectrum lookup from display_controller_info v4.1; rates are stored
 * in units of 10 Hz and scaled to Hz-style range values here.
 */
static enum bp_result get_ss_info_v4_1(
	struct bios_parser *bp,
	uint32_t id,
	uint32_t index,
	struct spread_spectrum_info *ss_info)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
	struct atom_smu_info_v3_3 *smu_info = NULL;

	if (!ss_info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	ss_info->type.STEP_AND_DELAY_INFO = false;
	ss_info->spread_percentage_divider = 1000;
	/* BIOS no longer uses target clock.  Always enable for now */
	ss_info->target_clock_range = 0xffffffff;

	switch (id) {
	case AS_SIGNAL_TYPE_DVI:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->dvi_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->dvi_ss_rate_10hz * 10;
		if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	case AS_SIGNAL_TYPE_HDMI:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->hdmi_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
		if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	/* TODO LVDS not support anymore? */
	case AS_SIGNAL_TYPE_DISPLAY_PORT:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->dp_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->dp_ss_rate_10hz * 10;
		if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	case AS_SIGNAL_TYPE_GPU_PLL:
		/* atom_firmware: DAL only get data from dce_info table.
		 * if data within smu_info is needed for DAL, VBIOS should
		 * copy it into dce_info
		 */
		result = BP_RESULT_UNSUPPORTED;
		break;
	case AS_SIGNAL_TYPE_XGMI:
		smu_info = GET_IMAGE(struct atom_smu_info_v3_3,
				DATA_TABLES(smu_info));
		if (!smu_info)
			return BP_RESULT_BADBIOSTABLE;

		DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
		/* NOTE(review): percentage/mode come from waflclk_* while the
		 * range comes from gpuclk_* — confirm this mix is intended.
		 */
		ss_info->spread_spectrum_percentage =
				smu_info->waflclk_ss_percentage;
		ss_info->spread_spectrum_range =
				smu_info->gpuclk_ss_rate_10hz * 10;
		if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_XGMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	default:
		result = BP_RESULT_UNSUPPORTED;
	}

	return result;
}

/* Spread-spectrum lookup from display_controller_info v4.2; DP spread comes
 * from smu_info rather than the display controller table.
 */
static enum bp_result get_ss_info_v4_2(
	struct bios_parser *bp,
	uint32_t id,
	uint32_t index,
	struct spread_spectrum_info *ss_info)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
	struct atom_smu_info_v3_1 *smu_info = NULL;

	if (!ss_info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	if (!DATA_TABLES(smu_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	smu_info = GET_IMAGE(struct atom_smu_info_v3_1, DATA_TABLES(smu_info));
	if (!smu_info)
		return BP_RESULT_BADBIOSTABLE;

	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
	ss_info->type.STEP_AND_DELAY_INFO = false;
	ss_info->spread_percentage_divider = 1000;
	/* BIOS no longer uses target clock.  Always enable for now */
	ss_info->target_clock_range = 0xffffffff;

	switch (id) {
	case AS_SIGNAL_TYPE_DVI:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->dvi_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->dvi_ss_rate_10hz * 10;
		if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	case AS_SIGNAL_TYPE_HDMI:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->hdmi_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
		if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	/* TODO LVDS not support anymore? */
	case AS_SIGNAL_TYPE_DISPLAY_PORT:
		ss_info->spread_spectrum_percentage =
				smu_info->gpuclk_ss_percentage;
		ss_info->spread_spectrum_range =
				smu_info->gpuclk_ss_rate_10hz * 10;
		if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	case AS_SIGNAL_TYPE_GPU_PLL:
		/* atom_firmware: DAL only get data from dce_info table.
		 * if data within smu_info is needed for DAL, VBIOS should
		 * copy it into dce_info
		 */
		result = BP_RESULT_UNSUPPORTED;
		break;
	default:
		result = BP_RESULT_UNSUPPORTED;
	}

	return result;
}

/* Spread-spectrum lookup from display_controller_info v4.5. */
static enum bp_result get_ss_info_v4_5(
	struct bios_parser *bp,
	uint32_t id,
	uint32_t index,
	struct spread_spectrum_info *ss_info)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_5 *disp_cntl_tbl = NULL;

	if (!ss_info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_5,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	ss_info->type.STEP_AND_DELAY_INFO = false;
	ss_info->spread_percentage_divider = 1000;
	/* BIOS no longer uses target clock.  Always enable for now */
	ss_info->target_clock_range = 0xffffffff;

	switch (id) {
	case AS_SIGNAL_TYPE_DVI:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->dvi_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->dvi_ss_rate_10hz * 10;
		if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	case AS_SIGNAL_TYPE_HDMI:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->hdmi_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
		if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	case AS_SIGNAL_TYPE_DISPLAY_PORT:
		ss_info->spread_spectrum_percentage =
				disp_cntl_tbl->dp_ss_percentage;
		ss_info->spread_spectrum_range =
				disp_cntl_tbl->dp_ss_rate_10hz * 10;
		if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
			ss_info->type.CENTER_MODE = true;

		DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
		break;
	case AS_SIGNAL_TYPE_GPU_PLL:
		/* atom_smu_info_v4_0 does not have fields for SS for SMU Display PLL anymore.
		 * SMU Display PLL supposed to be without spread.
		 * Better place for it would be in atom_display_controller_info_v4_5 table.
		 */
		result = BP_RESULT_UNSUPPORTED;
		break;
	default:
		result = BP_RESULT_UNSUPPORTED;
		break;
	}

	return result;
}

/**
 * bios_parser_get_spread_spectrum_info
 * Get spread spectrum information from the ASIC_InternalSS_Info(ver 2.1 or
 * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
 * ver 2.1 can co-exist with SS_Info table. Expect ASIC_InternalSS_Info
 * ver 3.1,
 * there is only one entry for each signal /ss id.  However, there is
 * no planning of supporting multiple spread Sprectum entry for EverGreen
 * @dcb: pointer to the DC BIOS
 * @signal: ASSignalType to be converted to info index
 * @index: number of entries that match the converted info index
 * @ss_info: sprectrum information structure,
 * return: Bios parser result code
 */
static enum bp_result bios_parser_get_spread_spectrum_info(
	struct dc_bios *dcb,
	enum as_signal_type signal,
	uint32_t index,
	struct spread_spectrum_info *ss_info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	enum bp_result result = BP_RESULT_UNSUPPORTED;
	struct atom_common_table_header *header;
	struct atom_data_revision tbl_revision;

	if (!ss_info) /* check for bad input */
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_UNSUPPORTED;

	header = GET_IMAGE(struct atom_common_table_header,
			DATA_TABLES(dce_info));
	get_atom_data_table_revision(header, &tbl_revision);

	/* dispatch on the dce_info table revision */
	switch (tbl_revision.major) {
	case 4:
		switch (tbl_revision.minor) {
		case 1:
			return get_ss_info_v4_1(bp, signal, index, ss_info);
		case 2:
		case 3:
		case 4:
			return get_ss_info_v4_2(bp, signal, index, ss_info);
		case 5:
			return get_ss_info_v4_5(bp, signal, index, ss_info);

		default:
			ASSERT(0);
			break;
		}
		break;
	default:
		break;
	}
	/* there can not be more then one entry for SS Info table */
	return result;
}

static enum bp_result
get_soc_bb_info_v4_4(
	struct bios_parser *bp,
	struct bp_soc_bb_info *soc_bb_info)
{
	/* Pull bounding-box latencies from display_controller_info v4.4. */
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;

	if (!soc_bb_info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	if (!DATA_TABLES(smu_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	soc_bb_info->dram_clock_change_latency_100ns = disp_cntl_tbl->max_mclk_chg_lat;
	soc_bb_info->dram_sr_enter_exit_latency_100ns = disp_cntl_tbl->max_sr_enter_exit_lat;
	soc_bb_info->dram_sr_exit_latency_100ns = disp_cntl_tbl->max_sr_exit_lat;

	return result;
}

/* Same as v4_4 but for display_controller_info v4.5 (no smu_info needed). */
static enum bp_result get_soc_bb_info_v4_5(
	struct bios_parser *bp,
	struct bp_soc_bb_info *soc_bb_info)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_5 *disp_cntl_tbl = NULL;

	if (!soc_bb_info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_5,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	soc_bb_info->dram_clock_change_latency_100ns = disp_cntl_tbl->max_mclk_chg_lat;
	soc_bb_info->dram_sr_enter_exit_latency_100ns = disp_cntl_tbl->max_sr_enter_exit_lat;
	soc_bb_info->dram_sr_exit_latency_100ns = disp_cntl_tbl->max_sr_exit_lat;

	return result;
}

/* Dispatch SoC bounding-box query on the dce_info table revision. */
static enum bp_result bios_parser_get_soc_bb_info(
	struct dc_bios *dcb,
	struct bp_soc_bb_info *soc_bb_info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	enum bp_result result = BP_RESULT_UNSUPPORTED;
	struct atom_common_table_header *header;
	struct atom_data_revision tbl_revision;

	if (!soc_bb_info) /* check for bad input */
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_UNSUPPORTED;

	header = GET_IMAGE(struct atom_common_table_header,
			DATA_TABLES(dce_info));
	get_atom_data_table_revision(header, &tbl_revision);

	switch (tbl_revision.major) {
	case 4:
		switch (tbl_revision.minor) {
		case 1:
		case 2:
		case 3:
			/* bounding box not published before v4.4 */
			break;
		case 4:
			result = get_soc_bb_info_v4_4(bp, soc_bb_info);
			break;
		case 5:
			result = get_soc_bb_info_v4_5(bp, soc_bb_info);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return result;
}

/* Read raw display_caps from display_controller_info v4.1. */
static enum bp_result get_disp_caps_v4_1(
	struct bios_parser *bp,
	uint8_t *dce_caps)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;

	if (!dce_caps)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	*dce_caps = disp_cntl_tbl->display_caps;

	return result;
}

/* Read raw display_caps from display_controller_info v4.2. */
static enum bp_result get_disp_caps_v4_2(
	struct bios_parser *bp,
	uint8_t *dce_caps)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;

	if (!dce_caps)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	*dce_caps = disp_cntl_tbl->display_caps;

	return result;
}

/* Read raw display_caps from display_controller_info v4.3. */
static enum bp_result get_disp_caps_v4_3(
	struct bios_parser *bp,
	uint8_t *dce_caps)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_3 *disp_cntl_tbl = NULL;

	if (!dce_caps)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_3,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	*dce_caps = disp_cntl_tbl->display_caps;

	return result;
}

/* Read raw display_caps from display_controller_info v4.4. */
static enum bp_result get_disp_caps_v4_4(
	struct bios_parser *bp,
	uint8_t *dce_caps)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;

	if (!dce_caps)
		return BP_RESULT_BADINPUT;

	if
	(!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	*dce_caps = disp_cntl_tbl->display_caps;

	return result;
}

/* Read raw display_caps from display_controller_info v4.5. */
static enum bp_result get_disp_caps_v4_5(
	struct bios_parser *bp,
	uint8_t *dce_caps)
{
	enum bp_result result = BP_RESULT_OK;
	struct atom_display_controller_info_v4_5 *disp_cntl_tbl = NULL;

	if (!dce_caps)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_BADBIOSTABLE;

	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_5,
							DATA_TABLES(dce_info));
	if (!disp_cntl_tbl)
		return BP_RESULT_BADBIOSTABLE;

	*dce_caps = disp_cntl_tbl->display_caps;

	return result;
}

/* Report (as 0/1 in *dce_caps) whether VBIOS enabled transparent LTTPR. */
static enum bp_result bios_parser_get_lttpr_interop(
	struct dc_bios *dcb,
	uint8_t *dce_caps)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	enum bp_result result = BP_RESULT_UNSUPPORTED;
	struct atom_common_table_header *header;
	struct atom_data_revision tbl_revision;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_UNSUPPORTED;

	header = GET_IMAGE(struct atom_common_table_header,
			DATA_TABLES(dce_info));
	get_atom_data_table_revision(header, &tbl_revision);
	switch (tbl_revision.major) {
	case 4:
		switch (tbl_revision.minor) {
		case 1:
			result = get_disp_caps_v4_1(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
			break;
		case 2:
			result = get_disp_caps_v4_2(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
			break;
		case 3:
			result = get_disp_caps_v4_3(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
			break;
		case 4:
			result = get_disp_caps_v4_4(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
			break;
		case 5:
			result = get_disp_caps_v4_5(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
	return result;
}

/* Report (as 0/1 in *dce_caps) whether VBIOS advertises LTTPR support;
 * dc config may force-enable it when the table says no.
 */
static enum bp_result bios_parser_get_lttpr_caps(
	struct dc_bios *dcb,
	uint8_t *dce_caps)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	enum bp_result result = BP_RESULT_UNSUPPORTED;
	struct atom_common_table_header *header;
	struct atom_data_revision tbl_revision;

	if (!DATA_TABLES(dce_info))
		return BP_RESULT_UNSUPPORTED;

	*dce_caps = 0;
	header = GET_IMAGE(struct atom_common_table_header,
			DATA_TABLES(dce_info));
	get_atom_data_table_revision(header, &tbl_revision);
	switch (tbl_revision.major) {
	case 4:
		switch (tbl_revision.minor) {
		case 1:
			result = get_disp_caps_v4_1(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
			break;
		case 2:
			result = get_disp_caps_v4_2(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
			break;
		case 3:
			result = get_disp_caps_v4_3(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
			break;
		case 4:
			result = get_disp_caps_v4_4(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
			break;
		case 5:
			result = get_disp_caps_v4_5(bp, dce_caps);
			*dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	DC_LOG_BIOS("DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
	if (dcb->ctx->dc->config.force_bios_enable_lttpr && *dce_caps == 0) {
		*dce_caps = 1;
		/* NOTE(review): log tag says TRANSPARENT_ENABLE inside the
		 * SUPPORT_ENABLE query — possibly a copy/paste; message kept
		 * as-is.
		 */
		DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: forced enabled");
	}
	return result;
}

/* Decode the lcd_info v2.1 table into embedded (eDP/LVDS) panel timing. */
static enum bp_result get_embedded_panel_info_v2_1(
		struct bios_parser *bp,
		struct embedded_panel_info *info)
{
	struct lcd_info_v2_1 *lvds;

	if (!info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(lcd_info))
		return BP_RESULT_UNSUPPORTED;

	lvds = GET_IMAGE(struct lcd_info_v2_1,
			DATA_TABLES(lcd_info));

	if (!lvds)
		return BP_RESULT_BADBIOSTABLE;

	/* TODO: previous vv1_3, should v2_1 */
	if (!((lvds->table_header.format_revision == 2)
			&& (lvds->table_header.content_revision >= 1)))
		return BP_RESULT_UNSUPPORTED;

	memset(info, 0, sizeof(struct embedded_panel_info));

	/* We need to convert from 10KHz units into KHz units */
	info->lcd_timing.pixel_clk = le16_to_cpu(lvds->lcd_timing.pixclk) * 10;
	/* usHActive does not include borders, according to VBIOS team */
	info->lcd_timing.horizontal_addressable = le16_to_cpu(lvds->lcd_timing.h_active);
	/* usHBlanking_Time includes borders, so we should really be
	 * subtractingborders duing this translation, but LVDS generally
	 * doesn't have borders, so we should be okay leaving this as is for
	 * now.  May need to revisit if we ever have LVDS with borders
	 */
	info->lcd_timing.horizontal_blanking_time = le16_to_cpu(lvds->lcd_timing.h_blanking_time);
	/* usVActive does not include borders, according to VBIOS team*/
	info->lcd_timing.vertical_addressable = le16_to_cpu(lvds->lcd_timing.v_active);
	/* usVBlanking_Time includes borders, so we should really be
	 * subtracting borders duing this translation, but LVDS generally
	 * doesn't have borders, so we should be okay leaving this as is for
	 * now. May need to revisit if we ever have LVDS with borders
	 */
	info->lcd_timing.vertical_blanking_time = le16_to_cpu(lvds->lcd_timing.v_blanking_time);
	info->lcd_timing.horizontal_sync_offset = le16_to_cpu(lvds->lcd_timing.h_sync_offset);
	info->lcd_timing.horizontal_sync_width = le16_to_cpu(lvds->lcd_timing.h_sync_width);
	info->lcd_timing.vertical_sync_offset = le16_to_cpu(lvds->lcd_timing.v_sync_offset);
	info->lcd_timing.vertical_sync_width = le16_to_cpu(lvds->lcd_timing.v_syncwidth);
	info->lcd_timing.horizontal_border = lvds->lcd_timing.h_border;
	info->lcd_timing.vertical_border = lvds->lcd_timing.v_border;

	/* not provided by VBIOS */
	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;

	/* sync polarities are active-low in the ATOM miscinfo encoding */
	info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
			& ATOM_HSYNC_POLARITY);
	info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
			& ATOM_VSYNC_POLARITY);

	/* not provided by VBIOS */
	info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;

	info->lcd_timing.misc_info.H_REPLICATION_BY2 = !!(lvds->lcd_timing.miscinfo
			& ATOM_H_REPLICATIONBY2);
	info->lcd_timing.misc_info.V_REPLICATION_BY2 = !!(lvds->lcd_timing.miscinfo
			& ATOM_V_REPLICATIONBY2);
	info->lcd_timing.misc_info.COMPOSITE_SYNC = !!(lvds->lcd_timing.miscinfo
			& ATOM_COMPOSITESYNC);
	info->lcd_timing.misc_info.INTERLACE = !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE);

	/* not provided by VBIOS*/
	info->lcd_timing.misc_info.DOUBLE_CLOCK = 0;
	/* not provided by VBIOS*/
	info->ss_id = 0;

	info->realtek_eDPToLVDS = !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID);

	return BP_RESULT_OK;
}

/* Dispatch embedded-panel query on the lcd_info table revision. */
static enum bp_result bios_parser_get_embedded_panel_info(
		struct dc_bios *dcb,
		struct embedded_panel_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_common_table_header *header;
	struct atom_data_revision tbl_revision;

	if (!DATA_TABLES(lcd_info))
		return BP_RESULT_FAILURE;

	header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(lcd_info));

	if (!header)
		return BP_RESULT_BADBIOSTABLE;
	get_atom_data_table_revision(header, &tbl_revision);

	switch (tbl_revision.major) {
	case 2:
		switch (tbl_revision.minor) {
		case 1:
			return get_embedded_panel_info_v2_1(bp, info);
		default:
			break;
		}
		break;
	default:
		break;
	}

	return BP_RESULT_FAILURE;
}

/* Convert a device type/enum pair back into its ATOM support-mask bit. */
static uint32_t get_support_mask_for_device_id(struct device_id device_id)
{
	enum dal_device_type device_type = device_id.device_type;
	uint32_t enum_id = device_id.enum_id;

	switch (device_type) {
	case DEVICE_TYPE_LCD:
		switch (enum_id) {
		case 1:
			return ATOM_DISPLAY_LCD1_SUPPORT;
		default:
			break;
		}
		break;
	case DEVICE_TYPE_DFP:
		switch (enum_id) {
		case 1:
			return ATOM_DISPLAY_DFP1_SUPPORT;
		case 2:
			return ATOM_DISPLAY_DFP2_SUPPORT;
		case 3:
			return ATOM_DISPLAY_DFP3_SUPPORT;
		case 4:
			return ATOM_DISPLAY_DFP4_SUPPORT;
		case 5:
			return ATOM_DISPLAY_DFP5_SUPPORT;
		case 6:
			return ATOM_DISPLAY_DFP6_SUPPORT;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* Unidentified device ID, return empty support mask. */
	return 0;
}

/* Check the object table's supporteddevices bitmask for the given id.
 * NOTE(review): the break statements after each return and the trailing
 * "return false" are unreachable; harmless but could be dropped.
 */
static bool bios_parser_is_device_id_supported(
	struct dc_bios *dcb,
	struct device_id id)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	uint32_t mask = get_support_mask_for_device_id(id);

	switch (bp->object_info_tbl.revision.minor) {
	case 4:
	default:
		return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0;
		break;
	case 5:
		return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
		break;
	}

	return false;
}

static uint32_t bios_parser_get_ss_entry_number(
	struct dc_bios *dcb,
	enum as_signal_type signal)
{
	/* TODO: DAL2 atomfirmware implementation does not need this.
	 * why DAL3 need this?
	 */
	return 1;
}

/* Thin dispatcher into the command table; FAILURE when unimplemented. */
static enum bp_result bios_parser_transmitter_control(
	struct dc_bios *dcb,
	struct bp_transmitter_control *cntl)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.transmitter_control)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.transmitter_control(bp, cntl);
}

/* Thin dispatcher into the command table; FAILURE when unimplemented. */
static enum bp_result bios_parser_encoder_control(
	struct dc_bios *dcb,
	struct bp_encoder_control *cntl)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.dig_encoder_control)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.dig_encoder_control(bp, cntl);
}

/* Thin dispatcher into the command table; FAILURE when unimplemented. */
static enum bp_result bios_parser_set_pixel_clock(
	struct dc_bios *dcb,
	struct bp_pixel_clock_parameters *bp_params)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.set_pixel_clock)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
}

/* Thin dispatcher into the command table; FAILURE when unimplemented. */
static enum bp_result bios_parser_set_dce_clock(
	struct dc_bios *dcb,
	struct bp_set_dce_clock_parameters *bp_params)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.set_dce_clock)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.set_dce_clock(bp, bp_params);
}

/* Thin dispatcher into the command table; FAILURE when unimplemented. */
static enum bp_result bios_parser_program_crtc_timing(
	struct dc_bios *dcb,
	struct bp_hw_crtc_timing_parameters *bp_params)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.set_crtc_timing)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
}

/* Thin dispatcher into the command table; FAILURE when unimplemented. */
static enum bp_result bios_parser_enable_crtc(
	struct dc_bios *dcb,
	enum controller_id id,
	bool enable)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.enable_crtc)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.enable_crtc(bp, id, enable);
}

/* Thin dispatcher into the command table; FAILURE when unimplemented. */
static enum bp_result bios_parser_enable_disp_power_gating(
	struct dc_bios *dcb,
	enum controller_id controller_id,
	enum bp_pipe_control_action action)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.enable_disp_power_gating)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
		action);
}

static enum bp_result
bios_parser_enable_lvtma_control( struct dc_bios *dcb, uint8_t uc_pwr_on, uint8_t panel_instance, uint8_t bypass_panel_control_wait) { struct bios_parser *bp = BP_FROM_DCB(dcb); if (!bp->cmd_tbl.enable_lvtma_control) return BP_RESULT_FAILURE; return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait); } static bool bios_parser_is_accelerated_mode( struct dc_bios *dcb) { return bios_is_accelerated_mode(dcb); } /** * bios_parser_set_scratch_critical_state - update critical state bit * in VBIOS scratch register * * @dcb: pointer to the DC BIO * @state: set or reset state */ static void bios_parser_set_scratch_critical_state( struct dc_bios *dcb, bool state) { bios_set_scratch_critical_state(dcb, state); } struct atom_dig_transmitter_info_header_v5_3 { struct atom_common_table_header table_header; uint16_t dpphy_hdmi_settings_offset; uint16_t dpphy_dvi_settings_offset; uint16_t dpphy_dp_setting_table_offset; uint16_t uniphy_xbar_settings_v2_table_offset; uint16_t dpphy_internal_reg_overide_offset; }; static enum bp_result bios_parser_get_firmware_info( struct dc_bios *dcb, struct dc_firmware_info *info) { struct bios_parser *bp = BP_FROM_DCB(dcb); static enum bp_result result = BP_RESULT_BADBIOSTABLE; struct atom_common_table_header *header; struct atom_data_revision revision; if (info && DATA_TABLES(firmwareinfo)) { header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(firmwareinfo)); get_atom_data_table_revision(header, &revision); switch (revision.major) { case 3: switch (revision.minor) { case 1: result = get_firmware_info_v3_1(bp, info); break; case 2: case 3: result = get_firmware_info_v3_2(bp, info); break; case 4: result = get_firmware_info_v3_4(bp, info); break; default: break; } break; default: break; } } return result; } static enum bp_result get_firmware_info_v3_1( struct bios_parser *bp, struct dc_firmware_info *info) { struct atom_firmware_info_v3_1 *firmware_info; struct atom_display_controller_info_v4_1 
		*dce_info = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

	firmware_info = GET_IMAGE(struct atom_firmware_info_v3_1,
			DATA_TABLES(firmwareinfo));

	dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
			DATA_TABLES(dce_info));

	if (!firmware_info || !dce_info)
		return BP_RESULT_BADBIOSTABLE;

	memset(info, 0, sizeof(*info));

	/* Pixel clock pll information. */
	/* We need to convert from 10KHz units into KHz units */
	info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
	info->default_engine_clk = firmware_info->bootup_sclk_in10khz * 10;

	/* 27MHz for Vega10: */
	info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;

	/* Hardcode frequency if BIOS gives no DCE Ref Clk */
	if (info->pll_info.crystal_frequency == 0)
		info->pll_info.crystal_frequency = 27000;
	/*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
	info->dp_phy_ref_clk     = dce_info->dpphy_refclk_10khz * 10;
	info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;

	/* Get GPU PLL VCO Clock */
	if (bp->cmd_tbl.get_smu_clock_info != NULL) {
		/* VBIOS gives in 10KHz */
		info->smu_gpu_pll_output_freq =
				bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
	}

	info->oem_i2c_present = false;

	return BP_RESULT_OK;
}

/*
 * get_firmware_info_v3_2 - read firmwareinfo v3.2/v3.3 (Vega12/Vega20).
 * The bootup DCEF clock comes from the smu_info table, whose minor
 * revision selects the struct layout below.
 */
static enum bp_result get_firmware_info_v3_2(
	struct bios_parser *bp,
	struct dc_firmware_info *info)
{
	struct atom_firmware_info_v3_2 *firmware_info;
	struct atom_display_controller_info_v4_1 *dce_info = NULL;
	struct atom_common_table_header *header;
	struct atom_data_revision revision;
	struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
	struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

	firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
			DATA_TABLES(firmwareinfo));

	dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
			DATA_TABLES(dce_info));

	if (!firmware_info || !dce_info)
		return BP_RESULT_BADBIOSTABLE;

	memset(info, 0, sizeof(*info));

	header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(smu_info)); get_atom_data_table_revision(header, &revision); if (revision.minor == 2) { /* Vega12 */ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, DATA_TABLES(smu_info)); DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); if (!smu_info_v3_2) return BP_RESULT_BADBIOSTABLE; info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; } else if (revision.minor == 3) { /* Vega20 */ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, DATA_TABLES(smu_info)); DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); if (!smu_info_v3_3) return BP_RESULT_BADBIOSTABLE; info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; } // We need to convert from 10KHz units into KHz units. info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10; /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */ info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10; /* Hardcode frequency if BIOS gives no DCE Ref Clk */ if (info->pll_info.crystal_frequency == 0) { if (revision.minor == 2) info->pll_info.crystal_frequency = 27000; else if (revision.minor == 3) info->pll_info.crystal_frequency = 100000; } /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/ info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10; info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10; /* Get GPU PLL VCO Clock */ if (bp->cmd_tbl.get_smu_clock_info != NULL) { if (revision.minor == 2) info->smu_gpu_pll_output_freq = bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; else if (revision.minor == 3) info->smu_gpu_pll_output_freq = bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; } if (firmware_info->board_i2c_feature_id == 0x2) { info->oem_i2c_present = true; info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id; } else { info->oem_i2c_present = false; } return BP_RESULT_OK; } static 
enum bp_result get_firmware_info_v3_4(
	struct bios_parser *bp,
	struct dc_firmware_info *info)
{
	struct atom_firmware_info_v3_4 *firmware_info;
	struct atom_common_table_header *header;
	struct atom_data_revision revision;
	struct atom_display_controller_info_v4_1 *dce_info_v4_1 = NULL;
	struct atom_display_controller_info_v4_4 *dce_info_v4_4 = NULL;
	struct atom_smu_info_v3_5 *smu_info_v3_5 = NULL;
	struct atom_display_controller_info_v4_5 *dce_info_v4_5 = NULL;
	struct atom_smu_info_v4_0 *smu_info_v4_0 = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

	firmware_info = GET_IMAGE(struct atom_firmware_info_v3_4,
			DATA_TABLES(firmwareinfo));

	if (!firmware_info)
		return BP_RESULT_BADBIOSTABLE;

	memset(info, 0, sizeof(*info));

	/* Reference clocks come from the dce_info table; its minor revision
	 * selects which struct layout to read. */
	header = GET_IMAGE(struct atom_common_table_header,
					DATA_TABLES(dce_info));

	get_atom_data_table_revision(header, &revision);

	switch (revision.major) {
	case 4:
		switch (revision.minor) {
		case 5:
			dce_info_v4_5 = GET_IMAGE(struct atom_display_controller_info_v4_5,
							DATA_TABLES(dce_info));

			if (!dce_info_v4_5)
				return BP_RESULT_BADBIOSTABLE;

			 /* 100MHz expected */
			info->pll_info.crystal_frequency = dce_info_v4_5->dce_refclk_10khz * 10;
			info->dp_phy_ref_clk             = dce_info_v4_5->dpphy_refclk_10khz * 10;
			 /* 50MHz expected */
			info->i2c_engine_ref_clk         = dce_info_v4_5->i2c_engine_refclk_10khz * 10;

			/* For DCN32/321 Display PLL VCO Frequency from dce_info_v4_5 may not be reliable */

			break;
		case 4:
			dce_info_v4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
							DATA_TABLES(dce_info));

			if (!dce_info_v4_4)
				return BP_RESULT_BADBIOSTABLE;

			/* 100MHz expected */
			info->pll_info.crystal_frequency = dce_info_v4_4->dce_refclk_10khz * 10;
			info->dp_phy_ref_clk             = dce_info_v4_4->dpphy_refclk_10khz * 10;
			/* 50MHz expected */
			info->i2c_engine_ref_clk         = dce_info_v4_4->i2c_engine_refclk_10khz * 10;

			/* Get SMU Display PLL VCO Frequency in KHz*/
			info->smu_gpu_pll_output_freq = dce_info_v4_4->dispclk_pll_vco_freq * 10;
			break;

		default:
			/* should not come here, keep as backup, as was before */
			dce_info_v4_1 = GET_IMAGE(struct atom_display_controller_info_v4_1,
							DATA_TABLES(dce_info));

			if (!dce_info_v4_1)
				return BP_RESULT_BADBIOSTABLE;

			info->pll_info.crystal_frequency = dce_info_v4_1->dce_refclk_10khz * 10;
			info->dp_phy_ref_clk             = dce_info_v4_1->dpphy_refclk_10khz * 10;
			info->i2c_engine_ref_clk         = dce_info_v4_1->i2c_engine_refclk_10khz * 10;
			break;
		}
		break;
	default:
		ASSERT(0);
		break;
	}

	/* Bootup DCEF clock comes from the smu_info table. */
	header = GET_IMAGE(struct atom_common_table_header,
					DATA_TABLES(smu_info));
	get_atom_data_table_revision(header, &revision);

	switch (revision.major) {
	case 3:
		switch (revision.minor) {
		case 5:
			smu_info_v3_5 = GET_IMAGE(struct atom_smu_info_v3_5,
							DATA_TABLES(smu_info));

			if (!smu_info_v3_5)
				return BP_RESULT_BADBIOSTABLE;
			DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_5->gpuclk_ss_percentage);

			info->default_engine_clk = smu_info_v3_5->bootup_dcefclk_10khz * 10;
			break;
		default:
			break;
		}
		break;
	case 4:
		switch (revision.minor) {
		case 0:
			smu_info_v4_0 = GET_IMAGE(struct atom_smu_info_v4_0,
							DATA_TABLES(smu_info));

			if (!smu_info_v4_0)
				return BP_RESULT_BADBIOSTABLE;

			/* For DCN32/321 bootup DCFCLK from smu_info_v4_0 may not be reliable */

			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	 // We need to convert from 10KHz units into KHz units.
	info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;

	if (firmware_info->board_i2c_feature_id == 0x2) {
		info->oem_i2c_present = true;
		info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
	} else {
		info->oem_i2c_present = false;
	}

	return BP_RESULT_OK;
}

/*
 * bios_parser_get_encoder_cap_info - translate the encoder caps record of
 * the given BIOS object into individual capability flags (HBR2/3, UHBR,
 * HDMI 6G, USB-C).
 */
static enum bp_result bios_parser_get_encoder_cap_info(
	struct dc_bios *dcb,
	struct graphics_object_id object_id,
	struct bp_encoder_cap_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_display_object_path_v2 *object;
	struct atom_encoder_caps_record *record = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

#if defined(CONFIG_DRM_AMD_DC_FP)
	/* encoder cap record not available in v1_5 */
	if (bp->object_info_tbl.revision.minor == 5)
		return BP_RESULT_NORECORD;
#endif

	object = get_bios_object(bp, object_id);

	if (!object)
		return BP_RESULT_BADINPUT;

	record = get_encoder_cap_record(bp, object);
	if (!record)
		return BP_RESULT_NORECORD;
	DC_LOG_BIOS("record->encodercaps 0x%x for object_id 0x%x", record->encodercaps, object_id.id);

	info->DP_HBR2_CAP = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_HBR2) ? 1 : 0;
	info->DP_HBR2_EN = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_HBR2_EN) ? 1 : 0;
	info->DP_HBR3_EN = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
	info->HDMI_6GB_EN = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
	info->IS_DP2_CAPABLE = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0;
	info->DP_UHBR10_EN = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_UHBR10_EN) ? 1 : 0;
	info->DP_UHBR13_5_EN = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0;
	info->DP_UHBR20_EN = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
	info->DP_IS_USB_C = (record->encodercaps &
			ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ?
			1 : 0;
	DC_LOG_BIOS("\t info->DP_IS_USB_C %d", info->DP_IS_USB_C);

	return BP_RESULT_OK;
}

/*
 * get_encoder_cap_record - walk the record list that follows the object's
 * encoder_recordoffset until the terminator and return the first encoder
 * caps record that is large enough, or NULL.
 */
static struct atom_encoder_caps_record *get_encoder_cap_record(
	struct bios_parser *bp,
	struct atom_display_object_path_v2 *object)
{
	struct atom_common_record_header *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	offset = object->encoder_recordoffset + bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(struct atom_common_record_header, offset);

		if (!header)
			return NULL;

		/* Advance past this record; the pointer just read stays valid. */
		offset += header->record_size;

		if (header->record_type == LAST_RECORD_TYPE ||
				!header->record_size)
			break;

		if (header->record_type != ATOM_ENCODER_CAP_RECORD_TYPE)
			continue;

		if (sizeof(struct atom_encoder_caps_record) <=
							header->record_size)
			return (struct atom_encoder_caps_record *)header;
	}

	return NULL;
}

/* Same walk as above, over the display records (disp_recordoffset). */
static struct atom_disp_connector_caps_record *get_disp_connector_caps_record(
	struct bios_parser *bp,
	struct atom_display_object_path_v2 *object)
{
	struct atom_common_record_header *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	offset = object->disp_recordoffset + bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(struct atom_common_record_header, offset);

		if (!header)
			return NULL;

		offset += header->record_size;

		if (header->record_type == LAST_RECORD_TYPE ||
				!header->record_size)
			break;

		if (header->record_type != ATOM_DISP_CONNECTOR_CAPS_RECORD_TYPE)
			continue;

		if (sizeof(struct atom_disp_connector_caps_record) <=
							header->record_size)
			return (struct atom_disp_connector_caps_record *)header;
	}

	return NULL;
}

/* v3 object-path variant of the record walk (note: different end marker). */
static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp,
		struct atom_display_object_path_v3 *object)
{
	struct atom_common_record_header *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	offset = object->disp_recordoffset + bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(struct
			atom_common_record_header, offset);

		if (!header)
			return NULL;

		offset += header->record_size;

		if (header->record_type == ATOM_RECORD_END_TYPE ||
				!header->record_size)
			break;

		if (header->record_type != ATOM_CONNECTOR_CAP_RECORD_TYPE)
			continue;

		if (sizeof(struct atom_connector_caps_record) <=
							header->record_size)
			return (struct atom_connector_caps_record *)header;
	}

	return NULL;
}

/*
 * bios_parser_get_disp_connector_caps_info - report whether the connector
 * drives an internal display (and its backlight), choosing the record
 * format by object-info table revision (v1.4 vs v1.5 paths).
 */
static enum bp_result bios_parser_get_disp_connector_caps_info(
	struct dc_bios *dcb,
	struct graphics_object_id object_id,
	struct bp_disp_connector_caps_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_display_object_path_v2 *object;
	struct atom_display_object_path_v3 *object_path_v3;
	struct atom_connector_caps_record *record_path_v3;
	struct atom_disp_connector_caps_record *record = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

	switch (bp->object_info_tbl.revision.minor) {
	case 4:
	default:
		object = get_bios_object(bp, object_id);

		if (!object)
			return BP_RESULT_BADINPUT;

		record = get_disp_connector_caps_record(bp, object);
		if (!record)
			return BP_RESULT_NORECORD;

		info->INTERNAL_DISPLAY =
			(record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
		info->INTERNAL_DISPLAY_BL =
			(record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
		break;
	case 5:
		object_path_v3 = get_bios_object_from_path_v3(bp, object_id);

		if (!object_path_v3)
			return BP_RESULT_BADINPUT;

		record_path_v3 = get_connector_caps_record(bp, object_path_v3);
		if (!record_path_v3)
			return BP_RESULT_NORECORD;

		info->INTERNAL_DISPLAY = (record_path_v3->connector_caps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY)
									? 1 : 0;
		info->INTERNAL_DISPLAY_BL = (record_path_v3->connector_caps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL)
										?
										1 : 0;
		break;
	}

	return BP_RESULT_OK;
}

/*
 * get_connector_speed_cap_record - walk the v3 object-path display records
 * for an ATOM_CONNECTOR_SPEED_UPTO record; returns NULL if absent.
 */
static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp,
		struct atom_display_object_path_v3 *object)
{
	struct atom_common_record_header *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	offset = object->disp_recordoffset + bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(struct atom_common_record_header, offset);

		if (!header)
			return NULL;

		offset += header->record_size;

		if (header->record_type == ATOM_RECORD_END_TYPE ||
				!header->record_size)
			break;

		if (header->record_type != ATOM_CONNECTOR_SPEED_UPTO)
			continue;

		if (sizeof(struct atom_connector_speed_record) <=
							header->record_size)
			return (struct atom_connector_speed_record *)header;
	}

	return NULL;
}

/*
 * bios_parser_get_connector_speed_cap_info - convert the connector's
 * maximum link speed (in 10 kHz-scaled units per the record) into the
 * per-rate enable flags.
 */
static enum bp_result bios_parser_get_connector_speed_cap_info(
	struct dc_bios *dcb,
	struct graphics_object_id object_id,
	struct bp_connector_speed_cap_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_display_object_path_v3 *object_path_v3;
	//struct atom_connector_speed_record *record = NULL;
	struct atom_connector_speed_record *record;

	if (!info)
		return BP_RESULT_BADINPUT;

	object_path_v3 = get_bios_object_from_path_v3(bp, object_id);

	if (!object_path_v3)
		return BP_RESULT_BADINPUT;

	record = get_connector_speed_cap_record(bp, object_path_v3);
	if (!record)
		return BP_RESULT_NORECORD;

	info->DP_HBR2_EN = (record->connector_max_speed >= 5400) ? 1 : 0;
	info->DP_HBR3_EN = (record->connector_max_speed >= 8100) ? 1 : 0;
	info->HDMI_6GB_EN = (record->connector_max_speed >= 5940) ? 1 : 0;
	info->DP_UHBR10_EN = (record->connector_max_speed >= 10000) ? 1 : 0;
	info->DP_UHBR13_5_EN = (record->connector_max_speed >= 13500) ? 1 : 0;
	info->DP_UHBR20_EN = (record->connector_max_speed >= 20000) ?
1 : 0; return BP_RESULT_OK; } static enum bp_result get_vram_info_v23( struct bios_parser *bp, struct dc_vram_info *info) { struct atom_vram_info_header_v2_3 *info_v23; static enum bp_result result = BP_RESULT_OK; info_v23 = GET_IMAGE(struct atom_vram_info_header_v2_3, DATA_TABLES(vram_info)); if (info_v23 == NULL) return BP_RESULT_BADBIOSTABLE; info->num_chans = info_v23->vram_module[0].channel_num; info->dram_channel_width_bytes = (1 << info_v23->vram_module[0].channel_width) / 8; return result; } static enum bp_result get_vram_info_v24( struct bios_parser *bp, struct dc_vram_info *info) { struct atom_vram_info_header_v2_4 *info_v24; static enum bp_result result = BP_RESULT_OK; info_v24 = GET_IMAGE(struct atom_vram_info_header_v2_4, DATA_TABLES(vram_info)); if (info_v24 == NULL) return BP_RESULT_BADBIOSTABLE; info->num_chans = info_v24->vram_module[0].channel_num; info->dram_channel_width_bytes = (1 << info_v24->vram_module[0].channel_width) / 8; return result; } static enum bp_result get_vram_info_v25( struct bios_parser *bp, struct dc_vram_info *info) { struct atom_vram_info_header_v2_5 *info_v25; static enum bp_result result = BP_RESULT_OK; info_v25 = GET_IMAGE(struct atom_vram_info_header_v2_5, DATA_TABLES(vram_info)); if (info_v25 == NULL) return BP_RESULT_BADBIOSTABLE; info->num_chans = info_v25->vram_module[0].channel_num; info->dram_channel_width_bytes = (1 << info_v25->vram_module[0].channel_width) / 8; return result; } static enum bp_result get_vram_info_v30( struct bios_parser *bp, struct dc_vram_info *info) { struct atom_vram_info_header_v3_0 *info_v30; enum bp_result result = BP_RESULT_OK; info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0, DATA_TABLES(vram_info)); if (info_v30 == NULL) return BP_RESULT_BADBIOSTABLE; info->num_chans = info_v30->channel_num; info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8; return result; } /* * get_integrated_info_v11 * * @brief * Get V8 integrated BIOS information * * @param * bios_parser *bp 
- [in]BIOS parser handler to get master data table * integrated_info *info - [out] store and output integrated info * * @return * static enum bp_result - BP_RESULT_OK if information is available, * BP_RESULT_BADBIOSTABLE otherwise. */ static enum bp_result get_integrated_info_v11( struct bios_parser *bp, struct integrated_info *info) { struct atom_integrated_system_info_v1_11 *info_v11; uint32_t i; info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11, DATA_TABLES(integratedsysteminfo)); DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); if (info_v11 == NULL) return BP_RESULT_BADBIOSTABLE; info->gpu_cap_info = le32_to_cpu(info_v11->gpucapinfo); /* * system_config: Bit[0] = 0 : PCIE power gating disabled * = 1 : PCIE power gating enabled * Bit[1] = 0 : DDR-PLL shut down disabled * = 1 : DDR-PLL shut down enabled * Bit[2] = 0 : DDR-PLL power down disabled * = 1 : DDR-PLL power down enabled */ info->system_config = le32_to_cpu(info_v11->system_config); info->cpu_cap_info = le32_to_cpu(info_v11->cpucapinfo); info->memory_type = info_v11->memorytype; info->ma_channel_number = info_v11->umachannelnumber; info->lvds_ss_percentage = le16_to_cpu(info_v11->lvds_ss_percentage); info->dp_ss_control = le16_to_cpu(info_v11->reserved1); info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v11->lvds_ss_rate_10hz); info->hdmi_ss_percentage = le16_to_cpu(info_v11->hdmi_ss_percentage); info->hdmi_sspread_rate_in_10hz = le16_to_cpu(info_v11->hdmi_ss_rate_10hz); info->dvi_ss_percentage = le16_to_cpu(info_v11->dvi_ss_percentage); info->dvi_sspread_rate_in_10_hz = le16_to_cpu(info_v11->dvi_ss_rate_10hz); info->lvds_misc = info_v11->lvds_misc; for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) { info->ext_disp_conn_info.gu_id[i] = info_v11->extdispconninfo.guid[i]; } for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) { info->ext_disp_conn_info.path[i].device_connector_id = object_id_from_bios_object_id( 
		le16_to_cpu(info_v11->extdispconninfo.path[i].connectorobjid));

		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
		object_id_from_bios_object_id(
			le16_to_cpu(
			info_v11->extdispconninfo.path[i].ext_encoder_objid));

		info->ext_disp_conn_info.path[i].device_tag =
			le16_to_cpu(
				info_v11->extdispconninfo.path[i].device_tag);
		info->ext_disp_conn_info.path[i].device_acpi_enum =
		le16_to_cpu(
			info_v11->extdispconninfo.path[i].device_acpi_enum);
		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
			info_v11->extdispconninfo.path[i].auxddclut_index;
		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
			info_v11->extdispconninfo.path[i].hpdlut_index;
		info->ext_disp_conn_info.path[i].channel_mapping.raw =
			info_v11->extdispconninfo.path[i].channelmapping;
		info->ext_disp_conn_info.path[i].caps =
				le16_to_cpu(info_v11->extdispconninfo.path[i].caps);
	}
	info->ext_disp_conn_info.checksum =
	info_v11->extdispconninfo.checksum;

	/* Copy the external HDMI retimer I2C programming sequences for each
	 * DP port (dp0..dp3): plain HDMI set, then the 6 GHz set. */
	info->dp0_ext_hdmi_slv_addr = info_v11->dp0_retimer_set.HdmiSlvAddr;
	info->dp0_ext_hdmi_reg_num = info_v11->dp0_retimer_set.HdmiRegNum;
	for (i = 0; i < info->dp0_ext_hdmi_reg_num; i++) {
		info->dp0_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp0_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp0_ext_hdmi_6g_reg_num = info_v11->dp0_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp0_ext_hdmi_6g_reg_num; i++) {
		info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}

	info->dp1_ext_hdmi_slv_addr = info_v11->dp1_retimer_set.HdmiSlvAddr;
	info->dp1_ext_hdmi_reg_num = info_v11->dp1_retimer_set.HdmiRegNum;
	for (i = 0; i < info->dp1_ext_hdmi_reg_num; i++) {
		info->dp1_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp1_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp1_ext_hdmi_6g_reg_num = info_v11->dp1_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp1_ext_hdmi_6g_reg_num; i++) {
		info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}

	info->dp2_ext_hdmi_slv_addr = info_v11->dp2_retimer_set.HdmiSlvAddr;
	info->dp2_ext_hdmi_reg_num = info_v11->dp2_retimer_set.HdmiRegNum;
	for (i = 0; i < info->dp2_ext_hdmi_reg_num; i++) {
		info->dp2_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp2_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp2_ext_hdmi_6g_reg_num = info_v11->dp2_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp2_ext_hdmi_6g_reg_num; i++) {
		info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}

	info->dp3_ext_hdmi_slv_addr = info_v11->dp3_retimer_set.HdmiSlvAddr;
	info->dp3_ext_hdmi_reg_num = info_v11->dp3_retimer_set.HdmiRegNum;
	for (i = 0; i < info->dp3_ext_hdmi_reg_num; i++) {
		info->dp3_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp3_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp3_ext_hdmi_6g_reg_num = info_v11->dp3_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp3_ext_hdmi_6g_reg_num; i++) {
		info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}

	/** TODO - review **/
	/* The block below is dead code kept from the legacy (pre-atomfirmware)
	 * parser; it references fields that do not exist in v1_11. */
	#if 0
	info->boot_up_engine_clock = le32_to_cpu(info_v11->ulBootUpEngineClock)
									* 10;
	info->dentist_vco_freq = le32_to_cpu(info_v11->ulDentistVCOFreq) * 10;
	info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;

	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		/* Convert [10KHz] into [KHz] */
		info->disp_clk_voltage[i].max_supported_clk =
		le32_to_cpu(info_v11->sDISPCLK_Voltage[i].
			ulMaximumSupportedCLK) * 10;
		info->disp_clk_voltage[i].voltage_index =
		le32_to_cpu(info_v11->sDISPCLK_Voltage[i].ulVoltageIndex);
	}

	info->boot_up_req_display_vector =
			le32_to_cpu(info_v11->ulBootUpReqDisplayVector);
	info->boot_up_nb_voltage =
			le16_to_cpu(info_v11->usBootUpNBVoltage);
	info->ext_disp_conn_info_offset =
			le16_to_cpu(info_v11->usExtDispConnInfoOffset);
	info->gmc_restore_reset_time =
			le32_to_cpu(info_v11->ulGMCRestoreResetTime);
	info->minimum_n_clk =
			le32_to_cpu(info_v11->ulNbpStateNClkFreq[0]);
	for (i = 1; i < 4; ++i)
		info->minimum_n_clk =
				info->minimum_n_clk <
				le32_to_cpu(info_v11->ulNbpStateNClkFreq[i]) ?
				info->minimum_n_clk : le32_to_cpu(
					info_v11->ulNbpStateNClkFreq[i]);

	info->idle_n_clk = le32_to_cpu(info_v11->ulIdleNClk);
	info->ddr_dll_power_up_time =
	    le32_to_cpu(info_v11->ulDDR_DLL_PowerUpTime);
	info->ddr_pll_power_up_time =
		le32_to_cpu(info_v11->ulDDR_PLL_PowerUpTime);
	info->pcie_clk_ss_type = le16_to_cpu(info_v11->usPCIEClkSSType);
	info->max_lvds_pclk_freq_in_single_link =
		le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
	/* NOTE(review): duplicated assignment below — harmless, inside #if 0. */
	info->max_lvds_pclk_freq_in_single_link =
		le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
		info_v11->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
		info_v11->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
		info_v11->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
		info_v11->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
		info_v11->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
		info_v11->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
	info->lvds_off_to_on_delay_in_4ms =
		info_v11->ucLVDSOffToOnDelay_in4Ms;
	info->lvds_bit_depth_control_val =
		le32_to_cpu(info_v11->ulLCDBitDepthControlVal);

	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
		/* Convert [10KHz] into [KHz] */
		info->avail_s_clk[i].supported_s_clk =
			le32_to_cpu(info_v11->sAvail_SCLK[i].ulSupportedSCLK)
									* 10;
		info->avail_s_clk[i].voltage_index =
			le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageIndex);
		info->avail_s_clk[i].voltage_id =
			le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageID);
	}
	#endif /* TODO*/

	return BP_RESULT_OK;
}

/*
 * get_integrated_info_v2_1 - read integrated_system_info v2.1 (APU) into
 * the driver's integrated_info, including external display paths, retimer
 * I2C sequences, and eDP panel parameters.
 */
static enum bp_result get_integrated_info_v2_1(
	struct bios_parser *bp,
	struct integrated_info *info)
{
	struct atom_integrated_system_info_v2_1 *info_v2_1;
	uint32_t i;

	info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
					DATA_TABLES(integratedsysteminfo));

	/* NOTE(review): info_v2_1 is dereferenced by this log before the
	 * NULL check that follows on the next statement — the check should
	 * be moved ahead of the log (same pattern fixed elsewhere). */
	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);

	if
	(info_v2_1 == NULL)
		return BP_RESULT_BADBIOSTABLE;

	info->gpu_cap_info =
	le32_to_cpu(info_v2_1->gpucapinfo);
	/*
	* system_config: Bit[0] = 0 : PCIE power gating disabled
	*                       = 1 : PCIE power gating enabled
	*                Bit[1] = 0 : DDR-PLL shut down disabled
	*                       = 1 : DDR-PLL shut down enabled
	*                Bit[2] = 0 : DDR-PLL power down disabled
	*                       = 1 : DDR-PLL power down enabled
	*/
	info->system_config = le32_to_cpu(info_v2_1->system_config);
	info->cpu_cap_info = le32_to_cpu(info_v2_1->cpucapinfo);
	info->memory_type = info_v2_1->memorytype;
	info->ma_channel_number = info_v2_1->umachannelnumber;
	info->dp_ss_control =
		le16_to_cpu(info_v2_1->reserved1);

	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
		info->ext_disp_conn_info.gu_id[i] =
				info_v2_1->extdispconninfo.guid[i];
	}

	/* Translate each external display path from the BIOS object IDs. */
	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
		info->ext_disp_conn_info.path[i].device_connector_id =
		object_id_from_bios_object_id(
			le16_to_cpu(info_v2_1->extdispconninfo.path[i].connectorobjid));

		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
		object_id_from_bios_object_id(
			le16_to_cpu(
			info_v2_1->extdispconninfo.path[i].ext_encoder_objid));

		info->ext_disp_conn_info.path[i].device_tag =
			le16_to_cpu(
				info_v2_1->extdispconninfo.path[i].device_tag);
		info->ext_disp_conn_info.path[i].device_acpi_enum =
		le16_to_cpu(
			info_v2_1->extdispconninfo.path[i].device_acpi_enum);
		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
			info_v2_1->extdispconninfo.path[i].auxddclut_index;
		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
			info_v2_1->extdispconninfo.path[i].hpdlut_index;
		info->ext_disp_conn_info.path[i].channel_mapping.raw =
			info_v2_1->extdispconninfo.path[i].channelmapping;
		info->ext_disp_conn_info.path[i].caps =
			le16_to_cpu(info_v2_1->extdispconninfo.path[i].caps);
	}

	info->ext_disp_conn_info.checksum =
		info_v2_1->extdispconninfo.checksum;
	info->dp0_ext_hdmi_slv_addr = info_v2_1->dp0_retimer_set.HdmiSlvAddr;
	info->dp0_ext_hdmi_reg_num = info_v2_1->dp0_retimer_set.HdmiRegNum;
	for (i = 0; i <
			info->dp0_ext_hdmi_reg_num; i++) {
		info->dp0_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v2_1->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp0_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v2_1->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp0_ext_hdmi_6g_reg_num =
			info_v2_1->dp0_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp0_ext_hdmi_6g_reg_num; i++) {
		info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v2_1->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v2_1->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}
	info->dp1_ext_hdmi_slv_addr = info_v2_1->dp1_retimer_set.HdmiSlvAddr;
	info->dp1_ext_hdmi_reg_num = info_v2_1->dp1_retimer_set.HdmiRegNum;
	for (i = 0; i < info->dp1_ext_hdmi_reg_num; i++) {
		info->dp1_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v2_1->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp1_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v2_1->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp1_ext_hdmi_6g_reg_num =
			info_v2_1->dp1_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp1_ext_hdmi_6g_reg_num; i++) {
		info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v2_1->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v2_1->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}
	info->dp2_ext_hdmi_slv_addr = info_v2_1->dp2_retimer_set.HdmiSlvAddr;
	info->dp2_ext_hdmi_reg_num = info_v2_1->dp2_retimer_set.HdmiRegNum;
	for (i = 0; i < info->dp2_ext_hdmi_reg_num; i++) {
		info->dp2_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v2_1->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp2_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v2_1->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp2_ext_hdmi_6g_reg_num =
			info_v2_1->dp2_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp2_ext_hdmi_6g_reg_num; i++) {
		info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v2_1->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v2_1->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}
	info->dp3_ext_hdmi_slv_addr = info_v2_1->dp3_retimer_set.HdmiSlvAddr;
	info->dp3_ext_hdmi_reg_num = info_v2_1->dp3_retimer_set.HdmiRegNum;
	for (i = 0; i < info->dp3_ext_hdmi_reg_num; i++) {
		info->dp3_ext_hdmi_reg_settings[i].i2c_reg_index =
				info_v2_1->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
		info->dp3_ext_hdmi_reg_settings[i].i2c_reg_val =
				info_v2_1->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
	}
	info->dp3_ext_hdmi_6g_reg_num =
			info_v2_1->dp3_retimer_set.Hdmi6GRegNum;
	for (i = 0; i < info->dp3_ext_hdmi_6g_reg_num; i++) {
		info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
				info_v2_1->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
		info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
				info_v2_1->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
	}

	/* eDP panel parameters for both possible eDP links. */
	info->edp1_info.edp_backlight_pwm_hz =
	le16_to_cpu(info_v2_1->edp1_info.edp_backlight_pwm_hz);
	info->edp1_info.edp_ss_percentage =
	le16_to_cpu(info_v2_1->edp1_info.edp_ss_percentage);
	info->edp1_info.edp_ss_rate_10hz =
	le16_to_cpu(info_v2_1->edp1_info.edp_ss_rate_10hz);
	info->edp1_info.edp_pwr_on_off_delay =
		info_v2_1->edp1_info.edp_pwr_on_off_delay;
	info->edp1_info.edp_pwr_on_vary_bl_to_blon =
		info_v2_1->edp1_info.edp_pwr_on_vary_bl_to_blon;
	info->edp1_info.edp_pwr_down_bloff_to_vary_bloff =
		info_v2_1->edp1_info.edp_pwr_down_bloff_to_vary_bloff;
	info->edp1_info.edp_panel_bpc =
		info_v2_1->edp1_info.edp_panel_bpc;
	info->edp1_info.edp_bootup_bl_level = info_v2_1->edp1_info.edp_bootup_bl_level;

	info->edp2_info.edp_backlight_pwm_hz =
	le16_to_cpu(info_v2_1->edp2_info.edp_backlight_pwm_hz);
	info->edp2_info.edp_ss_percentage =
	le16_to_cpu(info_v2_1->edp2_info.edp_ss_percentage);
	info->edp2_info.edp_ss_rate_10hz =
	le16_to_cpu(info_v2_1->edp2_info.edp_ss_rate_10hz);
	info->edp2_info.edp_pwr_on_off_delay =
info_v2_1->edp2_info.edp_pwr_on_off_delay; info->edp2_info.edp_pwr_on_vary_bl_to_blon = info_v2_1->edp2_info.edp_pwr_on_vary_bl_to_blon; info->edp2_info.edp_pwr_down_bloff_to_vary_bloff = info_v2_1->edp2_info.edp_pwr_down_bloff_to_vary_bloff; info->edp2_info.edp_panel_bpc = info_v2_1->edp2_info.edp_panel_bpc; info->edp2_info.edp_bootup_bl_level = info_v2_1->edp2_info.edp_bootup_bl_level; return BP_RESULT_OK; } static enum bp_result get_integrated_info_v2_2( struct bios_parser *bp, struct integrated_info *info) { struct atom_integrated_system_info_v2_2 *info_v2_2; uint32_t i; info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2, DATA_TABLES(integratedsysteminfo)); DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); if (info_v2_2 == NULL) return BP_RESULT_BADBIOSTABLE; info->gpu_cap_info = le32_to_cpu(info_v2_2->gpucapinfo); /* * system_config: Bit[0] = 0 : PCIE power gating disabled * = 1 : PCIE power gating enabled * Bit[1] = 0 : DDR-PLL shut down disabled * = 1 : DDR-PLL shut down enabled * Bit[2] = 0 : DDR-PLL power down disabled * = 1 : DDR-PLL power down enabled */ info->system_config = le32_to_cpu(info_v2_2->system_config); info->cpu_cap_info = le32_to_cpu(info_v2_2->cpucapinfo); info->memory_type = info_v2_2->memorytype; info->ma_channel_number = info_v2_2->umachannelnumber; info->dp_ss_control = le16_to_cpu(info_v2_2->reserved1); for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) { info->ext_disp_conn_info.gu_id[i] = info_v2_2->extdispconninfo.guid[i]; } for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) { info->ext_disp_conn_info.path[i].device_connector_id = object_id_from_bios_object_id( le16_to_cpu(info_v2_2->extdispconninfo.path[i].connectorobjid)); info->ext_disp_conn_info.path[i].ext_encoder_obj_id = object_id_from_bios_object_id( le16_to_cpu( info_v2_2->extdispconninfo.path[i].ext_encoder_objid)); info->ext_disp_conn_info.path[i].device_tag = le16_to_cpu( 
info_v2_2->extdispconninfo.path[i].device_tag); info->ext_disp_conn_info.path[i].device_acpi_enum = le16_to_cpu( info_v2_2->extdispconninfo.path[i].device_acpi_enum); info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index = info_v2_2->extdispconninfo.path[i].auxddclut_index; info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index = info_v2_2->extdispconninfo.path[i].hpdlut_index; info->ext_disp_conn_info.path[i].channel_mapping.raw = info_v2_2->extdispconninfo.path[i].channelmapping; info->ext_disp_conn_info.path[i].caps = le16_to_cpu(info_v2_2->extdispconninfo.path[i].caps); } info->ext_disp_conn_info.checksum = info_v2_2->extdispconninfo.checksum; info->ext_disp_conn_info.fixdpvoltageswing = info_v2_2->extdispconninfo.fixdpvoltageswing; info->edp1_info.edp_backlight_pwm_hz = le16_to_cpu(info_v2_2->edp1_info.edp_backlight_pwm_hz); info->edp1_info.edp_ss_percentage = le16_to_cpu(info_v2_2->edp1_info.edp_ss_percentage); info->edp1_info.edp_ss_rate_10hz = le16_to_cpu(info_v2_2->edp1_info.edp_ss_rate_10hz); info->edp1_info.edp_pwr_on_off_delay = info_v2_2->edp1_info.edp_pwr_on_off_delay; info->edp1_info.edp_pwr_on_vary_bl_to_blon = info_v2_2->edp1_info.edp_pwr_on_vary_bl_to_blon; info->edp1_info.edp_pwr_down_bloff_to_vary_bloff = info_v2_2->edp1_info.edp_pwr_down_bloff_to_vary_bloff; info->edp1_info.edp_panel_bpc = info_v2_2->edp1_info.edp_panel_bpc; info->edp1_info.edp_bootup_bl_level = info->edp2_info.edp_backlight_pwm_hz = le16_to_cpu(info_v2_2->edp2_info.edp_backlight_pwm_hz); info->edp2_info.edp_ss_percentage = le16_to_cpu(info_v2_2->edp2_info.edp_ss_percentage); info->edp2_info.edp_ss_rate_10hz = le16_to_cpu(info_v2_2->edp2_info.edp_ss_rate_10hz); info->edp2_info.edp_pwr_on_off_delay = info_v2_2->edp2_info.edp_pwr_on_off_delay; info->edp2_info.edp_pwr_on_vary_bl_to_blon = info_v2_2->edp2_info.edp_pwr_on_vary_bl_to_blon; info->edp2_info.edp_pwr_down_bloff_to_vary_bloff = info_v2_2->edp2_info.edp_pwr_down_bloff_to_vary_bloff; info->edp2_info.edp_panel_bpc = 
info_v2_2->edp2_info.edp_panel_bpc; info->edp2_info.edp_bootup_bl_level = info_v2_2->edp2_info.edp_bootup_bl_level; return BP_RESULT_OK; } /* * construct_integrated_info * * @brief * Get integrated BIOS information based on table revision * * @param * bios_parser *bp - [in]BIOS parser handler to get master data table * integrated_info *info - [out] store and output integrated info * * @return * static enum bp_result - BP_RESULT_OK if information is available, * BP_RESULT_BADBIOSTABLE otherwise. */ static enum bp_result construct_integrated_info( struct bios_parser *bp, struct integrated_info *info) { static enum bp_result result = BP_RESULT_BADBIOSTABLE; struct atom_common_table_header *header; struct atom_data_revision revision; uint32_t i; uint32_t j; if (info && DATA_TABLES(integratedsysteminfo)) { header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(integratedsysteminfo)); get_atom_data_table_revision(header, &revision); switch (revision.major) { case 1: switch (revision.minor) { case 11: case 12: result = get_integrated_info_v11(bp, info); break; default: return result; } break; case 2: switch (revision.minor) { case 1: result = get_integrated_info_v2_1(bp, info); break; case 2: result = get_integrated_info_v2_2(bp, info); break; default: return result; } break; default: return result; } if (result == BP_RESULT_OK) { DC_LOG_BIOS("edp1:\n" "\tedp_pwr_on_off_delay = %d\n" "\tedp_pwr_on_vary_bl_to_blon = %d\n" "\tedp_pwr_down_bloff_to_vary_bloff = %d\n" "\tedp_bootup_bl_level = %d\n", info->edp1_info.edp_pwr_on_off_delay, info->edp1_info.edp_pwr_on_vary_bl_to_blon, info->edp1_info.edp_pwr_down_bloff_to_vary_bloff, info->edp1_info.edp_bootup_bl_level); DC_LOG_BIOS("edp2:\n" "\tedp_pwr_on_off_delayv = %d\n" "\tedp_pwr_on_vary_bl_to_blon = %d\n" "\tedp_pwr_down_bloff_to_vary_bloff = %d\n" "\tedp_bootup_bl_level = %d\n", info->edp2_info.edp_pwr_on_off_delay, info->edp2_info.edp_pwr_on_vary_bl_to_blon, info->edp2_info.edp_pwr_down_bloff_to_vary_bloff, 
info->edp2_info.edp_bootup_bl_level); } } if (result != BP_RESULT_OK) return result; else { // Log each external path for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) { if (info->ext_disp_conn_info.path[i].device_tag != 0) DC_LOG_BIOS("integrated_info:For EXTERNAL DISPLAY PATH %d --------------\n" "DEVICE_TAG: 0x%x\n" "DEVICE_ACPI_ENUM: 0x%x\n" "DEVICE_CONNECTOR_ID: 0x%x\n" "EXT_AUX_DDC_LUT_INDEX: %d\n" "EXT_HPD_PIN_LUT_INDEX: %d\n" "EXT_ENCODER_OBJ_ID: 0x%x\n" "Encoder CAPS: 0x%x\n", i, info->ext_disp_conn_info.path[i].device_tag, info->ext_disp_conn_info.path[i].device_acpi_enum, info->ext_disp_conn_info.path[i].device_connector_id.id, info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index, info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index, info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id, info->ext_disp_conn_info.path[i].caps ); if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i); else if (bp->base.ctx->dc->config.force_bios_fixed_vs) { info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN; DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i); } } // Log the Checksum and Voltage Swing DC_LOG_BIOS("Integrated info table CHECKSUM: %d\n" "Integrated info table FIX_DP_VOLTAGE_SWING: %d\n", info->ext_disp_conn_info.checksum, info->ext_disp_conn_info.fixdpvoltageswing); if (bp->base.ctx->dc->config.force_bios_fixed_vs && info->ext_disp_conn_info.fixdpvoltageswing == 0) { info->ext_disp_conn_info.fixdpvoltageswing = bp->base.ctx->dc->config.force_bios_fixed_vs & 0xF; DC_LOG_BIOS("driver forced fixdpvoltageswing = %d\n", info->ext_disp_conn_info.fixdpvoltageswing); } } /* Sort voltage table from low to high*/ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { for (j = i; j > 0; --j) { if (info->disp_clk_voltage[j].max_supported_clk < info->disp_clk_voltage[j-1].max_supported_clk) 
swap(info->disp_clk_voltage[j-1], info->disp_clk_voltage[j]); } } return result; } static enum bp_result bios_parser_get_vram_info( struct dc_bios *dcb, struct dc_vram_info *info) { struct bios_parser *bp = BP_FROM_DCB(dcb); static enum bp_result result = BP_RESULT_BADBIOSTABLE; struct atom_common_table_header *header; struct atom_data_revision revision; if (info && DATA_TABLES(vram_info)) { header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(vram_info)); get_atom_data_table_revision(header, &revision); switch (revision.major) { case 2: switch (revision.minor) { case 3: result = get_vram_info_v23(bp, info); break; case 4: result = get_vram_info_v24(bp, info); break; case 5: result = get_vram_info_v25(bp, info); break; default: break; } break; case 3: switch (revision.minor) { case 0: result = get_vram_info_v30(bp, info); break; default: break; } break; default: return result; } } return result; } static struct integrated_info *bios_parser_create_integrated_info( struct dc_bios *dcb) { struct bios_parser *bp = BP_FROM_DCB(dcb); struct integrated_info *info; info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL); if (info == NULL) { ASSERT_CRITICAL(0); return NULL; } if (construct_integrated_info(bp, info) == BP_RESULT_OK) return info; kfree(info); return NULL; } static enum bp_result update_slot_layout_info( struct dc_bios *dcb, unsigned int i, struct slot_layout_info *slot_layout_info) { unsigned int record_offset; unsigned int j; struct atom_display_object_path_v2 *object; struct atom_bracket_layout_record *record; struct atom_common_record_header *record_header; static enum bp_result result; struct bios_parser *bp; struct object_info_table *tbl; struct display_object_info_table_v1_4 *v1_4; record = NULL; record_header = NULL; result = BP_RESULT_NORECORD; bp = BP_FROM_DCB(dcb); tbl = &bp->object_info_tbl; v1_4 = tbl->v1_4; object = &v1_4->display_path[i]; record_offset = (unsigned int) (object->disp_recordoffset) + (unsigned 
int)(bp->object_info_tbl_offset); for (;;) { record_header = (struct atom_common_record_header *) GET_IMAGE(struct atom_common_record_header, record_offset); if (record_header == NULL) { result = BP_RESULT_BADBIOSTABLE; break; } /* the end of the list */ if (record_header->record_type == 0xff || record_header->record_size == 0) { break; } if (record_header->record_type == ATOM_BRACKET_LAYOUT_RECORD_TYPE && sizeof(struct atom_bracket_layout_record) <= record_header->record_size) { record = (struct atom_bracket_layout_record *) (record_header); result = BP_RESULT_OK; break; } record_offset += record_header->record_size; } /* return if the record not found */ if (result != BP_RESULT_OK) return result; /* get slot sizes */ slot_layout_info->length = record->bracketlen; slot_layout_info->width = record->bracketwidth; /* get info for each connector in the slot */ slot_layout_info->num_of_connectors = record->conn_num; for (j = 0; j < slot_layout_info->num_of_connectors; ++j) { slot_layout_info->connectors[j].connector_type = (enum connector_layout_type) (record->conn_info[j].connector_type); switch (record->conn_info[j].connector_type) { case CONNECTOR_TYPE_DVI_D: slot_layout_info->connectors[j].connector_type = CONNECTOR_LAYOUT_TYPE_DVI_D; slot_layout_info->connectors[j].length = CONNECTOR_SIZE_DVI; break; case CONNECTOR_TYPE_HDMI: slot_layout_info->connectors[j].connector_type = CONNECTOR_LAYOUT_TYPE_HDMI; slot_layout_info->connectors[j].length = CONNECTOR_SIZE_HDMI; break; case CONNECTOR_TYPE_DISPLAY_PORT: slot_layout_info->connectors[j].connector_type = CONNECTOR_LAYOUT_TYPE_DP; slot_layout_info->connectors[j].length = CONNECTOR_SIZE_DP; break; case CONNECTOR_TYPE_MINI_DISPLAY_PORT: slot_layout_info->connectors[j].connector_type = CONNECTOR_LAYOUT_TYPE_MINI_DP; slot_layout_info->connectors[j].length = CONNECTOR_SIZE_MINI_DP; break; default: slot_layout_info->connectors[j].connector_type = CONNECTOR_LAYOUT_TYPE_UNKNOWN; slot_layout_info->connectors[j].length = 
CONNECTOR_SIZE_UNKNOWN; } slot_layout_info->connectors[j].position = record->conn_info[j].position; slot_layout_info->connectors[j].connector_id = object_id_from_bios_object_id( record->conn_info[j].connectorobjid); } return result; } static enum bp_result update_slot_layout_info_v2( struct dc_bios *dcb, unsigned int i, struct slot_layout_info *slot_layout_info) { unsigned int record_offset; struct atom_display_object_path_v3 *object; struct atom_bracket_layout_record_v2 *record; struct atom_common_record_header *record_header; static enum bp_result result; struct bios_parser *bp; struct object_info_table *tbl; struct display_object_info_table_v1_5 *v1_5; struct graphics_object_id connector_id; record = NULL; record_header = NULL; result = BP_RESULT_NORECORD; bp = BP_FROM_DCB(dcb); tbl = &bp->object_info_tbl; v1_5 = tbl->v1_5; object = &v1_5->display_path[i]; record_offset = (unsigned int) (object->disp_recordoffset) + (unsigned int)(bp->object_info_tbl_offset); for (;;) { record_header = (struct atom_common_record_header *) GET_IMAGE(struct atom_common_record_header, record_offset); if (record_header == NULL) { result = BP_RESULT_BADBIOSTABLE; break; } /* the end of the list */ if (record_header->record_type == ATOM_RECORD_END_TYPE || record_header->record_size == 0) { break; } if (record_header->record_type == ATOM_BRACKET_LAYOUT_V2_RECORD_TYPE && sizeof(struct atom_bracket_layout_record_v2) <= record_header->record_size) { record = (struct atom_bracket_layout_record_v2 *) (record_header); result = BP_RESULT_OK; break; } record_offset += record_header->record_size; } /* return if the record not found */ if (result != BP_RESULT_OK) return result; /* get slot sizes */ connector_id = object_id_from_bios_object_id(object->display_objid); slot_layout_info->length = record->bracketlen; slot_layout_info->width = record->bracketwidth; slot_layout_info->num_of_connectors = v1_5->number_of_path; slot_layout_info->connectors[i].position = record->conn_num; 
slot_layout_info->connectors[i].connector_id = connector_id; switch (connector_id.id) { case CONNECTOR_ID_SINGLE_LINK_DVID: case CONNECTOR_ID_DUAL_LINK_DVID: slot_layout_info->connectors[i].connector_type = CONNECTOR_LAYOUT_TYPE_DVI_D; slot_layout_info->connectors[i].length = CONNECTOR_SIZE_DVI; break; case CONNECTOR_ID_HDMI_TYPE_A: slot_layout_info->connectors[i].connector_type = CONNECTOR_LAYOUT_TYPE_HDMI; slot_layout_info->connectors[i].length = CONNECTOR_SIZE_HDMI; break; case CONNECTOR_ID_DISPLAY_PORT: case CONNECTOR_ID_USBC: if (record->mini_type == MINI_TYPE_NORMAL) { slot_layout_info->connectors[i].connector_type = CONNECTOR_LAYOUT_TYPE_DP; slot_layout_info->connectors[i].length = CONNECTOR_SIZE_DP; } else { slot_layout_info->connectors[i].connector_type = CONNECTOR_LAYOUT_TYPE_MINI_DP; slot_layout_info->connectors[i].length = CONNECTOR_SIZE_MINI_DP; } break; default: slot_layout_info->connectors[i].connector_type = CONNECTOR_LAYOUT_TYPE_UNKNOWN; slot_layout_info->connectors[i].length = CONNECTOR_SIZE_UNKNOWN; } return result; } static enum bp_result get_bracket_layout_record( struct dc_bios *dcb, unsigned int bracket_layout_id, struct slot_layout_info *slot_layout_info) { unsigned int i; struct bios_parser *bp = BP_FROM_DCB(dcb); static enum bp_result result; struct object_info_table *tbl; struct display_object_info_table_v1_4 *v1_4; struct display_object_info_table_v1_5 *v1_5; if (slot_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n"); return BP_RESULT_BADINPUT; } tbl = &bp->object_info_tbl; v1_4 = tbl->v1_4; v1_5 = tbl->v1_5; result = BP_RESULT_NORECORD; switch (bp->object_info_tbl.revision.minor) { case 4: default: for (i = 0; i < v1_4->number_of_path; ++i) { if (bracket_layout_id == v1_4->display_path[i].display_objid) { result = update_slot_layout_info(dcb, i, slot_layout_info); break; } } break; case 5: for (i = 0; i < v1_5->number_of_path; ++i) result = update_slot_layout_info_v2(dcb, i, slot_layout_info); break; } 
return result; } static enum bp_result bios_get_board_layout_info( struct dc_bios *dcb, struct board_layout_info *board_layout_info) { unsigned int i; struct bios_parser *bp; static enum bp_result record_result; unsigned int max_slots; const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1, GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2, 0, 0 }; bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n"); return BP_RESULT_BADINPUT; } board_layout_info->num_of_slots = 0; max_slots = MAX_BOARD_SLOTS; // Assume single slot on v1_5 if (bp->object_info_tbl.revision.minor == 5) { max_slots = 1; } for (i = 0; i < max_slots; ++i) { record_result = get_bracket_layout_record(dcb, slot_index_to_vbios_id[i], &board_layout_info->slots[i]); if (record_result == BP_RESULT_NORECORD && i > 0) break; /* no more slots present in bios */ else if (record_result != BP_RESULT_OK) return record_result; /* fail */ ++board_layout_info->num_of_slots; } /* all data is valid */ board_layout_info->is_number_of_slots_valid = 1; board_layout_info->is_slots_size_valid = 1; board_layout_info->is_connector_offsets_valid = 1; board_layout_info->is_connector_lengths_valid = 1; return BP_RESULT_OK; } static uint16_t bios_parser_pack_data_tables( struct dc_bios *dcb, void *dst) { // TODO: There is data bytes alignment issue, disable it for now. 
return 0; } static struct atom_dc_golden_table_v1 *bios_get_golden_table( struct bios_parser *bp, uint32_t rev_major, uint32_t rev_minor, uint16_t *dc_golden_table_ver) { struct atom_display_controller_info_v4_4 *disp_cntl_tbl_4_4 = NULL; uint32_t dc_golden_offset = 0; *dc_golden_table_ver = 0; if (!DATA_TABLES(dce_info)) return NULL; /* ver.4.4 or higher */ switch (rev_major) { case 4: switch (rev_minor) { case 4: disp_cntl_tbl_4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4, DATA_TABLES(dce_info)); if (!disp_cntl_tbl_4_4) return NULL; dc_golden_offset = DATA_TABLES(dce_info) + disp_cntl_tbl_4_4->dc_golden_table_offset; *dc_golden_table_ver = disp_cntl_tbl_4_4->dc_golden_table_ver; break; case 5: default: /* For atom_display_controller_info_v4_5 there is no need to get golden table from * dc_golden_table_offset as all these fields previously in golden table used for AUX * pre-charge settings are now available directly in atom_display_controller_info_v4_5. */ break; } break; } if (!dc_golden_offset) return NULL; if (*dc_golden_table_ver != 1) return NULL; return GET_IMAGE(struct atom_dc_golden_table_v1, dc_golden_offset); } static enum bp_result bios_get_atom_dc_golden_table( struct dc_bios *dcb) { struct bios_parser *bp = BP_FROM_DCB(dcb); enum bp_result result = BP_RESULT_OK; struct atom_dc_golden_table_v1 *atom_dc_golden_table = NULL; struct atom_common_table_header *header; struct atom_data_revision tbl_revision; uint16_t dc_golden_table_ver = 0; header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(dce_info)); if (!header) return BP_RESULT_UNSUPPORTED; get_atom_data_table_revision(header, &tbl_revision); atom_dc_golden_table = bios_get_golden_table(bp, tbl_revision.major, tbl_revision.minor, &dc_golden_table_ver); if (!atom_dc_golden_table) return BP_RESULT_UNSUPPORTED; dcb->golden_table.dc_golden_table_ver = dc_golden_table_ver; dcb->golden_table.aux_dphy_rx_control0_val = atom_dc_golden_table->aux_dphy_rx_control0_val; 
dcb->golden_table.aux_dphy_rx_control1_val = atom_dc_golden_table->aux_dphy_rx_control1_val; dcb->golden_table.aux_dphy_tx_control_val = atom_dc_golden_table->aux_dphy_tx_control_val; dcb->golden_table.dc_gpio_aux_ctrl_0_val = atom_dc_golden_table->dc_gpio_aux_ctrl_0_val; dcb->golden_table.dc_gpio_aux_ctrl_1_val = atom_dc_golden_table->dc_gpio_aux_ctrl_1_val; dcb->golden_table.dc_gpio_aux_ctrl_2_val = atom_dc_golden_table->dc_gpio_aux_ctrl_2_val; dcb->golden_table.dc_gpio_aux_ctrl_3_val = atom_dc_golden_table->dc_gpio_aux_ctrl_3_val; dcb->golden_table.dc_gpio_aux_ctrl_4_val = atom_dc_golden_table->dc_gpio_aux_ctrl_4_val; dcb->golden_table.dc_gpio_aux_ctrl_5_val = atom_dc_golden_table->dc_gpio_aux_ctrl_5_val; return result; } static const struct dc_vbios_funcs vbios_funcs = { .get_connectors_number = bios_parser_get_connectors_number, .get_connector_id = bios_parser_get_connector_id, .get_src_obj = bios_parser_get_src_obj, .get_i2c_info = bios_parser_get_i2c_info, .get_hpd_info = bios_parser_get_hpd_info, .get_device_tag = bios_parser_get_device_tag, .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info, .get_ss_entry_number = bios_parser_get_ss_entry_number, .get_embedded_panel_info = bios_parser_get_embedded_panel_info, .get_gpio_pin_info = bios_parser_get_gpio_pin_info, .get_encoder_cap_info = bios_parser_get_encoder_cap_info, .is_device_id_supported = bios_parser_is_device_id_supported, .is_accelerated_mode = bios_parser_is_accelerated_mode, .set_scratch_critical_state = bios_parser_set_scratch_critical_state, /* COMMANDS */ .encoder_control = bios_parser_encoder_control, .transmitter_control = bios_parser_transmitter_control, .enable_crtc = bios_parser_enable_crtc, .set_pixel_clock = bios_parser_set_pixel_clock, .set_dce_clock = bios_parser_set_dce_clock, .program_crtc_timing = bios_parser_program_crtc_timing, .enable_disp_power_gating = bios_parser_enable_disp_power_gating, .bios_parser_destroy = firmware_parser_destroy, .get_board_layout_info = 
bios_get_board_layout_info, /* TODO: use this fn in hw init?*/ .pack_data_tables = bios_parser_pack_data_tables, .get_atom_dc_golden_table = bios_get_atom_dc_golden_table, .enable_lvtma_control = bios_parser_enable_lvtma_control, .get_soc_bb_info = bios_parser_get_soc_bb_info, .get_disp_connector_caps_info = bios_parser_get_disp_connector_caps_info, .get_lttpr_caps = bios_parser_get_lttpr_caps, .get_lttpr_interop = bios_parser_get_lttpr_interop, .get_connector_speed_cap_info = bios_parser_get_connector_speed_cap_info, }; static bool bios_parser2_construct( struct bios_parser *bp, struct bp_init_data *init, enum dce_version dce_version) { uint16_t *rom_header_offset = NULL; struct atom_rom_header_v2_2 *rom_header = NULL; struct display_object_info_table_v1_4 *object_info_tbl; struct atom_data_revision tbl_rev = {0}; if (!init) return false; if (!init->bios) return false; bp->base.funcs = &vbios_funcs; bp->base.bios = init->bios; bp->base.bios_size = bp->base.bios[OFFSET_TO_ATOM_ROM_IMAGE_SIZE] * BIOS_IMAGE_SIZE_UNIT; bp->base.ctx = init->ctx; bp->base.bios_local_image = NULL; rom_header_offset = GET_IMAGE(uint16_t, OFFSET_TO_ATOM_ROM_HEADER_POINTER); if (!rom_header_offset) return false; rom_header = GET_IMAGE(struct atom_rom_header_v2_2, *rom_header_offset); if (!rom_header) return false; get_atom_data_table_revision(&rom_header->table_header, &tbl_rev); if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 2)) return false; bp->master_data_tbl = GET_IMAGE(struct atom_master_data_table_v2_1, rom_header->masterdatatable_offset); if (!bp->master_data_tbl) return false; bp->object_info_tbl_offset = DATA_TABLES(displayobjectinfo); if (!bp->object_info_tbl_offset) return false; object_info_tbl = GET_IMAGE(struct display_object_info_table_v1_4, bp->object_info_tbl_offset); if (!object_info_tbl) return false; get_atom_data_table_revision(&object_info_tbl->table_header, &bp->object_info_tbl.revision); if (bp->object_info_tbl.revision.major == 1 && bp->object_info_tbl.revision.minor 
== 4) { struct display_object_info_table_v1_4 *tbl_v1_4; tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4, bp->object_info_tbl_offset); if (!tbl_v1_4) return false; bp->object_info_tbl.v1_4 = tbl_v1_4; } else if (bp->object_info_tbl.revision.major == 1 && bp->object_info_tbl.revision.minor == 5) { struct display_object_info_table_v1_5 *tbl_v1_5; tbl_v1_5 = GET_IMAGE(struct display_object_info_table_v1_5, bp->object_info_tbl_offset); if (!tbl_v1_5) return false; bp->object_info_tbl.v1_5 = tbl_v1_5; } else { ASSERT(0); return false; } dal_firmware_parser_init_cmd_tbl(bp); dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version); bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base); bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK; bios_parser_get_vram_info(&bp->base, &bp->base.vram_info); return true; } struct dc_bios *firmware_parser_create( struct bp_init_data *init, enum dce_version dce_version) { struct bios_parser *bp; bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL); if (!bp) return NULL; if (bios_parser2_construct(bp, init, dce_version)) return &bp->base; kfree(bp); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "amdgpu.h" #include "atom.h" #include "include/bios_parser_interface.h" #include "command_table.h" #include "command_table_helper.h" #include "bios_parser_helper.h" #include "bios_parser_types_internal.h" #define EXEC_BIOS_CMD_TABLE(command, params)\ (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GetIndexIntoMasterTable(COMMAND, command), \ (uint32_t *)&params) == 0) #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\ amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GetIndexIntoMasterTable(COMMAND, command), &frev, &crev) #define BIOS_CMD_TABLE_PARA_REVISION(command)\ bios_cmd_table_para_revision(bp->base.ctx->driver_context, \ GetIndexIntoMasterTable(COMMAND, command)) static void init_dig_encoder_control(struct bios_parser *bp); static void init_transmitter_control(struct bios_parser *bp); static void init_set_pixel_clock(struct bios_parser *bp); static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp); static void init_adjust_display_pll(struct bios_parser *bp); static void init_dac_encoder_control(struct bios_parser *bp); static void init_dac_output_control(struct bios_parser *bp); static void init_set_crtc_timing(struct bios_parser *bp); static void init_enable_crtc(struct bios_parser *bp); static void init_enable_crtc_mem_req(struct bios_parser *bp); static void init_external_encoder_control(struct bios_parser *bp); static void init_enable_disp_power_gating(struct bios_parser *bp); static void init_program_clock(struct bios_parser *bp); static void init_set_dce_clock(struct bios_parser *bp); void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp) { init_dig_encoder_control(bp); init_transmitter_control(bp); init_set_pixel_clock(bp); init_enable_spread_spectrum_on_ppll(bp); init_adjust_display_pll(bp); init_dac_encoder_control(bp); init_dac_output_control(bp); 
init_set_crtc_timing(bp); init_enable_crtc(bp); init_enable_crtc_mem_req(bp); init_program_clock(bp); init_external_encoder_control(bp); init_enable_disp_power_gating(bp); init_set_dce_clock(bp); } static uint32_t bios_cmd_table_para_revision(void *dev, uint32_t index) { struct amdgpu_device *adev = dev; uint8_t frev, crev; if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) return crev; else return 0; } /******************************************************************************* ******************************************************************************** ** ** D I G E N C O D E R C O N T R O L ** ******************************************************************************** *******************************************************************************/ static enum bp_result encoder_control_digx_v3( struct bios_parser *bp, struct bp_encoder_control *cntl); static enum bp_result encoder_control_digx_v4( struct bios_parser *bp, struct bp_encoder_control *cntl); static enum bp_result encoder_control_digx_v5( struct bios_parser *bp, struct bp_encoder_control *cntl); static void init_encoder_control_dig_v1(struct bios_parser *bp); static void init_dig_encoder_control(struct bios_parser *bp) { uint32_t version = BIOS_CMD_TABLE_PARA_REVISION(DIGxEncoderControl); switch (version) { case 2: bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v3; break; case 4: bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v4; break; case 5: bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v5; break; default: init_encoder_control_dig_v1(bp); break; } } static enum bp_result encoder_control_dig_v1( struct bios_parser *bp, struct bp_encoder_control *cntl); static enum bp_result encoder_control_dig1_v1( struct bios_parser *bp, struct bp_encoder_control *cntl); static enum bp_result encoder_control_dig2_v1( struct bios_parser *bp, struct bp_encoder_control *cntl); static void init_encoder_control_dig_v1(struct bios_parser *bp) { 
	struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;

	if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG1EncoderControl))
		cmd_tbl->encoder_control_dig1 = encoder_control_dig1_v1;
	else
		cmd_tbl->encoder_control_dig1 = NULL;

	if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG2EncoderControl))
		cmd_tbl->encoder_control_dig2 = encoder_control_dig2_v1;
	else
		cmd_tbl->encoder_control_dig2 = NULL;

	cmd_tbl->dig_encoder_control = encoder_control_dig_v1;
}

/* v1 front end: route the request to the per-DIG table selected by
 * cntl->engine_id. Returns BP_RESULT_FAILURE for a NULL cntl, an unknown
 * engine, or a missing per-DIG handler.
 */
static enum bp_result encoder_control_dig_v1(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;

	if (cntl != NULL)
		switch (cntl->engine_id) {
		case ENGINE_ID_DIGA:
			if (cmd_tbl->encoder_control_dig1 != NULL)
				result =
					cmd_tbl->encoder_control_dig1(bp, cntl);
			break;
		case ENGINE_ID_DIGB:
			if (cmd_tbl->encoder_control_dig2 != NULL)
				result =
					cmd_tbl->encoder_control_dig2(bp, cntl);
			break;
		default:
			break;
		}

	return result;
}

/* Execute DIG1EncoderControl (params v2); parameter packing is delegated to
 * the command-table helper.
 */
static enum bp_result encoder_control_dig1_v1(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};

	bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);

	if (EXEC_BIOS_CMD_TABLE(DIG1EncoderControl, params))
		result = BP_RESULT_OK;

	return result;
}

/* Execute DIG2EncoderControl (params v2); mirror of encoder_control_dig1_v1. */
static enum bp_result encoder_control_dig2_v1(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};

	bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);

	if (EXEC_BIOS_CMD_TABLE(DIG2EncoderControl, params))
		result = BP_RESULT_OK;

	return result;
}

/* DIGxEncoderControl, params v3: pack the encoder configuration and run the
 * table. Pixel clock is passed to ATOM in 10KHz units.
 */
static enum bp_result encoder_control_digx_v3(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	DIG_ENCODER_CONTROL_PARAMETERS_V3 params = {0};

	if (LANE_COUNT_FOUR < cntl->lanes_number)
		params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
	else
		params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */

	params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);

	/* We need to convert from KHz units into 10KHz units */
	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
	params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
	params.ucEncoderMode =
		(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
			cntl->signal, cntl->enable_dp_audio);
	params.ucLaneNum = (uint8_t)(cntl->lanes_number);

	switch (cntl->color_depth) {
	case COLOR_DEPTH_888:
		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_101010:
		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_121212:
		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_161616:
		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
		break;
	default:
		break;
	}

	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
		result = BP_RESULT_OK;

	return result;
}

/* DIGxEncoderControl, params v4: same layout semantics as v3 with the v4
 * parameter structure.
 */
static enum bp_result encoder_control_digx_v4(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	DIG_ENCODER_CONTROL_PARAMETERS_V4 params = {0};

	if (LANE_COUNT_FOUR < cntl->lanes_number)
		params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
	else
		params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */

	params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);

	/* We need to convert from KHz units into 10KHz units */
	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
	params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
	params.ucEncoderMode =
		(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
			cntl->signal, cntl->enable_dp_audio));
	params.ucLaneNum = (uint8_t)(cntl->lanes_number);

	switch (cntl->color_depth) {
	case COLOR_DEPTH_888:
		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_101010:
		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_121212:
		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_161616:
		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
		break;
default: break; } if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params)) result = BP_RESULT_OK; return result; } static enum bp_result encoder_control_digx_v5( struct bios_parser *bp, struct bp_encoder_control *cntl) { enum bp_result result = BP_RESULT_FAILURE; ENCODER_STREAM_SETUP_PARAMETERS_V5 params = {0}; params.ucDigId = (uint8_t)(cntl->engine_id); params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action); params.ulPixelClock = cntl->pixel_clock / 10; params.ucDigMode = (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom( cntl->signal, cntl->enable_dp_audio)); params.ucLaneNum = (uint8_t)(cntl->lanes_number); switch (cntl->color_depth) { case COLOR_DEPTH_888: params.ucBitPerColor = PANEL_8BIT_PER_COLOR; break; case COLOR_DEPTH_101010: params.ucBitPerColor = PANEL_10BIT_PER_COLOR; break; case COLOR_DEPTH_121212: params.ucBitPerColor = PANEL_12BIT_PER_COLOR; break; case COLOR_DEPTH_161616: params.ucBitPerColor = PANEL_16BIT_PER_COLOR; break; default: break; } if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) switch (cntl->color_depth) { case COLOR_DEPTH_101010: params.ulPixelClock = (params.ulPixelClock * 30) / 24; break; case COLOR_DEPTH_121212: params.ulPixelClock = (params.ulPixelClock * 36) / 24; break; case COLOR_DEPTH_161616: params.ulPixelClock = (params.ulPixelClock * 48) / 24; break; default: break; } if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** TRANSMITTER CONTROL ** ******************************************************************************** *******************************************************************************/ static enum bp_result transmitter_control_v2( struct bios_parser *bp, struct bp_transmitter_control *cntl); static enum bp_result transmitter_control_v3( struct bios_parser *bp, struct bp_transmitter_control 
*cntl);
static enum bp_result transmitter_control_v4(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl);
static enum bp_result transmitter_control_v1_5(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl);
static enum bp_result transmitter_control_v1_6(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl);

/* Select the UNIPHYTransmitterControl implementation from the table's
 * content revision; unknown revisions leave the pointer NULL and log.
 */
static void init_transmitter_control(struct bios_parser *bp)
{
	uint8_t frev;
	uint8_t crev;

	if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl, frev, crev) == false)
		BREAK_TO_DEBUGGER();
	switch (crev) {
	case 2:
		bp->cmd_tbl.transmitter_control = transmitter_control_v2;
		break;
	case 3:
		bp->cmd_tbl.transmitter_control = transmitter_control_v3;
		break;
	case 4:
		bp->cmd_tbl.transmitter_control = transmitter_control_v4;
		break;
	case 5:
		bp->cmd_tbl.transmitter_control = transmitter_control_v1_5;
		break;
	case 6:
		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
		break;
	default:
		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
		bp->cmd_tbl.transmitter_control = NULL;
		break;
	}
}

/* UNIPHYTransmitterControl, params v2. Only UNIPHY A-F and Travis LCD are
 * accepted; the parameter union is filled differently per action (INIT /
 * voltage-swing / enable-disable).
 */
static enum bp_result transmitter_control_v2(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 params;
	enum connector_id connector_id =
		dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);

	memset(&params, 0, sizeof(params));

	switch (cntl->transmitter) {
	case TRANSMITTER_UNIPHY_A:
	case TRANSMITTER_UNIPHY_B:
	case TRANSMITTER_UNIPHY_C:
	case TRANSMITTER_UNIPHY_D:
	case TRANSMITTER_UNIPHY_E:
	case TRANSMITTER_UNIPHY_F:
	case TRANSMITTER_TRAVIS_LCD:
		break;
	default:
		return BP_RESULT_BADINPUT;
	}

	switch (cntl->action) {
	case TRANSMITTER_CONTROL_INIT:
		if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
			(CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
			/* on INIT this bit should be set according to the
			 * physical connector
			 * Bit0: dual link connector flag
			 * =0 connector is single link connector
			 * =1 connector is dual link connector
			 */
			params.acConfig.fDualLinkConnector = 1;

		/* connector object id */
		params.usInitInfo =
			cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
		break;
	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
		/* voltage swing and pre-emphsis */
		params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
		params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
		break;
	default:
		/* if dual-link */
		if (LANE_COUNT_FOUR < cntl->lanes_number) {
			/* on ENABLE/DISABLE this bit should be set according to
			 * actual timing (number of lanes)
			 * Bit0: dual link connector flag
			 * =0 connector is single link connector
			 * =1 connector is dual link connector
			 */
			params.acConfig.fDualLinkConnector = 1;

			/* link rate, half for dual link
			 * We need to convert from KHz units into 20KHz units
			 */
			params.usPixelClock =
				cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
		} else
			/* link rate, half for dual link
			 * We need to convert from KHz units into 10KHz units
			 */
			params.usPixelClock =
				cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
		break;
	}

	/* 00 - coherent mode
	 * 01 - incoherent mode
	 */
	params.acConfig.fCoherentMode = cntl->coherent;

	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter) ||
		(TRANSMITTER_UNIPHY_D == cntl->transmitter) ||
		(TRANSMITTER_UNIPHY_F == cntl->transmitter))
		/* Bit2: Transmitter Link selection
		 * =0 when bit0=0, single link A/C/E, when bit0=1,
		 * master link A/C/E
		 * =1 when bit0=0, single link B/D/F, when bit0=1,
		 * master link B/D/F
		 */
		params.acConfig.ucLinkSel = 1;

	if (ENGINE_ID_DIGB == cntl->engine_id)
		/* Bit3: Transmitter data source selection
		 * =0 DIGA is data source.
		 * =1 DIGB is data source.
		 * This bit is only useful when ucAction= ATOM_ENABLE
		 */
		params.acConfig.ucEncoderSel = 1;

	if (CONNECTOR_ID_DISPLAY_PORT == connector_id
		|| CONNECTOR_ID_USBC == connector_id)
		/* Bit4: DP connector flag
		 * =0 connector is none-DP connector
		 * =1 connector is DP connector
		 */
		params.acConfig.fDPConnector = 1;

	/* Bit[7:6]: Transmitter selection
	 * =0 UNIPHY_ENCODER: UNIPHYA/B
	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
	 * =3 reserved
	 */
	params.acConfig.ucTransmitterSel =
		(uint8_t)bp->cmd_helper->transmitter_bp_to_atom(
			cntl->transmitter);

	params.ucAction = (uint8_t)cntl->action;

	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
		result = BP_RESULT_OK;

	return result;
}

/* UNIPHYTransmitterControl, params v3: adds a PLL reference-clock source id
 * relative to v2.
 */
static enum bp_result transmitter_control_v3(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 params;
	uint32_t pll_id;
	enum connector_id conn_id =
		dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
	const struct command_table_helper *cmd = bp->cmd_helper;
	bool dual_link_conn = (CONNECTOR_ID_DUAL_LINK_DVII == conn_id)
		|| (CONNECTOR_ID_DUAL_LINK_DVID == conn_id);

	memset(&params, 0, sizeof(params));

	switch (cntl->transmitter) {
	case TRANSMITTER_UNIPHY_A:
	case TRANSMITTER_UNIPHY_B:
	case TRANSMITTER_UNIPHY_C:
	case TRANSMITTER_UNIPHY_D:
	case TRANSMITTER_UNIPHY_E:
	case TRANSMITTER_UNIPHY_F:
	case TRANSMITTER_TRAVIS_LCD:
		break;
	default:
		return BP_RESULT_BADINPUT;
	}

	if (!cmd->clock_source_id_to_atom(cntl->pll_id, &pll_id))
		return BP_RESULT_BADINPUT;

	/* fill information based on the action */
	switch (cntl->action) {
	case TRANSMITTER_CONTROL_INIT:
		if (dual_link_conn) {
			/* on INIT this bit should be set according to the
			 * phisycal connector
			 * Bit0: dual link connector flag
			 * =0 connector is single link connector
			 * =1 connector is dual link connector
			 */
			params.acConfig.fDualLinkConnector = 1;
		}

		/* connector object id */
		params.usInitInfo =
			cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
		break;
	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
		/* votage swing and pre-emphsis */
		params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
		params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
		break;
	default:
		if (dual_link_conn && cntl->multi_path)
			/* on ENABLE/DISABLE this bit should be set according to
			 * actual timing (number of lanes)
			 * Bit0: dual link connector flag
			 * =0 connector is single link connector
			 * =1 connector is dual link connector
			 */
			params.acConfig.fDualLinkConnector = 1;
		/* if dual-link */
		if (LANE_COUNT_FOUR < cntl->lanes_number) {
			/* on ENABLE/DISABLE this bit should be set according to
			 * actual timing (number of lanes)
			 * Bit0: dual link connector flag
			 * =0 connector is single link connector
			 * =1 connector is dual link connector
			 */
			params.acConfig.fDualLinkConnector = 1;

			/* link rate, half for dual link
			 * We need to convert from KHz units into 20KHz units
			 */
			params.usPixelClock =
				cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
		} else {
			/* link rate, half for dual link
			 * We need to convert from KHz units into 10KHz units
			 */
			params.usPixelClock =
				cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
		}
		break;
	}

	/* 00 - coherent mode
	 * 01 - incoherent mode
	 */
	params.acConfig.fCoherentMode = cntl->coherent;

	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter) ||
		(TRANSMITTER_UNIPHY_D == cntl->transmitter) ||
		(TRANSMITTER_UNIPHY_F == cntl->transmitter))
		/* Bit2: Transmitter Link selection
		 * =0 when bit0=0, single link A/C/E, when bit0=1,
		 * master link A/C/E
		 * =1 when bit0=0, single link B/D/F, when bit0=1,
		 * master link B/D/F
		 */
		params.acConfig.ucLinkSel = 1;

	if (ENGINE_ID_DIGB == cntl->engine_id)
		/* Bit3: Transmitter data source selection
		 * =0 DIGA is data source.
		 * =1 DIGB is data source.
		 * This bit is only useful when ucAction= ATOM_ENABLE
		 */
		params.acConfig.ucEncoderSel = 1;

	/* Bit[7:6]: Transmitter selection
	 * =0 UNIPHY_ENCODER: UNIPHYA/B
	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
	 * =3 reserved
	 */
	params.acConfig.ucTransmitterSel =
		(uint8_t)cmd->transmitter_bp_to_atom(cntl->transmitter);

	params.ucLaneNum = (uint8_t)cntl->lanes_number;

	params.acConfig.ucRefClkSource = (uint8_t)pll_id;

	params.ucAction = (uint8_t)cntl->action;

	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
		result = BP_RESULT_OK;

	return result;
}

/* UNIPHYTransmitterControl, params v4: reference-clock source id comes from
 * clock_source_id_to_ref_clk_src instead of the atom clock-source mapping.
 */
static enum bp_result transmitter_control_v4(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 params;
	uint32_t ref_clk_src_id;
	enum connector_id conn_id =
		dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
	const struct command_table_helper *cmd = bp->cmd_helper;

	memset(&params, 0, sizeof(params));

	switch (cntl->transmitter) {
	case TRANSMITTER_UNIPHY_A:
	case TRANSMITTER_UNIPHY_B:
	case TRANSMITTER_UNIPHY_C:
	case TRANSMITTER_UNIPHY_D:
	case TRANSMITTER_UNIPHY_E:
	case TRANSMITTER_UNIPHY_F:
	case TRANSMITTER_TRAVIS_LCD:
		break;
	default:
		return BP_RESULT_BADINPUT;
	}

	if (!cmd->clock_source_id_to_ref_clk_src(cntl->pll_id, &ref_clk_src_id))
		return BP_RESULT_BADINPUT;

	switch (cntl->action) {
	case TRANSMITTER_CONTROL_INIT:
	{
		if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
			(CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
			/* on INIT this bit should be set according to the
			 * phisycal connector
			 * Bit0: dual link connector flag
			 * =0 connector is single link connector
			 * =1 connector is dual link connector
			 */
			params.acConfig.fDualLinkConnector = 1;

		/* connector object id */
		params.usInitInfo =
			cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
	}
	break;
	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
		/* votage swing and pre-emphsis */
		params.asMode.ucLaneSel = (uint8_t)(cntl->lane_select);
		params.asMode.ucLaneSet =
			(uint8_t)(cntl->lane_settings);
		break;
	default:
		if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
			(CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
			/* on ENABLE/DISABLE this bit should be set according to
			 * actual timing (number of lanes)
			 * Bit0: dual link connector flag
			 * =0 connector is single link connector
			 * =1 connector is dual link connector
			 */
			params.acConfig.fDualLinkConnector = 1;

		/* if dual-link */
		if (LANE_COUNT_FOUR < cntl->lanes_number)
			/* link rate, half for dual link
			 * We need to convert from KHz units into 20KHz units
			 */
			params.usPixelClock =
				cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
		else {
			/* link rate, half for dual link
			 * We need to convert from KHz units into 10KHz units
			 */
			params.usPixelClock =
				cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
		}
		break;
	}

	/* 00 - coherent mode
	 * 01 - incoherent mode
	 */
	params.acConfig.fCoherentMode = cntl->coherent;

	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter) ||
		(TRANSMITTER_UNIPHY_D == cntl->transmitter) ||
		(TRANSMITTER_UNIPHY_F == cntl->transmitter))
		/* Bit2: Transmitter Link selection
		 * =0 when bit0=0, single link A/C/E, when bit0=1,
		 * master link A/C/E
		 * =1 when bit0=0, single link B/D/F, when bit0=1,
		 * master link B/D/F
		 */
		params.acConfig.ucLinkSel = 1;

	if (ENGINE_ID_DIGB == cntl->engine_id)
		/* Bit3: Transmitter data source selection
		 * =0 DIGA is data source.
		 * =1 DIGB is data source.
		 * This bit is only useful when ucAction= ATOM_ENABLE
		 */
		params.acConfig.ucEncoderSel = 1;

	/* Bit[7:6]: Transmitter selection
	 * =0 UNIPHY_ENCODER: UNIPHYA/B
	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
	 * =3 reserved
	 */
	params.acConfig.ucTransmitterSel =
		(uint8_t)(cmd->transmitter_bp_to_atom(cntl->transmitter));

	params.ucLaneNum = (uint8_t)(cntl->lanes_number);

	params.acConfig.ucRefClkSource = (uint8_t)(ref_clk_src_id);

	params.ucAction = (uint8_t)(cntl->action);

	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
		result = BP_RESULT_OK;

	return result;
}

/* UNIPHYTransmitterControl, params v1.5: flat parameter block (no per-action
 * union); symbol clock is a 16-bit field in 10KHz units and is scaled below
 * for HDMI deep color.
 */
static enum bp_result transmitter_control_v1_5(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	const struct command_table_helper *cmd = bp->cmd_helper;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 params;

	memset(&params, 0, sizeof(params));
	params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
	params.ucAction = (uint8_t)cntl->action;
	params.ucLaneNum = (uint8_t)cntl->lanes_number;
	params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;

	params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
	params.asConfig.ucPhyClkSrcId =
		cmd->clock_source_id_to_atom_phy_clk_src_id(cntl->pll_id);
	/* 00 - coherent mode */
	params.asConfig.ucCoherentMode = cntl->coherent;
	params.asConfig.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
	params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
	params.ucDPLaneSet = (uint8_t) cntl->lane_settings;
	params.usSymClock = cpu_to_le16((uint16_t) (cntl->pixel_clock / 10));
	/*
	 * In SI/TN case, caller have to set usPixelClock as following:
	 * DP mode: usPixelClock = DP_LINK_CLOCK/10
	 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
	 * DVI single link mode: usPixelClock = pixel clock
	 * DVI dual link mode: usPixelClock = pixel clock
	 * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
	 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
	 * LVDS mode: usPixelClock = pixel clock
	 */
	if (cntl->signal ==
SIGNAL_TYPE_HDMI_TYPE_A) { switch (cntl->color_depth) { case COLOR_DEPTH_101010: params.usSymClock = cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24); break; case COLOR_DEPTH_121212: params.usSymClock = cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24); break; case COLOR_DEPTH_161616: params.usSymClock = cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24); break; default: break; } } if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params)) result = BP_RESULT_OK; return result; } static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl) { enum bp_result result = BP_RESULT_FAILURE; const struct command_table_helper *cmd = bp->cmd_helper; DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 params; memset(&params, 0, sizeof(params)); params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter); params.ucAction = (uint8_t)cntl->action; if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS) params.ucDPLaneSet = (uint8_t)cntl->lane_settings; else params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal); params.ucLaneNum = (uint8_t)cntl->lanes_number; params.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel); params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id); params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id; params.ulSymClock = cntl->pixel_clock/10; /* * In SI/TN case, caller have to set usPixelClock as following: * DP mode: usPixelClock = DP_LINK_CLOCK/10 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz) * DVI single link mode: usPixelClock = pixel clock * DVI dual link mode: usPixelClock = pixel clock * HDMI mode: usPixelClock = pixel clock * deep_color_ratio * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp) * LVDS mode: usPixelClock = pixel clock */ switch (cntl->signal) { case SIGNAL_TYPE_HDMI_TYPE_A: switch (cntl->color_depth) { case COLOR_DEPTH_101010: params.ulSymClock = cpu_to_le16((le16_to_cpu(params.ulSymClock) * 30) / 24); break; case COLOR_DEPTH_121212: 
params.ulSymClock = cpu_to_le16((le16_to_cpu(params.ulSymClock) * 36) / 24); break; case COLOR_DEPTH_161616: params.ulSymClock = cpu_to_le16((le16_to_cpu(params.ulSymClock) * 48) / 24); break; default: break; } break; default: break; } if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** SET PIXEL CLOCK ** ******************************************************************************** *******************************************************************************/ static enum bp_result set_pixel_clock_v3( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static enum bp_result set_pixel_clock_v5( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static enum bp_result set_pixel_clock_v6( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static void init_set_pixel_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) { case 3: bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v3; break; case 5: bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v5; break; case 6: bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v6; break; case 7: bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7; break; default: dm_output_to_console("Don't have set_pixel_clock for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)); bp->cmd_tbl.set_pixel_clock = NULL; break; } } static enum bp_result set_pixel_clock_v3( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; PIXEL_CLOCK_PARAMETERS_V3 *params; SET_PIXEL_CLOCK_PS_ALLOCATION allocation; memset(&allocation, 0, sizeof(allocation)); if (CLOCK_SOURCE_ID_PLL1 == bp_params->pll_id) 
		allocation.sPCLKInput.ucPpll = ATOM_PPLL1;
	else if (CLOCK_SOURCE_ID_PLL2 == bp_params->pll_id)
		allocation.sPCLKInput.ucPpll = ATOM_PPLL2;
	else
		return BP_RESULT_BADINPUT;

	allocation.sPCLKInput.usRefDiv =
		cpu_to_le16((uint16_t)bp_params->reference_divider);
	allocation.sPCLKInput.usFbDiv =
		cpu_to_le16((uint16_t)bp_params->feedback_divider);
	allocation.sPCLKInput.ucFracFbDiv =
		(uint8_t)bp_params->fractional_feedback_divider;
	allocation.sPCLKInput.ucPostDiv =
		(uint8_t)bp_params->pixel_clock_post_divider;

	/* We need to convert from 100Hz units into 10KHz units */
	allocation.sPCLKInput.usPixelClock =
		cpu_to_le16((uint16_t)(bp_params->target_pixel_clock_100hz / 100));

	/* v3 parameter layout overlays the allocation's sPCLKInput block */
	params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
	params->ucTransmitterId =
		bp->cmd_helper->encoder_id_to_atom(
			dal_graphics_object_id_get_encoder_id(
				bp_params->encoder_object_id));
	params->ucEncoderMode =
		(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
			bp_params->signal_type, false));

	if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
		params->ucMiscInfo |= PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;

	if (bp_params->flags.USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK)
		params->ucMiscInfo |= PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK;

	if (CONTROLLER_ID_D1 != bp_params->controller_id)
		params->ucMiscInfo |= PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;

	if (EXEC_BIOS_CMD_TABLE(SetPixelClock, allocation))
		result = BP_RESULT_OK;

	return result;
}

#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V5
/* video bios did not define this: */
typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V5 {
	PIXEL_CLOCK_PARAMETERS_V5 sPCLKInput;
	/* Caller doesn't need to init this portion */
	ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
} SET_PIXEL_CLOCK_PS_ALLOCATION_V5;
#endif

#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V6
/* video bios did not define this: */
typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V6 {
	PIXEL_CLOCK_PARAMETERS_V6 sPCLKInput;
	/* Caller doesn't need to init this portion */
	ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
} SET_PIXEL_CLOCK_PS_ALLOCATION_V6;
#endif

static
enum bp_result set_pixel_clock_v5(
	struct bios_parser *bp,
	struct bp_pixel_clock_parameters *bp_params)
{
	enum bp_result result = BP_RESULT_FAILURE;
	SET_PIXEL_CLOCK_PS_ALLOCATION_V5 clk;
	uint8_t controller_id;
	uint32_t pll_id;

	memset(&clk, 0, sizeof(clk));

	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
			&& bp->cmd_helper->controller_id_to_atom(
					bp_params->controller_id, &controller_id)) {
		clk.sPCLKInput.ucCRTC = controller_id;
		clk.sPCLKInput.ucPpll = (uint8_t)pll_id;
		clk.sPCLKInput.ucRefDiv =
			(uint8_t)(bp_params->reference_divider);
		clk.sPCLKInput.usFbDiv =
			cpu_to_le16((uint16_t)(bp_params->feedback_divider));
		clk.sPCLKInput.ulFbDivDecFrac =
			cpu_to_le32(bp_params->fractional_feedback_divider);
		clk.sPCLKInput.ucPostDiv =
			(uint8_t)(bp_params->pixel_clock_post_divider);
		clk.sPCLKInput.ucTransmitterID =
			bp->cmd_helper->encoder_id_to_atom(
				dal_graphics_object_id_get_encoder_id(
					bp_params->encoder_object_id));
		clk.sPCLKInput.ucEncoderMode =
			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
				bp_params->signal_type, false);

		/* We need to convert from 100Hz units into 10KHz units */
		clk.sPCLKInput.usPixelClock =
			cpu_to_le16((uint16_t)(bp_params->target_pixel_clock_100hz / 100));

		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
			clk.sPCLKInput.ucMiscInfo |=
				PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
			clk.sPCLKInput.ucMiscInfo |=
				PIXEL_CLOCK_MISC_REF_DIV_SRC;

		/* clkV5.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0: 24bpp
		 * =1:30bpp, =2:32bpp
		 * driver choose program it itself, i.e. here we program it
		 * to 888 by default.
		 */
		if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
			switch (bp_params->color_depth) {
			case TRANSMITTER_COLOR_DEPTH_30:
				/* yes this is correct, the atom define is wrong */
				clk.sPCLKInput.ucMiscInfo |=
					PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
				break;
			case TRANSMITTER_COLOR_DEPTH_36:
				/* yes this is correct, the atom define is wrong */
				clk.sPCLKInput.ucMiscInfo |=
					PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
				break;
			default:
				break;
			}

		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
			result = BP_RESULT_OK;
	}

	return result;
}

/* SetPixelClock, params v6: like v5 but the pixel clock is a 24-bit field in
 * a union with the CRTC id, and deep-color misc bits use the v6 defines.
 */
static enum bp_result set_pixel_clock_v6(
	struct bios_parser *bp,
	struct bp_pixel_clock_parameters *bp_params)
{
	enum bp_result result = BP_RESULT_FAILURE;
	SET_PIXEL_CLOCK_PS_ALLOCATION_V6 clk;
	uint8_t controller_id;
	uint32_t pll_id;

	memset(&clk, 0, sizeof(clk));

	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
			&& bp->cmd_helper->controller_id_to_atom(
					bp_params->controller_id, &controller_id)) {
		/* Note: VBIOS still wants to use ucCRTC name which is now
		 * 1 byte in ULONG
		 *typedef struct _CRTC_PIXEL_CLOCK_FREQ
		 *{
		 * target the pixel clock to drive the CRTC timing.
		 * ULONG ulPixelClock:24;
		 * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
		 * previous version.
		 * ATOM_CRTC1~6, indicate the CRTC controller to
		 * ULONG ucCRTC:8;
		 * drive the pixel clock. not used for DCPLL case.
		 *}CRTC_PIXEL_CLOCK_FREQ;
		 *union
		 *{
		 * pixel clock and CRTC id frequency
		 * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
		 * ULONG ulDispEngClkFreq; dispclk frequency
		 *};
		 */
		clk.sPCLKInput.ulCrtcPclkFreq.ucCRTC = controller_id;
		clk.sPCLKInput.ucPpll = (uint8_t) pll_id;
		clk.sPCLKInput.ucRefDiv =
			(uint8_t) bp_params->reference_divider;
		clk.sPCLKInput.usFbDiv =
			cpu_to_le16((uint16_t) bp_params->feedback_divider);
		clk.sPCLKInput.ulFbDivDecFrac =
			cpu_to_le32(bp_params->fractional_feedback_divider);
		clk.sPCLKInput.ucPostDiv =
			(uint8_t) bp_params->pixel_clock_post_divider;
		clk.sPCLKInput.ucTransmitterID =
			bp->cmd_helper->encoder_id_to_atom(
				dal_graphics_object_id_get_encoder_id(
					bp_params->encoder_object_id));
		clk.sPCLKInput.ucEncoderMode =
			(uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
				bp_params->signal_type, false);

		/* We need to convert from 100 Hz units into 10KHz units */
		clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
			cpu_to_le32(bp_params->target_pixel_clock_100hz / 100);

		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
			clk.sPCLKInput.ucMiscInfo |=
				PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
		}

		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) {
			clk.sPCLKInput.ucMiscInfo |=
				PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
		}

		/* clkV6.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0:
		 * 24bpp =1:30bpp, =2:32bpp
		 * driver choose program it itself, i.e. here we pass required
		 * target rate that includes deep color.
		 */
		if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
			switch (bp_params->color_depth) {
			case TRANSMITTER_COLOR_DEPTH_30:
				clk.sPCLKInput.ucMiscInfo |=
					PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
				break;
			case TRANSMITTER_COLOR_DEPTH_36:
				clk.sPCLKInput.ucMiscInfo |=
					PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
				break;
			case TRANSMITTER_COLOR_DEPTH_48:
				clk.sPCLKInput.ucMiscInfo |=
					PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
				break;
			default:
				break;
			}

		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
			result = BP_RESULT_OK;
	}

	return result;
}

/* SetPixelClock, params v7: flat parameter block; pixel clock is passed in
 * 100Hz units directly and deep color is expressed as a ratio byte.
 */
static enum bp_result set_pixel_clock_v7(
	struct bios_parser *bp,
	struct bp_pixel_clock_parameters *bp_params)
{
	enum bp_result result = BP_RESULT_FAILURE;
	PIXEL_CLOCK_PARAMETERS_V7 clk;
	uint8_t controller_id;
	uint32_t pll_id;

	memset(&clk, 0, sizeof(clk));

	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
			&& bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
		/* Note: VBIOS still wants to use ucCRTC name which is now
		 * 1 byte in ULONG
		 *typedef struct _CRTC_PIXEL_CLOCK_FREQ
		 *{
		 * target the pixel clock to drive the CRTC timing.
		 * ULONG ulPixelClock:24;
		 * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
		 * previous version.
		 * ATOM_CRTC1~6, indicate the CRTC controller to
		 * ULONG ucCRTC:8;
		 * drive the pixel clock. not used for DCPLL case.
		 *}CRTC_PIXEL_CLOCK_FREQ;
		 *union
		 *{
		 * pixel clock and CRTC id frequency
		 * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
		 * ULONG ulDispEngClkFreq; dispclk frequency
		 *};
		 */
		clk.ucCRTC = controller_id;
		clk.ucPpll = (uint8_t) pll_id;
		clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
		clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);

		clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock_100hz);

		clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);

		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC;
		if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
		if (bp_params->flags.SUPPORT_YUV_420)
			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
		if (bp_params->flags.SET_XTALIN_REF_SRC)
			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
		if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
		if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;

		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
			result = BP_RESULT_OK;
	}

	return result;
}

/*******************************************************************************
 ********************************************************************************
 **
 **                  ENABLE PIXEL CLOCK SS
 **
 ********************************************************************************
 *******************************************************************************/

static enum bp_result enable_spread_spectrum_on_ppll_v1(
	struct bios_parser *bp,
	struct bp_spread_spectrum_parameters *bp_params,
	bool enable);
static enum bp_result enable_spread_spectrum_on_ppll_v2(
	struct bios_parser *bp,
	struct
bp_spread_spectrum_parameters *bp_params, bool enable); static enum bp_result enable_spread_spectrum_on_ppll_v3( struct bios_parser *bp, struct bp_spread_spectrum_parameters *bp_params, bool enable); static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL)) { case 1: bp->cmd_tbl.enable_spread_spectrum_on_ppll = enable_spread_spectrum_on_ppll_v1; break; case 2: bp->cmd_tbl.enable_spread_spectrum_on_ppll = enable_spread_spectrum_on_ppll_v2; break; case 3: bp->cmd_tbl.enable_spread_spectrum_on_ppll = enable_spread_spectrum_on_ppll_v3; break; default: dm_output_to_console("Don't have enable_spread_spectrum_on_ppll for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL)); bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL; break; } } static enum bp_result enable_spread_spectrum_on_ppll_v1( struct bios_parser *bp, struct bp_spread_spectrum_parameters *bp_params, bool enable) { enum bp_result result = BP_RESULT_FAILURE; ENABLE_SPREAD_SPECTRUM_ON_PPLL params; memset(&params, 0, sizeof(params)); if ((enable == true) && (bp_params->percentage > 0)) params.ucEnable = ATOM_ENABLE; else params.ucEnable = ATOM_DISABLE; params.usSpreadSpectrumPercentage = cpu_to_le16((uint16_t)bp_params->percentage); params.ucSpreadSpectrumStep = (uint8_t)bp_params->ver1.step; params.ucSpreadSpectrumDelay = (uint8_t)bp_params->ver1.delay; /* convert back to unit of 10KHz */ params.ucSpreadSpectrumRange = (uint8_t)(bp_params->ver1.range / 10000); if (bp_params->flags.EXTERNAL_SS) params.ucSpreadSpectrumType |= ATOM_EXTERNAL_SS_MASK; if (bp_params->flags.CENTER_SPREAD) params.ucSpreadSpectrumType |= ATOM_SS_CENTRE_SPREAD_MODE; if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1) params.ucPpll = ATOM_PPLL1; else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2) params.ucPpll = ATOM_PPLL2; else BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! 
*/ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params)) result = BP_RESULT_OK; return result; } static enum bp_result enable_spread_spectrum_on_ppll_v2( struct bios_parser *bp, struct bp_spread_spectrum_parameters *bp_params, bool enable) { enum bp_result result = BP_RESULT_FAILURE; ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 params; memset(&params, 0, sizeof(params)); if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1) params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P1PLL; else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2) params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P2PLL; else BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */ if ((enable == true) && (bp_params->percentage > 0)) { params.ucEnable = ATOM_ENABLE; params.usSpreadSpectrumPercentage = cpu_to_le16((uint16_t)(bp_params->percentage)); params.usSpreadSpectrumStep = cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size)); if (bp_params->flags.EXTERNAL_SS) params.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD; if (bp_params->flags.CENTER_SPREAD) params.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD; /* Both amounts need to be left shifted first before bit * comparison. 
Otherwise, the result will always be zero here */ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)( ((bp_params->ds.feedback_amount << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) | ((bp_params->ds.nfrac_amount << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK))); } else params.ucEnable = ATOM_DISABLE; if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params)) result = BP_RESULT_OK; return result; } static enum bp_result enable_spread_spectrum_on_ppll_v3( struct bios_parser *bp, struct bp_spread_spectrum_parameters *bp_params, bool enable) { enum bp_result result = BP_RESULT_FAILURE; ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 params; memset(&params, 0, sizeof(params)); switch (bp_params->pll_id) { case CLOCK_SOURCE_ID_PLL0: /* ATOM_PPLL_SS_TYPE_V3_P0PLL; this is pixel clock only, * not for SI display clock. */ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL; break; case CLOCK_SOURCE_ID_PLL1: params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P1PLL; break; case CLOCK_SOURCE_ID_PLL2: params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P2PLL; break; case CLOCK_SOURCE_ID_DCPLL: params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL; break; default: BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */ return result; } if (enable == true) { params.ucEnable = ATOM_ENABLE; params.usSpreadSpectrumAmountFrac = cpu_to_le16((uint16_t)(bp_params->ds_frac_amount)); params.usSpreadSpectrumStep = cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size)); if (bp_params->flags.EXTERNAL_SS) params.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD; if (bp_params->flags.CENTER_SPREAD) params.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD; /* Both amounts need to be left shifted first before bit * comparison. 
Otherwise, the result will always be zero here */ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)( ((bp_params->ds.feedback_amount << ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT) & ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) | ((bp_params->ds.nfrac_amount << ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT) & ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK))); } else params.ucEnable = ATOM_DISABLE; if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** ADJUST DISPLAY PLL ** ******************************************************************************** *******************************************************************************/ static enum bp_result adjust_display_pll_v2( struct bios_parser *bp, struct bp_adjust_pixel_clock_parameters *bp_params); static enum bp_result adjust_display_pll_v3( struct bios_parser *bp, struct bp_adjust_pixel_clock_parameters *bp_params); static void init_adjust_display_pll(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll)) { case 2: bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v2; break; case 3: bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3; break; default: dm_output_to_console("Don't have adjust_display_pll for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll)); bp->cmd_tbl.adjust_display_pll = NULL; break; } } static enum bp_result adjust_display_pll_v2( struct bios_parser *bp, struct bp_adjust_pixel_clock_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; ADJUST_DISPLAY_PLL_PS_ALLOCATION params = { 0 }; /* We need to convert from KHz units into 10KHz units and then convert * output pixel clock back 10KHz-->KHz */ uint32_t pixel_clock_10KHz_in = bp_params->pixel_clock / 10; params.usPixelClock = cpu_to_le16((uint16_t)(pixel_clock_10KHz_in)); params.ucTransmitterID = 
bp->cmd_helper->encoder_id_to_atom( dal_graphics_object_id_get_encoder_id( bp_params->encoder_object_id)); params.ucEncodeMode = (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( bp_params->signal_type, false); if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) { /* Convert output pixel clock back 10KHz-->KHz: multiply * original pixel clock in KHz by ratio * [output pxlClk/input pxlClk] */ uint64_t pixel_clk_10_khz_out = (uint64_t)le16_to_cpu(params.usPixelClock); uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock; if (pixel_clock_10KHz_in != 0) { bp_params->adjusted_pixel_clock = div_u64(pixel_clk * pixel_clk_10_khz_out, pixel_clock_10KHz_in); } else { bp_params->adjusted_pixel_clock = 0; BREAK_TO_DEBUGGER(); } result = BP_RESULT_OK; } return result; } static enum bp_result adjust_display_pll_v3( struct bios_parser *bp, struct bp_adjust_pixel_clock_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 params; uint32_t pixel_clk_10_kHz_in = bp_params->pixel_clock / 10; memset(&params, 0, sizeof(params)); /* We need to convert from KHz units into 10KHz units and then convert * output pixel clock back 10KHz-->KHz */ params.sInput.usPixelClock = cpu_to_le16((uint16_t)pixel_clk_10_kHz_in); params.sInput.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom( dal_graphics_object_id_get_encoder_id( bp_params->encoder_object_id)); params.sInput.ucEncodeMode = (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( bp_params->signal_type, false); if (bp_params->ss_enable == true) params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE; if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK) params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK; if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) { /* Convert output pixel clock back 10KHz-->KHz: multiply * original pixel clock in KHz by ratio * [output pxlClk/input pxlClk] */ uint64_t pixel_clk_10_khz_out = (uint64_t)le32_to_cpu(params.sOutput.ulDispPllFreq); uint64_t 
pixel_clk = (uint64_t)bp_params->pixel_clock; if (pixel_clk_10_kHz_in != 0) { bp_params->adjusted_pixel_clock = div_u64(pixel_clk * pixel_clk_10_khz_out, pixel_clk_10_kHz_in); } else { bp_params->adjusted_pixel_clock = 0; BREAK_TO_DEBUGGER(); } bp_params->reference_divider = params.sOutput.ucRefDiv; bp_params->pixel_clock_post_divider = params.sOutput.ucPostDiv; result = BP_RESULT_OK; } return result; } /******************************************************************************* ******************************************************************************** ** ** DAC ENCODER CONTROL ** ******************************************************************************** *******************************************************************************/ static enum bp_result dac1_encoder_control_v1( struct bios_parser *bp, bool enable, uint32_t pixel_clock, uint8_t dac_standard); static enum bp_result dac2_encoder_control_v1( struct bios_parser *bp, bool enable, uint32_t pixel_clock, uint8_t dac_standard); static void init_dac_encoder_control(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1EncoderControl)) { case 1: bp->cmd_tbl.dac1_encoder_control = dac1_encoder_control_v1; break; default: bp->cmd_tbl.dac1_encoder_control = NULL; break; } switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2EncoderControl)) { case 1: bp->cmd_tbl.dac2_encoder_control = dac2_encoder_control_v1; break; default: bp->cmd_tbl.dac2_encoder_control = NULL; break; } } static void dac_encoder_control_prepare_params( DAC_ENCODER_CONTROL_PS_ALLOCATION *params, bool enable, uint32_t pixel_clock, uint8_t dac_standard) { params->ucDacStandard = dac_standard; if (enable) params->ucAction = ATOM_ENABLE; else params->ucAction = ATOM_DISABLE; /* We need to convert from KHz units into 10KHz units * it looks as if the TvControl do not care about pixel clock */ params->usPixelClock = cpu_to_le16((uint16_t)(pixel_clock / 10)); } static enum bp_result dac1_encoder_control_v1( struct bios_parser *bp, 
bool enable, uint32_t pixel_clock, uint8_t dac_standard) { enum bp_result result = BP_RESULT_FAILURE; DAC_ENCODER_CONTROL_PS_ALLOCATION params; dac_encoder_control_prepare_params( &params, enable, pixel_clock, dac_standard); if (EXEC_BIOS_CMD_TABLE(DAC1EncoderControl, params)) result = BP_RESULT_OK; return result; } static enum bp_result dac2_encoder_control_v1( struct bios_parser *bp, bool enable, uint32_t pixel_clock, uint8_t dac_standard) { enum bp_result result = BP_RESULT_FAILURE; DAC_ENCODER_CONTROL_PS_ALLOCATION params; dac_encoder_control_prepare_params( &params, enable, pixel_clock, dac_standard); if (EXEC_BIOS_CMD_TABLE(DAC2EncoderControl, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** DAC OUTPUT CONTROL ** ******************************************************************************** *******************************************************************************/ static enum bp_result dac1_output_control_v1( struct bios_parser *bp, bool enable); static enum bp_result dac2_output_control_v1( struct bios_parser *bp, bool enable); static void init_dac_output_control(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1OutputControl)) { case 1: bp->cmd_tbl.dac1_output_control = dac1_output_control_v1; break; default: bp->cmd_tbl.dac1_output_control = NULL; break; } switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2OutputControl)) { case 1: bp->cmd_tbl.dac2_output_control = dac2_output_control_v1; break; default: bp->cmd_tbl.dac2_output_control = NULL; break; } } static enum bp_result dac1_output_control_v1( struct bios_parser *bp, bool enable) { enum bp_result result = BP_RESULT_FAILURE; DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params; if (enable) params.ucAction = ATOM_ENABLE; else params.ucAction = ATOM_DISABLE; if (EXEC_BIOS_CMD_TABLE(DAC1OutputControl, params)) result = 
BP_RESULT_OK; return result; } static enum bp_result dac2_output_control_v1( struct bios_parser *bp, bool enable) { enum bp_result result = BP_RESULT_FAILURE; DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params; if (enable) params.ucAction = ATOM_ENABLE; else params.ucAction = ATOM_DISABLE; if (EXEC_BIOS_CMD_TABLE(DAC2OutputControl, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** SET CRTC TIMING ** ******************************************************************************** *******************************************************************************/ static enum bp_result set_crtc_using_dtd_timing_v3( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); static enum bp_result set_crtc_timing_v1( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); static void init_set_crtc_timing(struct bios_parser *bp) { uint32_t dtd_version = BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_UsingDTDTiming); if (dtd_version > 2) switch (dtd_version) { case 3: bp->cmd_tbl.set_crtc_timing = set_crtc_using_dtd_timing_v3; break; default: dm_output_to_console("Don't have set_crtc_timing for dtd v%d\n", dtd_version); bp->cmd_tbl.set_crtc_timing = NULL; break; } else switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing)) { case 1: bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1; break; default: dm_output_to_console("Don't have set_crtc_timing for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing)); bp->cmd_tbl.set_crtc_timing = NULL; break; } } static enum bp_result set_crtc_timing_v1( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION params = {0}; uint8_t atom_controller_id; if (bp->cmd_helper->controller_id_to_atom( bp_params->controller_id, &atom_controller_id)) params.ucCRTC = 
atom_controller_id; params.usH_Total = cpu_to_le16((uint16_t)(bp_params->h_total)); params.usH_Disp = cpu_to_le16((uint16_t)(bp_params->h_addressable)); params.usH_SyncStart = cpu_to_le16((uint16_t)(bp_params->h_sync_start)); params.usH_SyncWidth = cpu_to_le16((uint16_t)(bp_params->h_sync_width)); params.usV_Total = cpu_to_le16((uint16_t)(bp_params->v_total)); params.usV_Disp = cpu_to_le16((uint16_t)(bp_params->v_addressable)); params.usV_SyncStart = cpu_to_le16((uint16_t)(bp_params->v_sync_start)); params.usV_SyncWidth = cpu_to_le16((uint16_t)(bp_params->v_sync_width)); /* VBIOS does not expect any value except zero into this call, for * underscan use another entry ProgramOverscan call but when mode * 1776x1000 with the overscan 72x44 .e.i. 1920x1080 @30 DAL2 is ok, * but when same ,but 60 Hz there is corruption * DAL1 does not allow the mode 1776x1000@60 */ params.ucOverscanRight = (uint8_t)bp_params->h_overscan_right; params.ucOverscanLeft = (uint8_t)bp_params->h_overscan_left; params.ucOverscanBottom = (uint8_t)bp_params->v_overscan_bottom; params.ucOverscanTop = (uint8_t)bp_params->v_overscan_top; if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY) params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY); if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY) params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY); if (bp_params->flags.INTERLACE) { params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE); /* original DAL code has this condition to apply tis for * non-TV/CV only due to complex MV testing for possible * impact * if (pACParameters->signal != SignalType_YPbPr && * pACParameters->signal != SignalType_Composite && * pACParameters->signal != SignalType_SVideo) */ /* HW will deduct 0.5 line from 2nd feild. * i.e. for 1080i, it is 2 lines for 1st field, 2.5 * lines for the 2nd feild. 
we need input as 5 instead * of 4, but it is 4 either from Edid data * (spec CEA 861) or CEA timing table. */ params.usV_SyncStart = cpu_to_le16((uint16_t)(bp_params->v_sync_start + 1)); } if (bp_params->flags.HORZ_COUNT_BY_TWO) params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE); if (EXEC_BIOS_CMD_TABLE(SetCRTC_Timing, params)) result = BP_RESULT_OK; return result; } static enum bp_result set_crtc_using_dtd_timing_v3( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; SET_CRTC_USING_DTD_TIMING_PARAMETERS params = {0}; uint8_t atom_controller_id; if (bp->cmd_helper->controller_id_to_atom( bp_params->controller_id, &atom_controller_id)) params.ucCRTC = atom_controller_id; /* bios usH_Size wants h addressable size */ params.usH_Size = cpu_to_le16((uint16_t)bp_params->h_addressable); /* bios usH_Blanking_Time wants borders included in blanking */ params.usH_Blanking_Time = cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable)); /* bios usV_Size wants v addressable size */ params.usV_Size = cpu_to_le16((uint16_t)bp_params->v_addressable); /* bios usV_Blanking_Time wants borders included in blanking */ params.usV_Blanking_Time = cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable)); /* bios usHSyncOffset is the offset from the end of h addressable, * our horizontalSyncStart is the offset from the beginning * of h addressable */ params.usH_SyncOffset = cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable)); params.usH_SyncWidth = cpu_to_le16((uint16_t)bp_params->h_sync_width); /* bios usHSyncOffset is the offset from the end of v addressable, * our verticalSyncStart is the offset from the beginning of * v addressable */ params.usV_SyncOffset = cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable)); params.usV_SyncWidth = cpu_to_le16((uint16_t)bp_params->v_sync_width); 
/* we assume that overscan from original timing does not get bigger * than 255 * we will program all the borders in the Set CRTC Overscan call below */ if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY) params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY); if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY) params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY); if (bp_params->flags.INTERLACE) { params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE); /* original DAL code has this condition to apply this * for non-TV/CV only * due to complex MV testing for possible impact * if ( pACParameters->signal != SignalType_YPbPr && * pACParameters->signal != SignalType_Composite && * pACParameters->signal != SignalType_SVideo) */ { /* HW will deduct 0.5 line from 2nd feild. * i.e. for 1080i, it is 2 lines for 1st field, * 2.5 lines for the 2nd feild. we need input as 5 * instead of 4. * but it is 4 either from Edid data (spec CEA 861) * or CEA timing table. 
*/ le16_add_cpu(&params.usV_SyncOffset, 1); } } if (bp_params->flags.HORZ_COUNT_BY_TWO) params.susModeMiscInfo.usAccess = cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE); if (EXEC_BIOS_CMD_TABLE(SetCRTC_UsingDTDTiming, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** ENABLE CRTC ** ******************************************************************************** *******************************************************************************/ static enum bp_result enable_crtc_v1( struct bios_parser *bp, enum controller_id controller_id, bool enable); static void init_enable_crtc(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)) { case 1: bp->cmd_tbl.enable_crtc = enable_crtc_v1; break; default: dm_output_to_console("Don't have enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)); bp->cmd_tbl.enable_crtc = NULL; break; } } static enum bp_result enable_crtc_v1( struct bios_parser *bp, enum controller_id controller_id, bool enable) { bool result = BP_RESULT_FAILURE; ENABLE_CRTC_PARAMETERS params = {0}; uint8_t id; if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) params.ucCRTC = id; else return BP_RESULT_BADINPUT; if (enable) params.ucEnable = ATOM_ENABLE; else params.ucEnable = ATOM_DISABLE; if (EXEC_BIOS_CMD_TABLE(EnableCRTC, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** ENABLE CRTC MEM REQ ** ******************************************************************************** *******************************************************************************/ static enum bp_result enable_crtc_mem_req_v1( struct bios_parser *bp, enum controller_id 
controller_id, bool enable); static void init_enable_crtc_mem_req(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTCMemReq)) { case 1: bp->cmd_tbl.enable_crtc_mem_req = enable_crtc_mem_req_v1; break; default: bp->cmd_tbl.enable_crtc_mem_req = NULL; break; } } static enum bp_result enable_crtc_mem_req_v1( struct bios_parser *bp, enum controller_id controller_id, bool enable) { bool result = BP_RESULT_BADINPUT; ENABLE_CRTC_PARAMETERS params = {0}; uint8_t id; if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) { params.ucCRTC = id; if (enable) params.ucEnable = ATOM_ENABLE; else params.ucEnable = ATOM_DISABLE; if (EXEC_BIOS_CMD_TABLE(EnableCRTCMemReq, params)) result = BP_RESULT_OK; else result = BP_RESULT_FAILURE; } return result; } /******************************************************************************* ******************************************************************************** ** ** DISPLAY PLL ** ******************************************************************************** *******************************************************************************/ static enum bp_result program_clock_v5( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static enum bp_result program_clock_v6( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static void init_program_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) { case 5: bp->cmd_tbl.program_clock = program_clock_v5; break; case 6: bp->cmd_tbl.program_clock = program_clock_v6; break; default: dm_output_to_console("Don't have program_clock for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)); bp->cmd_tbl.program_clock = NULL; break; } } static enum bp_result program_clock_v5( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; SET_PIXEL_CLOCK_PS_ALLOCATION_V5 params; uint32_t atom_pll_id; memset(&params, 0, sizeof(params)); if 
(!bp->cmd_helper->clock_source_id_to_atom( bp_params->pll_id, &atom_pll_id)) { BREAK_TO_DEBUGGER(); /* Invalid Input!! */ return BP_RESULT_BADINPUT; } /* We need to convert from KHz units into 10KHz units */ params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id; params.sPCLKInput.usPixelClock = cpu_to_le16((uint16_t) (bp_params->target_pixel_clock_100hz / 100)); params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID; if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) result = BP_RESULT_OK; return result; } static enum bp_result program_clock_v6( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; SET_PIXEL_CLOCK_PS_ALLOCATION_V6 params; uint32_t atom_pll_id; memset(&params, 0, sizeof(params)); if (!bp->cmd_helper->clock_source_id_to_atom( bp_params->pll_id, &atom_pll_id)) { BREAK_TO_DEBUGGER(); /*Invalid Input!!*/ return BP_RESULT_BADINPUT; } /* We need to convert from KHz units into 10KHz units */ params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id; params.sPCLKInput.ulDispEngClkFreq = cpu_to_le32(bp_params->target_pixel_clock_100hz / 100); if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; if (bp_params->flags.SET_DISPCLK_DFS_BYPASS) params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_DPREFCLK_BYPASS; if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) { /* True display clock is returned by VBIOS if DFS bypass * is enabled. 
*/ bp_params->dfs_bypass_display_clock = (uint32_t)(le32_to_cpu(params.sPCLKInput.ulDispEngClkFreq) * 10); result = BP_RESULT_OK; } return result; } /******************************************************************************* ******************************************************************************** ** ** EXTERNAL ENCODER CONTROL ** ******************************************************************************** *******************************************************************************/ static enum bp_result external_encoder_control_v3( struct bios_parser *bp, struct bp_external_encoder_control *cntl); static void init_external_encoder_control( struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(ExternalEncoderControl)) { case 3: bp->cmd_tbl.external_encoder_control = external_encoder_control_v3; break; default: bp->cmd_tbl.external_encoder_control = NULL; break; } } static enum bp_result external_encoder_control_v3( struct bios_parser *bp, struct bp_external_encoder_control *cntl) { enum bp_result result = BP_RESULT_FAILURE; /* we need use _PS_Alloc struct */ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 params; EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *cntl_params; struct graphics_object_id encoder; bool is_input_signal_dp = false; memset(&params, 0, sizeof(params)); cntl_params = &params.sExtEncoder; encoder = cntl->encoder_id; /* check if encoder supports external encoder control table */ switch (dal_graphics_object_id_get_encoder_id(encoder)) { case ENCODER_ID_EXTERNAL_NUTMEG: case ENCODER_ID_EXTERNAL_TRAVIS: is_input_signal_dp = true; break; default: BREAK_TO_DEBUGGER(); return BP_RESULT_BADINPUT; } /* Fill information based on the action * * Bit[6:4]: indicate external encoder, applied to all functions. 
* =0: external encoder1, mapped to external encoder enum id1 * =1: external encoder2, mapped to external encoder enum id2 * * enum ObjectEnumId * { * EnumId_Unknown = 0, * EnumId_1, * EnumId_2, * }; */ cntl_params->ucConfig = (uint8_t)((encoder.enum_id - 1) << 4); switch (cntl->action) { case EXTERNAL_ENCODER_CONTROL_INIT: /* output display connector type. Only valid in encoder * initialization */ cntl_params->usConnectorId = cpu_to_le16((uint16_t)cntl->connector_obj_id.id); break; case EXTERNAL_ENCODER_CONTROL_SETUP: /* EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 pixel clock unit in * 10KHz * output display device pixel clock frequency in unit of 10KHz. * Only valid in setup and enableoutput */ cntl_params->usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10)); /* Indicate display output signal type drive by external * encoder, only valid in setup and enableoutput */ cntl_params->ucEncoderMode = (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( cntl->signal, false); if (is_input_signal_dp) { /* Bit[0]: indicate link rate, =1: 2.7Ghz, =0: 1.62Ghz, * only valid in encoder setup with DP mode. */ if (LINK_RATE_HIGH == cntl->link_rate) cntl_params->ucConfig |= 1; /* output color depth Indicate encoder data bpc format * in DP mode, only valid in encoder setup in DP mode. */ cntl_params->ucBitPerColor = (uint8_t)(cntl->color_depth); } /* Indicate how many lanes used by external encoder, only valid * in encoder setup and enableoutput. 
*/ cntl_params->ucLaneNum = (uint8_t)(cntl->lanes_number); break; case EXTERNAL_ENCODER_CONTROL_ENABLE: cntl_params->usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10)); cntl_params->ucEncoderMode = (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( cntl->signal, false); cntl_params->ucLaneNum = (uint8_t)cntl->lanes_number; break; default: break; } cntl_params->ucAction = (uint8_t)cntl->action; if (EXEC_BIOS_CMD_TABLE(ExternalEncoderControl, params)) result = BP_RESULT_OK; return result; } /******************************************************************************* ******************************************************************************** ** ** ENABLE DISPLAY POWER GATING ** ******************************************************************************** *******************************************************************************/ static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, enum bp_pipe_control_action action); static void init_enable_disp_power_gating( struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)) { case 1: bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_v2_1; break; default: dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)); bp->cmd_tbl.enable_disp_power_gating = NULL; break; } } static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, enum bp_pipe_control_action action) { enum bp_result result = BP_RESULT_FAILURE; ENABLE_DISP_POWER_GATING_PS_ALLOCATION params = {0}; uint8_t atom_crtc_id; if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id)) params.ucDispPipeId = atom_crtc_id; else return BP_RESULT_BADINPUT; params.ucEnable = bp->cmd_helper->disp_power_gating_action_to_atom(action); if (EXEC_BIOS_CMD_TABLE(EnableDispPowerGating, params)) result = BP_RESULT_OK; return result; } 
/*******************************************************************************
 **
 **                  SET DCE CLOCK
 **
 *******************************************************************************/

static enum bp_result set_dce_clock_v2_1(
	struct bios_parser *bp,
	struct bp_set_dce_clock_parameters *bp_params);

/*
 * Register the SetDCEClock handler matching the command table's parameter
 * revision; leaves the hook NULL when the revision is unknown.
 */
static void init_set_dce_clock(struct bios_parser *bp)
{
	switch (BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock)) {
	case 1:
		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
		break;
	default:
		dm_output_to_console("Don't have set_dce_clock for v%d\n",
			 BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock));
		bp->cmd_tbl.set_dce_clock = NULL;
		break;
	}
}

/*
 * Program a DCE clock through the VBIOS SetDCEClock command table (v2.1
 * parameter layout). For DPREFCLK only the reference-source flags are
 * programmed; for display clock the target frequency is. On success the
 * frequency actually programmed by the VBIOS is written back into
 * @bp_params->target_clock_frequency (KHz).
 */
static enum bp_result set_dce_clock_v2_1(
	struct bios_parser *bp,
	struct bp_set_dce_clock_parameters *bp_params)
{
	enum bp_result result = BP_RESULT_FAILURE;

	SET_DCE_CLOCK_PS_ALLOCATION_V2_1 params;
	uint32_t atom_pll_id;
	uint32_t atom_clock_type;
	const struct command_table_helper *cmd = bp->cmd_helper;

	memset(&params, 0, sizeof(params));

	/* translate DC identifiers into their ATOM encodings */
	if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
			!cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type))
		return BP_RESULT_BADINPUT;

	params.asParam.ucDCEClkSrc = atom_pll_id;
	params.asParam.ucDCEClkType = atom_clock_type;

	if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
		if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
			params.asParam.ucDCEClkFlag |=
					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;

		if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
			params.asParam.ucDCEClkFlag |=
					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;

		if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
			params.asParam.ucDCEClkFlag |=
					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;

		if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
			params.asParam.ucDCEClkFlag |=
					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
	} else
		/* only program clock frequency if display clock is used;
		 * VBIOS will program DPREFCLK */
		/* We need to convert from KHz units into 10KHz units */
		params.asParam.ulDCEClkFreq =
				cpu_to_le32(bp_params->target_clock_frequency / 10);

	if (EXEC_BIOS_CMD_TABLE(SetDCEClock, params)) {
		/* Convert from 10KHz units back to KHz */
		bp_params->target_clock_frequency =
				le32_to_cpu(params.asParam.ulDCEClkFreq) * 10;
		result = BP_RESULT_OK;
	}

	return result;
}
linux-master
drivers/gpu/drm/amd/display/dc/bios/command_table.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "atom.h" #include "dc_bios_types.h" #include "include/gpio_service_interface.h" #include "include/grph_object_ctrl_defs.h" #include "include/bios_parser_interface.h" #include "include/logger_interface.h" #include "command_table.h" #include "bios_parser_helper.h" #include "command_table_helper.h" #include "bios_parser.h" #include "bios_parser_types_internal.h" #include "bios_parser_interface.h" #include "bios_parser_common.h" #include "dc.h" #define THREE_PERCENT_OF_10000 300 #define LAST_RECORD_TYPE 0xff #define DC_LOGGER \ bp->base.ctx->logger #define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table) static void get_atom_data_table_revision( ATOM_COMMON_TABLE_HEADER *atom_data_tbl, struct atom_data_revision *tbl_revision); static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object, uint16_t **id_list); static ATOM_OBJECT *get_bios_object(struct bios_parser *bp, struct graphics_object_id id); static enum bp_result get_gpio_i2c_info(struct bios_parser *bp, ATOM_I2C_RECORD *record, struct graphics_object_i2c_info *info); static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp, ATOM_OBJECT *object); static struct device_id device_type_from_device_id(uint16_t device_id); static uint32_t signal_to_ss_id(enum as_signal_type signal); static uint32_t get_support_mask_for_device_id(struct device_id device_id); static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record( struct bios_parser *bp, ATOM_OBJECT *object); #define BIOS_IMAGE_SIZE_OFFSET 2 #define BIOS_IMAGE_SIZE_UNIT 512 /*****************************************************************************/ static bool bios_parser_construct( struct bios_parser *bp, struct bp_init_data *init, enum dce_version dce_version); static uint8_t bios_parser_get_connectors_number( struct dc_bios *dcb); static enum bp_result bios_parser_get_embedded_panel_info( struct dc_bios *dcb, struct embedded_panel_info 
*info);

/*****************************************************************************/

/*
 * Allocate and construct a BIOS parser instance.
 * Returns the embedded dc_bios interface on success, NULL on allocation or
 * construction failure (the partially-built parser is freed).
 */
struct dc_bios *bios_parser_create(
	struct bp_init_data *init,
	enum dce_version dce_version)
{
	struct bios_parser *bp;

	bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
	if (!bp)
		return NULL;

	if (bios_parser_construct(bp, init, dce_version))
		return &bp->base;

	kfree(bp);
	BREAK_TO_DEBUGGER();
	return NULL;
}

/* Release the buffers owned by the parser (not the parser object itself). */
static void bios_parser_destruct(struct bios_parser *bp)
{
	kfree(bp->base.bios_local_image);
	kfree(bp->base.integrated_info);
}

/* Tear down a parser created by bios_parser_create() and NULL the caller's
 * handle. */
static void bios_parser_destroy(struct dc_bios **dcb)
{
	struct bios_parser *bp = BP_FROM_DCB(*dcb);

	if (!bp) {
		BREAK_TO_DEBUGGER();
		return;
	}

	bios_parser_destruct(bp);

	kfree(bp);
	*dcb = NULL;
}

/* Read the object count of the object table found at @offset (relative to
 * the object info table); returns 0 when the table image cannot be mapped. */
static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
{
	ATOM_OBJECT_TABLE *table;

	uint32_t object_table_offset = bp->object_info_tbl_offset + offset;

	table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
				object_table_offset,
				struct_size(table, asObjects, 1)));

	if (!table)
		return 0;
	else
		return table->ucNumberOfObjects;
}

static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	return get_number_of_objects(bp,
		le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
}

/*
 * Translate connector index @i into a graphics_object_id.
 * Returns an OBJECT_TYPE_UNKNOWN id when the connector table is missing or
 * @i is out of range.
 */
static struct graphics_object_id bios_parser_get_connector_id(
	struct dc_bios *dcb,
	uint8_t i)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct graphics_object_id object_id = dal_graphics_object_id_init(
		0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
	uint16_t id;

	uint32_t connector_table_offset = bp->object_info_tbl_offset
		+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);

	ATOM_OBJECT_TABLE *tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
				connector_table_offset,
				struct_size(tbl, asObjects, 1)));

	if (!tbl) {
		dm_error("Can't get connector table from atom bios.\n");
		return object_id;
	}

	if (tbl->ucNumberOfObjects <= i) {
		dm_error("Can't find connector id %d in connector table of size %d.\n",
			 i, tbl->ucNumberOfObjects);
		return object_id;
	}

	id = le16_to_cpu(tbl->asObjects[i].usObjectID);
	object_id = object_id_from_bios_object_id(id);
	return object_id;
}

/* Return the @index-th source object id of @object_id via @src_object_id. */
static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
	struct graphics_object_id object_id, uint32_t index,
	struct graphics_object_id *src_object_id)
{
	uint32_t number;
	uint16_t *id;
	ATOM_OBJECT *object;
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!src_object_id)
		return BP_RESULT_BADINPUT;

	object = get_bios_object(bp, object_id);

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object id */
		return BP_RESULT_BADINPUT;
	}

	number = get_src_obj_list(bp, object, &id);

	if (number <= index)
		return BP_RESULT_BADINPUT;

	*src_object_id = object_id_from_bios_object_id(id[index]);

	return BP_RESULT_OK;
}

/*
 * Walk @id's record list for an I2C record and translate it into @info.
 * Stops at the end-of-list marker or a zero-sized record.
 */
static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
	struct graphics_object_id id,
	struct graphics_object_i2c_info *info)
{
	uint32_t offset;
	ATOM_OBJECT *object;
	ATOM_COMMON_RECORD_HEADER *header;
	ATOM_I2C_RECORD *record;
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!info)
		return BP_RESULT_BADINPUT;

	object = get_bios_object(bp, id);

	if (!object)
		return BP_RESULT_BADINPUT;

	offset = le16_to_cpu(object->usRecordOffset)
			+ bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);

		if (!header)
			return BP_RESULT_BADBIOSTABLE;

		if (LAST_RECORD_TYPE == header->ucRecordType ||
			!header->ucRecordSize)
			break;

		if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
			&& sizeof(ATOM_I2C_RECORD) <= header->ucRecordSize) {
			/* get the I2C info */
			record = (ATOM_I2C_RECORD *) header;

			if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK)
				return BP_RESULT_OK;
		}

		offset += header->ucRecordSize;
	}

	return BP_RESULT_NORECORD;
}

/* Fill @info with the HPD GPIO id and plugged pin-state of object @id. */
static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
	struct graphics_object_id id,
	struct graphics_object_hpd_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	ATOM_OBJECT *object;
	ATOM_HPD_INT_RECORD *record = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

	object = get_bios_object(bp, id);

	if (!object)
		return BP_RESULT_BADINPUT;

	record = get_hpd_record(bp, object);

	if (record != NULL) {
		info->hpd_int_gpio_uid = record->ucHPDIntGPIOID;
		info->hpd_active = record->ucPlugged_PinState;
		return BP_RESULT_OK;
	}

	return BP_RESULT_NORECORD;
}

/*
 * Locate @object's connector-device-tag record and return it via @record.
 * Note the offset is advanced before the terminator check, so the scan
 * position already points past the record that matched.
 */
static enum bp_result bios_parser_get_device_tag_record(
	struct bios_parser *bp,
	ATOM_OBJECT *object,
	ATOM_CONNECTOR_DEVICE_TAG_RECORD **record)
{
	ATOM_COMMON_RECORD_HEADER *header;
	uint32_t offset;

	offset = le16_to_cpu(object->usRecordOffset)
			+ bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);

		if (!header)
			return BP_RESULT_BADBIOSTABLE;

		offset += header->ucRecordSize;

		if (LAST_RECORD_TYPE == header->ucRecordType ||
			!header->ucRecordSize)
			break;

		if (ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE !=
				header->ucRecordType)
			continue;

		if (sizeof(ATOM_CONNECTOR_DEVICE_TAG) > header->ucRecordSize)
			continue;

		*record = (ATOM_CONNECTOR_DEVICE_TAG_RECORD *) header;
		return BP_RESULT_OK;
	}

	return BP_RESULT_NORECORD;
}

/*
 * Translate device tag @device_tag_index of @connector_object_id into
 * @info (ACPI device enum and decoded device id).
 */
static enum bp_result bios_parser_get_device_tag(
	struct dc_bios *dcb,
	struct graphics_object_id connector_object_id,
	uint32_t device_tag_index,
	struct connector_device_tag_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	ATOM_OBJECT *object;
	ATOM_CONNECTOR_DEVICE_TAG_RECORD *record = NULL;
	ATOM_CONNECTOR_DEVICE_TAG *device_tag;

	if (!info)
		return BP_RESULT_BADINPUT;

	/* getBiosObject will return MXM object */
	object = get_bios_object(bp, connector_object_id);

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object id */
		return BP_RESULT_BADINPUT;
	}

	if (bios_parser_get_device_tag_record(bp, object, &record)
		!= BP_RESULT_OK)
		return BP_RESULT_NORECORD;

	if (device_tag_index >= record->ucNumberOfDevice)
		return BP_RESULT_NORECORD;

	device_tag = &record->asDeviceTag[device_tag_index];

	info->acpi_device = le32_to_cpu(device_tag->ulACPIDeviceEnum);
	info->dev_id = device_type_from_device_id(le16_to_cpu(device_tag->usDeviceID));
	return BP_RESULT_OK;
}

static enum bp_result get_firmware_info_v1_4(
	struct bios_parser *bp,
	struct dc_firmware_info *info);
static enum bp_result get_firmware_info_v2_1(
	struct bios_parser *bp,
	struct dc_firmware_info *info);
static enum bp_result get_firmware_info_v2_2(
	struct bios_parser *bp,
	struct dc_firmware_info *info);

/*
 * Dispatch on the FirmwareInfo data table revision to the matching parser.
 * NOTE(review): header from GET_IMAGE is passed to
 * get_atom_data_table_revision() without a NULL check, unlike other callers
 * in this file — confirm the helper tolerates NULL.
 */
static enum bp_result bios_parser_get_firmware_info(
	struct dc_bios *dcb,
	struct dc_firmware_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	enum bp_result result = BP_RESULT_BADBIOSTABLE;
	ATOM_COMMON_TABLE_HEADER *header;
	struct atom_data_revision revision;

	if (info && DATA_TABLES(FirmwareInfo)) {
		header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
				DATA_TABLES(FirmwareInfo));
		get_atom_data_table_revision(header, &revision);
		switch (revision.major) {
		case 1:
			switch (revision.minor) {
			case 4:
				result = get_firmware_info_v1_4(bp, info);
				break;
			default:
				break;
			}
			break;

		case 2:
			switch (revision.minor) {
			case 1:
				result = get_firmware_info_v2_1(bp, info);
				break;
			case 2:
				result = get_firmware_info_v2_2(bp, info);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

	return result;
}

/* Parse FirmwareInfo v1.4: PLL limits and conservative SS percentages. */
static enum bp_result get_firmware_info_v1_4(
	struct bios_parser *bp,
	struct dc_firmware_info *info)
{
	ATOM_FIRMWARE_INFO_V1_4 *firmware_info =
		GET_IMAGE(ATOM_FIRMWARE_INFO_V1_4,
			DATA_TABLES(FirmwareInfo));

	if (!info)
		return BP_RESULT_BADINPUT;

	if (!firmware_info)
		return BP_RESULT_BADBIOSTABLE;

	memset(info, 0, sizeof(*info));

	/* Pixel clock pll information.
We need to convert from 10KHz units into
	 * KHz units */
	info->pll_info.crystal_frequency =
		le16_to_cpu(firmware_info->usReferenceClock) * 10;
	info->pll_info.min_input_pxl_clk_pll_frequency =
		le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
	info->pll_info.max_input_pxl_clk_pll_frequency =
		le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
	info->pll_info.min_output_pxl_clk_pll_frequency =
		le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
	info->pll_info.max_output_pxl_clk_pll_frequency =
		le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;

	if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
		/* Since there is no information on the SS, report conservative
		 * value 3% for bandwidth calculation */
		/* unit of 0.01% */
		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;

	if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
		/* Since there is no information on the SS, report conservative
		 * value 3% for bandwidth calculation */
		/* unit of 0.01% */
		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;

	return BP_RESULT_OK;
}

static enum bp_result get_ss_info_v3_1(
	struct bios_parser *bp,
	uint32_t id,
	uint32_t index,
	struct spread_spectrum_info *ss_info);

/*
 * Parse FirmwareInfo v2.1: PLL limits, default engine/DP clocks, minimum
 * backlight level, and spread-spectrum percentages. When the capability
 * bit is set a conservative 3% is reported; otherwise the internal SS
 * table (v3.1) is consulted.
 */
static enum bp_result get_firmware_info_v2_1(
	struct bios_parser *bp,
	struct dc_firmware_info *info)
{
	ATOM_FIRMWARE_INFO_V2_1 *firmwareInfo =
		GET_IMAGE(ATOM_FIRMWARE_INFO_V2_1, DATA_TABLES(FirmwareInfo));
	struct spread_spectrum_info internalSS;
	uint32_t index;

	if (!info)
		return BP_RESULT_BADINPUT;

	if (!firmwareInfo)
		return BP_RESULT_BADBIOSTABLE;

	memset(info, 0, sizeof(*info));

	/* Pixel clock pll information. We need to convert from 10KHz units
	 * into KHz units */
	info->pll_info.crystal_frequency =
		le16_to_cpu(firmwareInfo->usCoreReferenceClock) * 10;
	info->pll_info.min_input_pxl_clk_pll_frequency =
		le16_to_cpu(firmwareInfo->usMinPixelClockPLL_Input) * 10;
	info->pll_info.max_input_pxl_clk_pll_frequency =
		le16_to_cpu(firmwareInfo->usMaxPixelClockPLL_Input) * 10;
	info->pll_info.min_output_pxl_clk_pll_frequency =
		le32_to_cpu(firmwareInfo->ulMinPixelClockPLL_Output) * 10;
	info->pll_info.max_output_pxl_clk_pll_frequency =
		le32_to_cpu(firmwareInfo->ulMaxPixelClockPLL_Output) * 10;
	info->default_display_engine_pll_frequency =
		le32_to_cpu(firmwareInfo->ulDefaultDispEngineClkFreq) * 10;
	info->external_clock_source_frequency_for_dp =
		le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
	info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;

	/* There should be only one entry in the SS info table for Memory Clock */
	index = 0;
	if (firmwareInfo->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
		/* Since there is no information for external SS, report
		 * conservative value 3% for bandwidth calculation */
		/* unit of 0.01% */
		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
	else if (get_ss_info_v3_1(bp,
		ASIC_INTERNAL_MEMORY_SS, index, &internalSS) == BP_RESULT_OK) {
		if (internalSS.spread_spectrum_percentage) {
			info->feature.memory_clk_ss_percentage =
				internalSS.spread_spectrum_percentage;
			if (internalSS.type.CENTER_MODE) {
				/* if it is centermode, the exact SS Percentage
				 * will be round up of half of the percentage
				 * reported in the SS table */
				++info->feature.memory_clk_ss_percentage;
				info->feature.memory_clk_ss_percentage /= 2;
			}
		}
	}

	/* There should be only one entry in the SS info table for Engine Clock */
	index = 1;
	if (firmwareInfo->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
		/* Since there is no information for external SS, report
		 * conservative value 3% for bandwidth calculation */
		/* unit of 0.01% */
		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
	else if (get_ss_info_v3_1(bp,
		ASIC_INTERNAL_ENGINE_SS, index, &internalSS) == BP_RESULT_OK) {
		if (internalSS.spread_spectrum_percentage) {
			info->feature.engine_clk_ss_percentage =
				internalSS.spread_spectrum_percentage;
			if (internalSS.type.CENTER_MODE) {
				/* if it is centermode, the exact SS Percentage
				 * will be round up of half of the percentage
				 * reported in the SS table */
				++info->feature.engine_clk_ss_percentage;
				info->feature.engine_clk_ss_percentage /= 2;
			}
		}
	}

	return BP_RESULT_OK;
}

/*
 * Parse FirmwareInfo v2.2: as v2.1, plus remote display configuration and
 * the SMU GPU PLL output frequency.
 */
static enum bp_result get_firmware_info_v2_2(
	struct bios_parser *bp,
	struct dc_firmware_info *info)
{
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	struct spread_spectrum_info internal_ss;
	uint32_t index;

	if (!info)
		return BP_RESULT_BADINPUT;

	firmware_info = GET_IMAGE(ATOM_FIRMWARE_INFO_V2_2,
		DATA_TABLES(FirmwareInfo));

	if (!firmware_info)
		return BP_RESULT_BADBIOSTABLE;

	memset(info, 0, sizeof(*info));

	/* Pixel clock pll information. We need to convert from 10KHz units
	 * into KHz units */
	info->pll_info.crystal_frequency =
		le16_to_cpu(firmware_info->usCoreReferenceClock) * 10;
	info->pll_info.min_input_pxl_clk_pll_frequency =
		le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
	info->pll_info.max_input_pxl_clk_pll_frequency =
		le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
	info->pll_info.min_output_pxl_clk_pll_frequency =
		le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
	info->pll_info.max_output_pxl_clk_pll_frequency =
		le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
	info->default_display_engine_pll_frequency =
		le32_to_cpu(firmware_info->ulDefaultDispEngineClkFreq) * 10;
	info->external_clock_source_frequency_for_dp =
		le16_to_cpu(firmware_info->usUniphyDPModeExtClkFreq) * 10;

	/* There should be only one entry in the SS info table for Memory Clock */
	index = 0;
	if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
		/* Since there is no information for external SS, report
* conservative value 3% for bandwidth calculation */
		/* unit of 0.01% */
		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
	else if (get_ss_info_v3_1(bp,
		ASIC_INTERNAL_MEMORY_SS, index, &internal_ss) == BP_RESULT_OK) {
		if (internal_ss.spread_spectrum_percentage) {
			info->feature.memory_clk_ss_percentage =
				internal_ss.spread_spectrum_percentage;
			if (internal_ss.type.CENTER_MODE) {
				/* if it is centermode, the exact SS Percentage
				 * will be round up of half of the percentage
				 * reported in the SS table */
				++info->feature.memory_clk_ss_percentage;
				info->feature.memory_clk_ss_percentage /= 2;
			}
		}
	}

	/* There should be only one entry in the SS info table for Engine Clock */
	index = 1;
	if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
		/* Since there is no information for external SS, report
		 * conservative value 3% for bandwidth calculation */
		/* unit of 0.01% */
		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
	else if (get_ss_info_v3_1(bp,
		ASIC_INTERNAL_ENGINE_SS, index, &internal_ss) == BP_RESULT_OK) {
		if (internal_ss.spread_spectrum_percentage) {
			info->feature.engine_clk_ss_percentage =
				internal_ss.spread_spectrum_percentage;
			if (internal_ss.type.CENTER_MODE) {
				/* if it is centermode, the exact SS Percentage
				 * will be round up of half of the percentage
				 * reported in the SS table */
				++info->feature.engine_clk_ss_percentage;
				info->feature.engine_clk_ss_percentage /= 2;
			}
		}
	}

	/* Remote Display */
	info->remote_display_config = firmware_info->ucRemoteDisplayConfig;

	/* Is allowed minimum BL level */
	info->min_allowed_bl_level = firmware_info->ucMinAllowedBL_Level;
	/* Used starting from CI */
	info->smu_gpu_pll_output_freq =
		(uint32_t) (le32_to_cpu(firmware_info->ulGPUPLL_OutputFreq) * 10);

	return BP_RESULT_OK;
}

/*
 * Look up the @index-th entry for clock @id in the ASIC_InternalSS_Info v3
 * table and fill @ss_info (clock range converted from 10KHz to KHz).
 */
static enum bp_result get_ss_info_v3_1(
	struct bios_parser *bp,
	uint32_t id,
	uint32_t index,
	struct spread_spectrum_info *ss_info)
{
	ATOM_ASIC_INTERNAL_SS_INFO_V3 *ss_table_header_include;
	ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
	uint32_t table_size;
	uint32_t i;
	uint32_t table_index = 0;

	if (!ss_info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(ASIC_InternalSS_Info))
		return BP_RESULT_UNSUPPORTED;

	ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
				DATA_TABLES(ASIC_InternalSS_Info),
				struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
	/* NOTE(review): bios_get_image() can return NULL (other callers in
	 * this file check it); the dereference below would fault then —
	 * confirm whether a NULL check is needed here. */
	table_size =
		(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
				- sizeof(ATOM_COMMON_TABLE_HEADER))
				/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);

	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
				&ss_table_header_include->asSpreadSpectrum[0];

	memset(ss_info, 0, sizeof(struct spread_spectrum_info));

	for (i = 0; i < table_size; i++) {
		if (tbl[i].ucClockIndication != (uint8_t) id)
			continue;

		if (table_index != index) {
			table_index++;
			continue;
		}
		/* VBIOS introduced new defines for Version 3, same values as
		 * before, so now use these new ones for Version 3.
		 * Shouldn't affect field VBIOS's V3 as define values are still
		 * same.
		 * #define SS_MODE_V3_CENTRE_SPREAD_MASK  0x01
		 * #define SS_MODE_V3_EXTERNAL_SS_MASK    0x02
		 * Old VBIOS defines:
		 * #define ATOM_SS_CENTRE_SPREAD_MODE_MASK  0x00000001
		 * #define ATOM_EXTERNAL_SS_MASK            0x00000002
		 */

		if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
			ss_info->type.EXTERNAL = true;

		if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
			ss_info->type.CENTER_MODE = true;

		/* Older VBIOS (in field) always provides SS percentage in
		 * 0.01% units, set Divider to 100 */
		ss_info->spread_percentage_divider = 100;

		/* #define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 */
		if (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK
				& tbl[i].ucSpreadSpectrumMode)
			ss_info->spread_percentage_divider = 1000;

		ss_info->type.STEP_AND_DELAY_INFO = false;
		/* convert [10KHz] into [KHz] */
		ss_info->target_clock_range =
				le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
		ss_info->spread_spectrum_percentage =
				(uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
		ss_info->spread_spectrum_range =
				(uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);

		return BP_RESULT_OK;
	}

	return BP_RESULT_NORECORD;
}

/*
 * The bios_parser_* functions below are thin dispatchers: they forward to
 * the revision-specific command-table implementation selected at init, and
 * fail when no implementation was registered.
 */

static enum bp_result bios_parser_transmitter_control(
	struct dc_bios *dcb,
	struct bp_transmitter_control *cntl)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.transmitter_control)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.transmitter_control(bp, cntl);
}

static enum bp_result bios_parser_encoder_control(
	struct dc_bios *dcb,
	struct bp_encoder_control *cntl)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.dig_encoder_control)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.dig_encoder_control(bp, cntl);
}

static enum bp_result bios_parser_adjust_pixel_clock(
	struct dc_bios *dcb,
	struct bp_adjust_pixel_clock_parameters *bp_params)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.adjust_display_pll)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.adjust_display_pll(bp, bp_params);
}

static enum bp_result bios_parser_set_pixel_clock(
	struct dc_bios *dcb,
	struct bp_pixel_clock_parameters *bp_params)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.set_pixel_clock)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
}

static enum bp_result bios_parser_set_dce_clock(
	struct dc_bios *dcb,
	struct bp_set_dce_clock_parameters *bp_params)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.set_dce_clock)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.set_dce_clock(bp, bp_params);
}

static enum bp_result bios_parser_enable_spread_spectrum_on_ppll(
	struct dc_bios *dcb,
	struct bp_spread_spectrum_parameters *bp_params,
	bool enable)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.enable_spread_spectrum_on_ppll)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.enable_spread_spectrum_on_ppll(
			bp, bp_params, enable);
}

static enum bp_result bios_parser_program_crtc_timing(
	struct dc_bios *dcb,
	struct bp_hw_crtc_timing_parameters *bp_params)
{
	struct bios_parser *bp =
BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.set_crtc_timing)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
}

static enum bp_result bios_parser_program_display_engine_pll(
	struct dc_bios *dcb,
	struct bp_pixel_clock_parameters *bp_params)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.program_clock)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.program_clock(bp, bp_params);
}

static enum bp_result bios_parser_enable_crtc(
	struct dc_bios *dcb,
	enum controller_id id,
	bool enable)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.enable_crtc)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.enable_crtc(bp, id, enable);
}

static enum bp_result bios_parser_enable_disp_power_gating(
	struct dc_bios *dcb,
	enum controller_id controller_id,
	enum bp_pipe_control_action action)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	if (!bp->cmd_tbl.enable_disp_power_gating)
		return BP_RESULT_FAILURE;

	return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
		action);
}

/* Check the object info table's device-support mask for device @id. */
static bool bios_parser_is_device_id_supported(
	struct dc_bios *dcb,
	struct device_id id)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);

	uint32_t mask = get_support_mask_for_device_id(id);

	return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
}

/* Walk @object's record list and return its HPD interrupt record, or NULL
 * if the object is invalid or no such record exists. */
static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
	ATOM_OBJECT *object)
{
	ATOM_COMMON_RECORD_HEADER *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	offset = le16_to_cpu(object->usRecordOffset)
			+ bp->object_info_tbl_offset;

	for (;;) {
		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);

		if (!header)
			return NULL;

		if (LAST_RECORD_TYPE == header->ucRecordType ||
			!header->ucRecordSize)
			break;

		if (ATOM_HPD_INT_RECORD_TYPE == header->ucRecordType
			&& sizeof(ATOM_HPD_INT_RECORD) <= header->ucRecordSize)
			return (ATOM_HPD_INT_RECORD *) header;

		offset += header->ucRecordSize;
	}

	return NULL;
}

static enum bp_result get_ss_info_from_ss_info_table(
	struct bios_parser *bp,
	uint32_t id,
	struct spread_spectrum_info *ss_info);
static enum bp_result get_ss_info_from_tbl(
	struct bios_parser *bp,
	uint32_t id,
	struct spread_spectrum_info *ss_info);

/**
 * bios_parser_get_spread_spectrum_info
 * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
 * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
 * ver 2.1 can co-exist with SS_Info table. Except ASIC_InternalSS_Info
 * ver 3.1, there is only one entry for each signal/ss id. However, there is
 * no plan of supporting multiple spread spectrum entries for EverGreen.
 * @dcb: pointer to the DC BIOS
 * @signal: ASSignalType to be converted to info index
 * @index: number of entries that match the converted info index
 * @ss_info: spectrum information structure
 * return: Bios parser result code
 */
static enum bp_result bios_parser_get_spread_spectrum_info(
	struct dc_bios *dcb,
	enum as_signal_type signal,
	uint32_t index,
	struct spread_spectrum_info *ss_info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	enum bp_result result = BP_RESULT_UNSUPPORTED;
	uint32_t clk_id_ss = 0;
	ATOM_COMMON_TABLE_HEADER *header;
	struct atom_data_revision tbl_revision;

	if (!ss_info) /* check for bad input */
		return BP_RESULT_BADINPUT;
	/* signal translation */
	clk_id_ss = signal_to_ss_id(signal);

	/* no internal table: fall back to the legacy SS_Info table, which
	 * can only satisfy index 0 */
	if (!DATA_TABLES(ASIC_InternalSS_Info))
		if (!index)
			return get_ss_info_from_ss_info_table(bp, clk_id_ss,
				ss_info);

	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
		DATA_TABLES(ASIC_InternalSS_Info));
	get_atom_data_table_revision(header, &tbl_revision);

	switch (tbl_revision.major) {
	case 2:
		switch (tbl_revision.minor) {
		case 1:
			/* there can not be more than one entry for Internal
			 * SS Info table version 2.1 */
			if (!index)
				return get_ss_info_from_tbl(bp, clk_id_ss,
						ss_info);
			break;
		default:
			break;
		}
		break;

	case 3:
		switch (tbl_revision.minor) {
		case 1:
			return get_ss_info_v3_1(bp, clk_id_ss, index, ss_info);
		default:
			break;
		}
		break;
	default:
		break;
	}
	/* there can not be more than one entry for SS Info table */
	return result;
}

static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
	struct bios_parser *bp,
	uint32_t id,
	struct spread_spectrum_info *info);

/**
 * get_ss_info_from_tbl
 * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
 * SS_Info table from the VBIOS.
 * There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
 * SS_Info.
 *
 * @bp: pointer to the BIOS parser
 * @id: spread spectrum info index
 * @ss_info: spectrum information structure
 * return: BIOS parser result code
 */
static enum bp_result get_ss_info_from_tbl(
	struct bios_parser *bp,
	uint32_t id,
	struct spread_spectrum_info *ss_info)
{
	if (!ss_info) /* check for bad input, if ss_info is not NULL */
		return BP_RESULT_BADINPUT;
	/* for SS_Info table only support DP and LVDS */
	if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
		return get_ss_info_from_ss_info_table(bp, id, ss_info);
	else
		return get_ss_info_from_internal_ss_info_tbl_V2_1(bp, id,
				ss_info);
}

/**
 * get_ss_info_from_internal_ss_info_tbl_V2_1
 * Get spread spectrum information from the ASIC_InternalSS_Info table Ver 2.1
 * from the VBIOS.
 * There will not be multiple entries for Ver 2.1.
 *
 * @bp: pointer to the Bios parser
 * @id: spread spectrum info index
 * @info: spectrum information structure
 * return: Bios parser result code
 */
static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
	struct bios_parser *bp,
	uint32_t id,
	struct spread_spectrum_info *info)
{
	enum bp_result result = BP_RESULT_UNSUPPORTED;
	ATOM_ASIC_INTERNAL_SS_INFO_V2 *header;
	ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
	uint32_t tbl_size, i;

	if (!DATA_TABLES(ASIC_InternalSS_Info))
		return result;

	header = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image(
				&bp->base,
				DATA_TABLES(ASIC_InternalSS_Info),
				struct_size(header, asSpreadSpectrum, 1)));
	/* NOTE(review): header is dereferenced below without a NULL check,
	 * unlike other bios_get_image() callers in this file — confirm. */

	memset(info, 0, sizeof(struct spread_spectrum_info));

	tbl_size = (le16_to_cpu(header->sHeader.usStructureSize)
			- sizeof(ATOM_COMMON_TABLE_HEADER))
			/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);

	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
				&(header->asSpreadSpectrum[0]);
	for (i = 0; i < tbl_size; i++) {
		result = BP_RESULT_NORECORD;

		if (tbl[i].ucClockIndication != (uint8_t)id)
			continue;

		if (ATOM_EXTERNAL_SS_MASK
			& tbl[i].ucSpreadSpectrumMode) {
			info->type.EXTERNAL = true;
		}
		if (ATOM_SS_CENTRE_SPREAD_MODE_MASK
			& tbl[i].ucSpreadSpectrumMode) {
			info->type.CENTER_MODE = true;
		}
		info->type.STEP_AND_DELAY_INFO = false;
		/* convert [10KHz] into [KHz] */
		info->target_clock_range =
			le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
		info->spread_spectrum_percentage =
			(uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
		info->spread_spectrum_range =
			(uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
		result = BP_RESULT_OK;
		break;
	}

	return result;
}

/**
 * get_ss_info_from_ss_info_table
 * Get spread spectrum information from the SS_Info table from the VBIOS.
 * If the pointer to info is NULL, indicate to the caller the number
 * of entries that match the id.
 * For the SS_Info table, there should not be more than 1 entry match.
* * @bp: pointer to the Bios parser * @id: spread sprectrum id * @ss_info: sprectrum information structure, * return: Bios parser result code */ static enum bp_result get_ss_info_from_ss_info_table( struct bios_parser *bp, uint32_t id, struct spread_spectrum_info *ss_info) { enum bp_result result = BP_RESULT_UNSUPPORTED; ATOM_SPREAD_SPECTRUM_INFO *tbl; ATOM_COMMON_TABLE_HEADER *header; uint32_t table_size; uint32_t i; uint32_t id_local = SS_ID_UNKNOWN; struct atom_data_revision revision; /* exist of the SS_Info table */ /* check for bad input, pSSinfo can not be NULL */ if (!DATA_TABLES(SS_Info) || !ss_info) return result; header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(SS_Info)); get_atom_data_table_revision(header, &revision); tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info)); if (1 != revision.major || 2 > revision.minor) return result; /* have to convert from Internal_SS format to SS_Info format */ switch (id) { case ASIC_INTERNAL_SS_ON_DP: id_local = SS_ID_DP1; break; case ASIC_INTERNAL_SS_ON_LVDS: { struct embedded_panel_info panel_info; if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info) == BP_RESULT_OK) id_local = panel_info.ss_id; break; } default: break; } if (id_local == SS_ID_UNKNOWN) return result; table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); for (i = 0; i < table_size; i++) { if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id) continue; memset(ss_info, 0, sizeof(struct spread_spectrum_info)); if (ATOM_EXTERNAL_SS_MASK & tbl->asSS_Info[i].ucSpreadSpectrumType) ss_info->type.EXTERNAL = true; if (ATOM_SS_CENTRE_SPREAD_MODE_MASK & tbl->asSS_Info[i].ucSpreadSpectrumType) ss_info->type.CENTER_MODE = true; ss_info->type.STEP_AND_DELAY_INFO = true; ss_info->spread_spectrum_percentage = (uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage); ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step; 
ss_info->step_and_delay_info.delay = tbl->asSS_Info[i].ucSS_Delay; ss_info->step_and_delay_info.recommended_ref_div = tbl->asSS_Info[i].ucRecommendedRef_Div; ss_info->spread_spectrum_range = (uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000; /* there will be only one entry for each display type in SS_info * table */ result = BP_RESULT_OK; break; } return result; } static enum bp_result get_embedded_panel_info_v1_2( struct bios_parser *bp, struct embedded_panel_info *info); static enum bp_result get_embedded_panel_info_v1_3( struct bios_parser *bp, struct embedded_panel_info *info); static enum bp_result bios_parser_get_embedded_panel_info( struct dc_bios *dcb, struct embedded_panel_info *info) { struct bios_parser *bp = BP_FROM_DCB(dcb); ATOM_COMMON_TABLE_HEADER *hdr; if (!DATA_TABLES(LCD_Info)) return BP_RESULT_FAILURE; hdr = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(LCD_Info)); if (!hdr) return BP_RESULT_BADBIOSTABLE; switch (hdr->ucTableFormatRevision) { case 1: switch (hdr->ucTableContentRevision) { case 0: case 1: case 2: return get_embedded_panel_info_v1_2(bp, info); case 3: return get_embedded_panel_info_v1_3(bp, info); default: break; } break; default: break; } return BP_RESULT_FAILURE; } static enum bp_result get_embedded_panel_info_v1_2( struct bios_parser *bp, struct embedded_panel_info *info) { ATOM_LVDS_INFO_V12 *lvds; if (!info) return BP_RESULT_BADINPUT; if (!DATA_TABLES(LVDS_Info)) return BP_RESULT_UNSUPPORTED; lvds = GET_IMAGE(ATOM_LVDS_INFO_V12, DATA_TABLES(LVDS_Info)); if (!lvds) return BP_RESULT_BADBIOSTABLE; if (1 != lvds->sHeader.ucTableFormatRevision || 2 > lvds->sHeader.ucTableContentRevision) return BP_RESULT_UNSUPPORTED; memset(info, 0, sizeof(struct embedded_panel_info)); /* We need to convert from 10KHz units into KHz units*/ info->lcd_timing.pixel_clk = le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10; /* usHActive does not include borders, according to VBIOS team*/ info->lcd_timing.horizontal_addressable = 
		le16_to_cpu(lvds->sLCDTiming.usHActive);
	/* usHBlanking_Time includes borders, so we should really be subtracting
	 * borders duing this translation, but LVDS generally*/
	/* doesn't have borders, so we should be okay leaving this as is for
	 * now. May need to revisit if we ever have LVDS with borders*/
	info->lcd_timing.horizontal_blanking_time =
		le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
	/* usVActive does not include borders, according to VBIOS team*/
	info->lcd_timing.vertical_addressable =
		le16_to_cpu(lvds->sLCDTiming.usVActive);
	/* usVBlanking_Time includes borders, so we should really be subtracting
	 * borders duing this translation, but LVDS generally*/
	/* doesn't have borders, so we should be okay leaving this as is for
	 * now. May need to revisit if we ever have LVDS with borders*/
	info->lcd_timing.vertical_blanking_time =
		le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
	info->lcd_timing.horizontal_sync_offset =
		le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
	info->lcd_timing.horizontal_sync_width =
		le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
	info->lcd_timing.vertical_sync_offset =
		le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
	info->lcd_timing.vertical_sync_width =
		le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
	info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
	info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
	/* NOTE(review): bitwise NOT truncated into a 1-bit field —
	 * presumably inverts the VBIOS polarity encoding into the driver's
	 * convention; confirm. */
	info->lcd_timing.misc_info.H_SYNC_POLARITY =
		~(uint32_t)
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
	info->lcd_timing.misc_info.V_SYNC_POLARITY =
		~(uint32_t)
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
	info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
	info->lcd_timing.misc_info.H_REPLICATION_BY2 =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
	info->lcd_timing.misc_info.V_REPLICATION_BY2 =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
	info->lcd_timing.misc_info.COMPOSITE_SYNC =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
	info->lcd_timing.misc_info.INTERLACE =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
	info->lcd_timing.misc_info.DOUBLE_CLOCK =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
	info->ss_id = lvds->ucSS_Id;

	{
		/* NOTE(review): usSupportedRefreshRate is 16 bits but is
		 * truncated into a uint8_t here, so only the low byte of the
		 * refresh-rate mask is examined — confirm this is intended. */
		uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
		/* Get minimum supported refresh rate*/
		if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
			info->supported_rr.REFRESH_RATE_30HZ = 1;
		else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
			info->supported_rr.REFRESH_RATE_40HZ = 1;
		else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
			info->supported_rr.REFRESH_RATE_48HZ = 1;
		else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
			info->supported_rr.REFRESH_RATE_50HZ = 1;
		else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
			info->supported_rr.REFRESH_RATE_60HZ = 1;
	}

	/*Drr panel support can be reported by VBIOS*/
	if (LCDPANEL_CAP_DRR_SUPPORTED &
			lvds->ucLCDPanel_SpecialHandlingCap)
		info->drr_enabled = 1;

	if (ATOM_PANEL_MISC_DUAL & lvds->ucLVDS_Misc)
		info->lcd_timing.misc_info.DOUBLE_CLOCK = true;

	if (ATOM_PANEL_MISC_888RGB & lvds->ucLVDS_Misc)
		info->lcd_timing.misc_info.RGB888 = true;

	info->lcd_timing.misc_info.GREY_LEVEL =
		(uint32_t) (ATOM_PANEL_MISC_GREY_LEVEL &
			lvds->ucLVDS_Misc) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;

	if (ATOM_PANEL_MISC_SPATIAL & lvds->ucLVDS_Misc)
		info->lcd_timing.misc_info.SPATIAL = true;

	if (ATOM_PANEL_MISC_TEMPORAL & lvds->ucLVDS_Misc)
		info->lcd_timing.misc_info.TEMPORAL = true;

	if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc)
		info->lcd_timing.misc_info.API_ENABLED = true;

	return BP_RESULT_OK;
}

/* Parse ATOM_LCD_INFO_V13 (LCD_Info content revision 3) into
 * embedded_panel_info. */
static enum bp_result get_embedded_panel_info_v1_3(
	struct bios_parser *bp,
	struct embedded_panel_info *info)
{
	ATOM_LCD_INFO_V13 *lvds;

	if (!info)
		return BP_RESULT_BADINPUT;

	if (!DATA_TABLES(LCD_Info))
		return BP_RESULT_UNSUPPORTED;

	lvds = GET_IMAGE(ATOM_LCD_INFO_V13, DATA_TABLES(LCD_Info));

	if (!lvds)
		return
			BP_RESULT_BADBIOSTABLE;

	if (!((1 == lvds->sHeader.ucTableFormatRevision)
			&& (3 <= lvds->sHeader.ucTableContentRevision)))
		return BP_RESULT_UNSUPPORTED;

	memset(info, 0, sizeof(struct embedded_panel_info));

	/* We need to convert from 10KHz units into KHz units */
	info->lcd_timing.pixel_clk =
		le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
	/* usHActive does not include borders, according to VBIOS team */
	info->lcd_timing.horizontal_addressable =
		le16_to_cpu(lvds->sLCDTiming.usHActive);
	/* usHBlanking_Time includes borders, so we should really be subtracting
	 * borders duing this translation, but LVDS generally*/
	/* doesn't have borders, so we should be okay leaving this as is for
	 * now. May need to revisit if we ever have LVDS with borders*/
	info->lcd_timing.horizontal_blanking_time =
		le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
	/* usVActive does not include borders, according to VBIOS team*/
	info->lcd_timing.vertical_addressable =
		le16_to_cpu(lvds->sLCDTiming.usVActive);
	/* usVBlanking_Time includes borders, so we should really be subtracting
	 * borders duing this translation, but LVDS generally*/
	/* doesn't have borders, so we should be okay leaving this as is for
	 * now.
	 * May need to revisit if we ever have LVDS with borders*/
	info->lcd_timing.vertical_blanking_time =
		le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
	info->lcd_timing.horizontal_sync_offset =
		le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
	info->lcd_timing.horizontal_sync_width =
		le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
	info->lcd_timing.vertical_sync_offset =
		le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
	info->lcd_timing.vertical_sync_width =
		le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
	info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
	info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
	/* NOTE(review): bitwise NOT truncated into a 1-bit field —
	 * presumably inverts the VBIOS polarity encoding; confirm. */
	info->lcd_timing.misc_info.H_SYNC_POLARITY =
		~(uint32_t)
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
	info->lcd_timing.misc_info.V_SYNC_POLARITY =
		~(uint32_t)
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
	info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
	info->lcd_timing.misc_info.H_REPLICATION_BY2 =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
	info->lcd_timing.misc_info.V_REPLICATION_BY2 =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
	info->lcd_timing.misc_info.COMPOSITE_SYNC =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
	info->lcd_timing.misc_info.INTERLACE =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
	info->lcd_timing.misc_info.DOUBLE_CLOCK =
		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
	info->ss_id = lvds->ucSS_Id;

	/* Drr panel support can be reported by VBIOS*/
	if (LCDPANEL_CAP_V13_DRR_SUPPORTED &
			lvds->ucLCDPanel_SpecialHandlingCap)
		info->drr_enabled = 1;
	/* Get supported refresh rate*/
	if (info->drr_enabled == 1) {
		uint8_t min_rr =
			lvds->sRefreshRateSupport.ucMinRefreshRateForDRR;
		uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;

		/* Prefer the DRR minimum refresh rate when one is given. */
		if (min_rr != 0) {
			if (SUPPORTED_LCD_REFRESHRATE_30Hz & min_rr)
				info->supported_rr.REFRESH_RATE_30HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_40Hz & min_rr)
				info->supported_rr.REFRESH_RATE_40HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_48Hz & min_rr)
				info->supported_rr.REFRESH_RATE_48HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_50Hz & min_rr)
				info->supported_rr.REFRESH_RATE_50HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_60Hz & min_rr)
				info->supported_rr.REFRESH_RATE_60HZ = 1;
		} else {
			if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
				info->supported_rr.REFRESH_RATE_30HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
				info->supported_rr.REFRESH_RATE_40HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
				info->supported_rr.REFRESH_RATE_48HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
				info->supported_rr.REFRESH_RATE_50HZ = 1;
			else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
				info->supported_rr.REFRESH_RATE_60HZ = 1;
		}
	}

	if (ATOM_PANEL_MISC_V13_DUAL & lvds->ucLCD_Misc)
		info->lcd_timing.misc_info.DOUBLE_CLOCK = true;

	if (ATOM_PANEL_MISC_V13_8BIT_PER_COLOR & lvds->ucLCD_Misc)
		info->lcd_timing.misc_info.RGB888 = true;

	info->lcd_timing.misc_info.GREY_LEVEL =
		(uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL &
			lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;

	return BP_RESULT_OK;
}

/**
 * bios_parser_get_encoder_cap_info - get encoder capability
 * information of input object id
 *
 * @dcb: pointer to the DC BIOS
 * @object_id: object id
 * @info: encoder cap information structure
 *
 * return: Bios parser result code
 */
static enum bp_result bios_parser_get_encoder_cap_info(
	struct dc_bios *dcb,
	struct graphics_object_id object_id,
	struct bp_encoder_cap_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	ATOM_OBJECT *object;
	ATOM_ENCODER_CAP_RECORD_V2 *record = NULL;

	if (!info)
		return BP_RESULT_BADINPUT;

	object = get_bios_object(bp, object_id);

	if (!object)
		return BP_RESULT_BADINPUT;

	record = get_encoder_cap_record(bp, object);
	if (!record)
		return BP_RESULT_NORECORD;

	info->DP_HBR2_EN = record->usHBR2En;
	info->DP_HBR3_EN = record->usHBR3En;
	info->HDMI_6GB_EN = record->usHDMI6GEn;

	return BP_RESULT_OK;
}

/**
 * get_encoder_cap_record - Get encoder cap record for the object
 *
 * @bp: pointer to the BIOS parser
 * @object: ATOM object
 * return: atom encoder cap record
 * note: search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
 */
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
	struct bios_parser *bp,
	ATOM_OBJECT *object)
{
	ATOM_COMMON_RECORD_HEADER *header;
	uint32_t offset;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object */
		return NULL;
	}

	offset = le16_to_cpu(object->usRecordOffset)
			+ bp->object_info_tbl_offset;

	/* Walk the record list until the terminator record; a record of
	 * size 0 also terminates, which keeps this loop from spinning
	 * forever on a corrupt table. */
	for (;;) {
		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);

		if (!header)
			return NULL;

		offset += header->ucRecordSize;

		if (LAST_RECORD_TYPE == header->ucRecordType ||
				!header->ucRecordSize)
			break;

		if (ATOM_ENCODER_CAP_RECORD_TYPE != header->ucRecordType)
			continue;

		/* Only accept records large enough to hold the V2 layout. */
		if (sizeof(ATOM_ENCODER_CAP_RECORD_V2) <= header->ucRecordSize)
			return (ATOM_ENCODER_CAP_RECORD_V2 *)header;
	}

	return NULL;
}

static uint32_t get_ss_entry_number(
	struct bios_parser *bp,
	uint32_t id);
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
	struct bios_parser *bp,
	uint32_t id);
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
	struct bios_parser *bp,
	uint32_t id);
static uint32_t get_ss_entry_number_from_ss_info_tbl(
	struct bios_parser *bp,
	uint32_t id);

/**
 * bios_parser_get_ss_entry_number
 * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table from
 * the VBIOS that match the SSid (to be converted from signal)
 *
 * @dcb: pointer to the DC BIOS
 * @signal: ASSignalType to be converted to SSid
 * return: number of SS Entry that match the signal
 */
static uint32_t bios_parser_get_ss_entry_number(
	struct dc_bios *dcb,
	enum as_signal_type signal)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	uint32_t ss_id = 0;
	ATOM_COMMON_TABLE_HEADER *header;
	struct atom_data_revision revision;

	ss_id = signal_to_ss_id(signal);

	if (!DATA_TABLES(ASIC_InternalSS_Info))
		return get_ss_entry_number_from_ss_info_tbl(bp, ss_id);

	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
			DATA_TABLES(ASIC_InternalSS_Info));

	get_atom_data_table_revision(header, &revision);

	/* dispatch on the internal SS table revision */
	switch (revision.major) {
	case 2:
		switch (revision.minor) {
		case 1:
			return get_ss_entry_number(bp, ss_id);
		default:
			break;
		}
		break;
	case 3:
		switch (revision.minor) {
		case 1:
			return
				get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
						bp, ss_id);
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}

/**
 * get_ss_entry_number_from_ss_info_tbl
 * Get Number of spread spectrum entry from the SS_Info table from the VBIOS.
 *
 * @bp: pointer to the BIOS parser
 * @id: spread spectrum id
 * return: number of SS Entry that match the id
 * note: There can only be one entry for each id for SS_Info Table
 */
static uint32_t get_ss_entry_number_from_ss_info_tbl(
	struct bios_parser *bp,
	uint32_t id)
{
	ATOM_SPREAD_SPECTRUM_INFO *tbl;
	ATOM_COMMON_TABLE_HEADER *header;
	uint32_t table_size;
	uint32_t i;
	uint32_t number = 0;
	uint32_t id_local = SS_ID_UNKNOWN;
	struct atom_data_revision revision;

	/* SS_Info table exist */
	if (!DATA_TABLES(SS_Info))
		return number;

	/* NOTE(review): header/tbl returned by GET_IMAGE are used without
	 * NULL checks below — a malformed VBIOS image could fault here;
	 * confirm whether the helper guarantees a mapping. */
	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
			DATA_TABLES(SS_Info));

	get_atom_data_table_revision(header, &revision);

	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
			DATA_TABLES(SS_Info));

	if (1 != revision.major || 2 > revision.minor)
		return number;

	/* have to convert from Internal_SS format to SS_Info format */
	switch (id) {
	case ASIC_INTERNAL_SS_ON_DP:
		id_local = SS_ID_DP1;
		break;
	case ASIC_INTERNAL_SS_ON_LVDS: {
		struct embedded_panel_info panel_info;

		if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
				== BP_RESULT_OK)
			id_local = panel_info.ss_id;
		break;
	}
	default:
		break;
	}

	if (id_local == SS_ID_UNKNOWN)
		return number;

	table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
			sizeof(ATOM_COMMON_TABLE_HEADER)) /
					sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);

	for (i = 0; i < table_size; i++)
		if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
number = 1; break; } return number; } /** * get_ss_entry_number * Get spread sprectrum information from the ASIC_InternalSS_Info Ver 2.1 or * SS_Info table from the VBIOS * There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or * SS_Info. * * @bp: pointer to the BIOS parser * @id: spread sprectrum info index * return: Bios parser result code */ static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id) { if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS) return get_ss_entry_number_from_ss_info_tbl(bp, id); return get_ss_entry_number_from_internal_ss_info_tbl_v2_1(bp, id); } /** * get_ss_entry_number_from_internal_ss_info_tbl_v2_1 * Get NUmber of spread sprectrum entry from the ASIC_InternalSS_Info table * Ver 2.1 from the VBIOS * There will not be multiple entry for Ver 2.1 * * @bp: pointer to the BIOS parser * @id: spread sprectrum info index * return: number of SS Entry that match the id */ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1( struct bios_parser *bp, uint32_t id) { ATOM_ASIC_INTERNAL_SS_INFO_V2 *header_include; ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl; uint32_t size; uint32_t i; if (!DATA_TABLES(ASIC_InternalSS_Info)) return 0; header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image( &bp->base, DATA_TABLES(ASIC_InternalSS_Info), struct_size(header_include, asSpreadSpectrum, 1))); size = (le16_to_cpu(header_include->sHeader.usStructureSize) - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *) &header_include->asSpreadSpectrum[0]; for (i = 0; i < size; i++) if (tbl[i].ucClockIndication == (uint8_t)id) return 1; return 0; } /** * get_ss_entry_number_from_internal_ss_info_tbl_V3_1 * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of * the VBIOS that matches id * * @bp: pointer to the BIOS parser * @id: spread sprectrum id * return: number of SS Entry that match the id */ static uint32_t 
get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
	struct bios_parser *bp,
	uint32_t id)
{
	uint32_t number = 0;
	ATOM_ASIC_INTERNAL_SS_INFO_V3 *header_include;
	ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
	uint32_t size;
	uint32_t i;

	if (!DATA_TABLES(ASIC_InternalSS_Info))
		return number;

	header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
				DATA_TABLES(ASIC_InternalSS_Info),
				struct_size(header_include, asSpreadSpectrum, 1)));
	/* bios_get_image returns NULL for a truncated/corrupt image; do not
	 * dereference it blindly. */
	if (!header_include)
		return number;

	size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
			sizeof(ATOM_COMMON_TABLE_HEADER)) /
					sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);

	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
				&header_include->asSpreadSpectrum[0];

	/* Unlike v2.1, a V3.1 table may carry multiple entries per clock
	 * id; count them all. */
	for (i = 0; i < size; i++)
		if (tbl[i].ucClockIndication == (uint8_t)id)
			number++;

	return number;
}

/**
 * bios_parser_get_gpio_pin_info
 * Get GpioPin information of input gpio id
 *
 * @dcb: pointer to the DC BIOS
 * @gpio_id: GPIO ID
 * @info: GpioPin information structure
 * return: Bios parser result code
 * note:
 * to get the GPIO PIN INFO, we need:
 * 1. get the GPIO_ID from other object table, see GetHPDInfo()
 * 2.
 * in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
 * offset/mask
 */
static enum bp_result bios_parser_get_gpio_pin_info(
	struct dc_bios *dcb,
	uint32_t gpio_id,
	struct gpio_pin_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	ATOM_GPIO_PIN_LUT *header;
	uint32_t count = 0;
	uint32_t i = 0;

	if (!DATA_TABLES(GPIO_Pin_LUT))
		return BP_RESULT_BADBIOSTABLE;

	header = ((ATOM_GPIO_PIN_LUT *) bios_get_image(&bp->base,
				DATA_TABLES(GPIO_Pin_LUT),
				struct_size(header, asGPIO_Pin, 1)));
	if (!header)
		return BP_RESULT_BADBIOSTABLE;

	/* The declared structure size must cover at least one pin entry. */
	if (sizeof(ATOM_COMMON_TABLE_HEADER) +
			struct_size(header, asGPIO_Pin, 1)
			> le16_to_cpu(header->sHeader.usStructureSize))
		return BP_RESULT_BADBIOSTABLE;

	if (1 != header->sHeader.ucTableContentRevision)
		return BP_RESULT_UNSUPPORTED;

	count = (le16_to_cpu(header->sHeader.usStructureSize)
			- sizeof(ATOM_COMMON_TABLE_HEADER))
				/ sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
	for (i = 0; i < count; ++i) {
		if (header->asGPIO_Pin[i].ucGPIO_ID != gpio_id)
			continue;

		info->offset =
			(uint32_t) le16_to_cpu(
					header->asGPIO_Pin[i].usGpioPin_AIndex);
		info->offset_y = info->offset + 2;
		info->offset_en = info->offset + 1;
		info->offset_mask = info->offset - 1;

		info->mask = (uint32_t) (1 <<
			header->asGPIO_Pin[i].ucGpioPinBitShift);
		/* NOTE(review): mask_y/mask_en/mask_mask are derived by
		 * adding/subtracting small constants from a one-hot bit
		 * mask, mirroring the offset_* pattern above — looks
		 * suspicious for a mask; confirm against register layout. */
		info->mask_y = info->mask + 2;
		info->mask_en = info->mask + 1;
		info->mask_mask = info->mask - 1;

		return BP_RESULT_OK;
	}

	return BP_RESULT_NORECORD;
}

/* Translate an ATOM_I2C_RECORD into a graphics_object_i2c_info using the
 * GPIO_I2C_Info LUT from the VBIOS. */
static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
	ATOM_I2C_RECORD *record,
	struct graphics_object_i2c_info *info)
{
	ATOM_GPIO_I2C_INFO *header;
	uint32_t count = 0;

	if (!info)
		return BP_RESULT_BADINPUT;

	/* get the GPIO_I2C info */
	if (!DATA_TABLES(GPIO_I2C_Info))
		return BP_RESULT_BADBIOSTABLE;

	header = GET_IMAGE(ATOM_GPIO_I2C_INFO, DATA_TABLES(GPIO_I2C_Info));

	if (!header)
		return BP_RESULT_BADBIOSTABLE;

	if (sizeof(ATOM_COMMON_TABLE_HEADER) +
			sizeof(ATOM_GPIO_I2C_ASSIGMENT)
			> le16_to_cpu(header->sHeader.usStructureSize))
		return BP_RESULT_BADBIOSTABLE;

	if (1 !=
header->sHeader.ucTableContentRevision) return BP_RESULT_UNSUPPORTED; /* get data count */ count = (le16_to_cpu(header->sHeader.usStructureSize) - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); if (count < record->sucI2cId.bfI2C_LineMux) return BP_RESULT_BADBIOSTABLE; /* get the GPIO_I2C_INFO */ info->i2c_hw_assist = record->sucI2cId.bfHW_Capable; info->i2c_line = record->sucI2cId.bfI2C_LineMux; info->i2c_engine_id = record->sucI2cId.bfHW_EngineID; info->i2c_slave_address = record->ucI2CAddr; info->gpio_info.clk_mask_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkMaskRegisterIndex); info->gpio_info.clk_en_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkEnRegisterIndex); info->gpio_info.clk_y_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkY_RegisterIndex); info->gpio_info.clk_a_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkA_RegisterIndex); info->gpio_info.data_mask_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataMaskRegisterIndex); info->gpio_info.data_en_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataEnRegisterIndex); info->gpio_info.data_y_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataY_RegisterIndex); info->gpio_info.data_a_register_index = le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataA_RegisterIndex); info->gpio_info.clk_mask_shift = header->asGPIO_Info[info->i2c_line].ucClkMaskShift; info->gpio_info.clk_en_shift = header->asGPIO_Info[info->i2c_line].ucClkEnShift; info->gpio_info.clk_y_shift = header->asGPIO_Info[info->i2c_line].ucClkY_Shift; info->gpio_info.clk_a_shift = header->asGPIO_Info[info->i2c_line].ucClkA_Shift; info->gpio_info.data_mask_shift = header->asGPIO_Info[info->i2c_line].ucDataMaskShift; info->gpio_info.data_en_shift = header->asGPIO_Info[info->i2c_line].ucDataEnShift; info->gpio_info.data_y_shift = header->asGPIO_Info[info->i2c_line].ucDataY_Shift; 
	info->gpio_info.data_a_shift =
		header->asGPIO_Info[info->i2c_line].ucDataA_Shift;

	return BP_RESULT_OK;
}

/* A graphics_object_id is valid when its type is known and (except for
 * GPU/ENGINE objects, whose id may legitimately be 0) both id and enum_id
 * are populated. */
static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
{
	bool rc = true;

	switch (id.type) {
	case OBJECT_TYPE_UNKNOWN:
		rc = false;
		break;
	case OBJECT_TYPE_GPU:
	case OBJECT_TYPE_ENGINE:
		/* do NOT check for id.id == 0 */
		if (id.enum_id == ENUM_ID_UNKNOWN)
			rc = false;
		break;
	default:
		if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
			rc = false;
		break;
	}

	return rc;
}

/* Field-wise equality of two object ids; logs a warning and returns false
 * when either id is invalid. */
static bool dal_graphics_object_id_is_equal(
	struct graphics_object_id id1,
	struct graphics_object_id id2)
{
	if (false == dal_graphics_object_id_is_valid(id1)) {
		dm_output_to_console(
		"%s: Warning: comparing invalid object 'id1'!\n", __func__);
		return false;
	}
	if (false == dal_graphics_object_id_is_valid(id2)) {
		dm_output_to_console(
		"%s: Warning: comparing invalid object 'id2'!\n", __func__);
		return false;
	}

	if (id1.id == id2.id && id1.enum_id == id2.enum_id
		&& id1.type == id2.type)
		return true;

	return false;
}

/* Look up the ATOM_OBJECT matching @id in the object table selected by
 * id.type; returns NULL when the type is unsupported or no entry matches. */
static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
	struct graphics_object_id id)
{
	uint32_t offset;
	ATOM_OBJECT_TABLE *tbl;
	uint32_t i;

	switch (id.type) {
	case OBJECT_TYPE_ENCODER:
		offset = le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
		break;

	case OBJECT_TYPE_CONNECTOR:
		offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
		break;

	case OBJECT_TYPE_ROUTER:
		offset = le16_to_cpu(bp->object_info_tbl.v1_1->usRouterObjectTableOffset);
		break;

	case OBJECT_TYPE_GENERIC:
		/* the misc object table only exists from object table v1.3 on */
		if (bp->object_info_tbl.revision.minor < 3)
			return NULL;
		offset = le16_to_cpu(bp->object_info_tbl.v1_3->usMiscObjectTableOffset);
		break;

	default:
		return NULL;
	}

	offset += bp->object_info_tbl_offset;

	tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base, offset,
				struct_size(tbl, asObjects, 1)));
	if (!tbl)
		return NULL;

	for (i = 0; i < tbl->ucNumberOfObjects; i++)
		if (dal_graphics_object_id_is_equal(id,
				object_id_from_bios_object_id(
					le16_to_cpu(tbl->asObjects[i].usObjectID))))
			return &tbl->asObjects[i];

	return NULL;
}

/* Return the number of source objects for @object and point *id_list at
 * the BIOS-resident array of their object ids (0 on any failure). */
static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
	uint16_t **id_list)
{
	uint32_t offset;
	uint8_t *number;

	if (!object) {
		BREAK_TO_DEBUGGER(); /* Invalid object id */
		return 0;
	}

	/* the src/dst table starts with a uint8_t count, followed by the
	 * uint16_t object-id list itself */
	offset = le16_to_cpu(object->usSrcDstTableOffset)
					+ bp->object_info_tbl_offset;

	number = GET_IMAGE(uint8_t, offset);
	if (!number)
		return 0;

	offset += sizeof(uint8_t);

	*id_list = (uint16_t *)bios_get_image(&bp->base, offset,
			*number * sizeof(uint16_t));

	if (!*id_list)
		return 0;

	return *number;
}

/* Map an ATOM_DEVICE_*_SUPPORT bit to a {device_type, enum_id} pair;
 * unknown bits yield DEVICE_TYPE_UNKNOWN. */
static struct device_id device_type_from_device_id(uint16_t device_id)
{

	struct device_id result_device_id = {0};

	switch (device_id) {
	case ATOM_DEVICE_LCD1_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_LCD;
		result_device_id.enum_id = 1;
		break;

	case ATOM_DEVICE_LCD2_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_LCD;
		result_device_id.enum_id = 2;
		break;

	case ATOM_DEVICE_CRT1_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_CRT;
		result_device_id.enum_id = 1;
		break;

	case ATOM_DEVICE_CRT2_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_CRT;
		result_device_id.enum_id = 2;
		break;

	case ATOM_DEVICE_DFP1_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 1;
		break;

	case ATOM_DEVICE_DFP2_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 2;
		break;

	case ATOM_DEVICE_DFP3_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 3;
		break;

	case ATOM_DEVICE_DFP4_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 4;
		break;

	case ATOM_DEVICE_DFP5_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 5;
		break;

	case ATOM_DEVICE_DFP6_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;
		result_device_id.enum_id = 6;
		break;

	default:
		BREAK_TO_DEBUGGER(); /* Invalid device Id */
		result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
		result_device_id.enum_id = 0;
	}
	return result_device_id;
}

/* Extract {major, minor} from an ATOM table header; both stay 0 (an
 * invalid revision) when the header is NULL. */
static void get_atom_data_table_revision(
	ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
	struct atom_data_revision *tbl_revision)
{
	if (!tbl_revision)
		return;

	/* initialize the revision to 0 which is invalid revision */
	tbl_revision->major = 0;
	tbl_revision->minor = 0;

	if (!atom_data_tbl)
		return;

	tbl_revision->major =
		(uint32_t) GET_DATA_TABLE_MAJOR_REVISION(atom_data_tbl);
	tbl_revision->minor =
		(uint32_t) GET_DATA_TABLE_MINOR_REVISION(atom_data_tbl);
}

/* Map a signal type to the matching ASIC internal spread-spectrum id
 * (0 when the signal has no associated SS id). */
static uint32_t signal_to_ss_id(enum as_signal_type signal)
{
	uint32_t clk_id_ss = 0;

	switch (signal) {
	case AS_SIGNAL_TYPE_DVI:
		clk_id_ss = ASIC_INTERNAL_SS_ON_TMDS;
		break;
	case AS_SIGNAL_TYPE_HDMI:
		clk_id_ss = ASIC_INTERNAL_SS_ON_HDMI;
		break;
	case AS_SIGNAL_TYPE_LVDS:
		clk_id_ss = ASIC_INTERNAL_SS_ON_LVDS;
		break;
	case AS_SIGNAL_TYPE_DISPLAY_PORT:
		clk_id_ss = ASIC_INTERNAL_SS_ON_DP;
		break;
	case AS_SIGNAL_TYPE_GPU_PLL:
		clk_id_ss = ASIC_INTERNAL_GPUPLL_SS;
		break;
	default:
		break;
	}
	return clk_id_ss;
}

/* Inverse of device_type_from_device_id: map {device_type, enum_id} back
 * to its ATOM_DEVICE_*_SUPPORT bit (0 when the pair is unknown). */
static uint32_t get_support_mask_for_device_id(struct device_id device_id)
{
	enum dal_device_type device_type = device_id.device_type;
	uint32_t enum_id = device_id.enum_id;

	switch (device_type) {
	case DEVICE_TYPE_LCD:
		switch (enum_id) {
		case 1:
			return ATOM_DEVICE_LCD1_SUPPORT;
		case 2:
			return ATOM_DEVICE_LCD2_SUPPORT;
		default:
			break;
		}
		break;
	case DEVICE_TYPE_CRT:
		switch (enum_id) {
		case 1:
			return ATOM_DEVICE_CRT1_SUPPORT;
		case 2:
			return ATOM_DEVICE_CRT2_SUPPORT;
		default:
			break;
		}
		break;
	case DEVICE_TYPE_DFP:
		switch (enum_id) {
		case 1:
			return ATOM_DEVICE_DFP1_SUPPORT;
		case 2:
			return ATOM_DEVICE_DFP2_SUPPORT;
		case 3:
			return ATOM_DEVICE_DFP3_SUPPORT;
		case 4:
			return ATOM_DEVICE_DFP4_SUPPORT;
		case 5:
			return ATOM_DEVICE_DFP5_SUPPORT;
		case 6:
			return ATOM_DEVICE_DFP6_SUPPORT;
		default:
			break;
		}
		break;
	case DEVICE_TYPE_CV:
		switch (enum_id) {
		case 1:
			return ATOM_DEVICE_CV_SUPPORT;
		default:
			break;
		}
		break;
	case DEVICE_TYPE_TV:
		switch (enum_id) {
		case 1:
			return
				ATOM_DEVICE_TV1_SUPPORT;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* Unidentified device ID, return empty support mask. */
	return 0;
}

/**
 * bios_parser_set_scratch_critical_state - update critical state
 * bit in VBIOS scratch register
 * @dcb: pointer to the DC BIOS
 * @state: set or reset state
 */
static void bios_parser_set_scratch_critical_state(
	struct dc_bios *dcb,
	bool state)
{
	bios_set_scratch_critical_state(dcb, state);
}

/*
 * get_integrated_info_v8
 *
 * @brief
 * Get V8 integrated BIOS information
 *
 * @param
 * bios_parser *bp - [in]BIOS parser handler to get master data table
 * integrated_info *info - [out] store and output integrated info
 *
 * return:
 * enum bp_result - BP_RESULT_OK if information is available,
 * BP_RESULT_BADBIOSTABLE otherwise.
 */
static enum bp_result get_integrated_info_v8(
	struct bios_parser *bp,
	struct integrated_info *info)
{
	ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info_v8;
	uint32_t i;

	info_v8 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_8,
			bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);

	if (info_v8 == NULL)
		return BP_RESULT_BADBIOSTABLE;

	/* BIOS stores clocks in 10kHz units; the driver uses kHz. */
	info->boot_up_engine_clock =
		le32_to_cpu(info_v8->ulBootUpEngineClock) * 10;
	info->dentist_vco_freq = le32_to_cpu(info_v8->ulDentistVCOFreq) * 10;
	info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;

	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		/* Convert [10KHz] into [KHz] */
		info->disp_clk_voltage[i].max_supported_clk =
			le32_to_cpu(info_v8->sDISPCLK_Voltage[i].
				ulMaximumSupportedCLK) * 10;
		info->disp_clk_voltage[i].voltage_index =
			le32_to_cpu(info_v8->sDISPCLK_Voltage[i].ulVoltageIndex);
	}

	info->boot_up_req_display_vector =
			le32_to_cpu(info_v8->ulBootUpReqDisplayVector);
	info->gpu_cap_info =
			le32_to_cpu(info_v8->ulGPUCapInfo);

	/*
	 * system_config: Bit[0] = 0 : PCIE power gating disabled
	 *                       = 1 : PCIE power gating enabled
	 *                Bit[1] = 0 : DDR-PLL shut down disabled
	 *                       = 1 : DDR-PLL shut down enabled
	 *                Bit[2] = 0 : DDR-PLL power down disabled
	 *                       = 1 : DDR-PLL power down enabled
	 */
	info->system_config = le32_to_cpu(info_v8->ulSystemConfig);
	info->cpu_cap_info = le32_to_cpu(info_v8->ulCPUCapInfo);
	info->boot_up_nb_voltage =
			le16_to_cpu(info_v8->usBootUpNBVoltage);
	info->ext_disp_conn_info_offset =
			le16_to_cpu(info_v8->usExtDispConnInfoOffset);
	info->memory_type = info_v8->ucMemoryType;
	info->ma_channel_number = info_v8->ucUMAChannelNumber;
	info->gmc_restore_reset_time =
			le32_to_cpu(info_v8->ulGMCRestoreResetTime);

	/* minimum_n_clk = min over the four NB P-state NCLK frequencies */
	info->minimum_n_clk =
			le32_to_cpu(info_v8->ulNbpStateNClkFreq[0]);
	for (i = 1; i < 4; ++i)
		info->minimum_n_clk =
				info->minimum_n_clk <
				le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]) ?
				info->minimum_n_clk :
				le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]);

	info->idle_n_clk = le32_to_cpu(info_v8->ulIdleNClk);
	info->ddr_dll_power_up_time =
			le32_to_cpu(info_v8->ulDDR_DLL_PowerUpTime);
	info->ddr_pll_power_up_time =
			le32_to_cpu(info_v8->ulDDR_PLL_PowerUpTime);
	info->pcie_clk_ss_type = le16_to_cpu(info_v8->usPCIEClkSSType);
	info->lvds_ss_percentage =
			le16_to_cpu(info_v8->usLvdsSSPercentage);
	info->lvds_sspread_rate_in_10hz =
			le16_to_cpu(info_v8->usLvdsSSpreadRateIn10Hz);
	info->hdmi_ss_percentage =
			le16_to_cpu(info_v8->usHDMISSPercentage);
	info->hdmi_sspread_rate_in_10hz =
			le16_to_cpu(info_v8->usHDMISSpreadRateIn10Hz);
	info->dvi_ss_percentage =
			le16_to_cpu(info_v8->usDVISSPercentage);
	info->dvi_sspread_rate_in_10_hz =
			le16_to_cpu(info_v8->usDVISSpreadRateIn10Hz);

	info->max_lvds_pclk_freq_in_single_link =
			le16_to_cpu(info_v8->usMaxLVDSPclkFreqInSingleLink);
	info->lvds_misc = info_v8->ucLvdsMisc;
	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
			info_v8->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
			info_v8->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
			info_v8->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
			info_v8->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
			info_v8->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
			info_v8->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
	info->lvds_off_to_on_delay_in_4ms =
			info_v8->ucLVDSOffToOnDelay_in4Ms;
	info->lvds_bit_depth_control_val =
			le32_to_cpu(info_v8->ulLCDBitDepthControlVal);

	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
		/* Convert [10KHz] into [KHz] */
		info->avail_s_clk[i].supported_s_clk =
			le32_to_cpu(info_v8->sAvail_SCLK[i].ulSupportedSCLK) * 10;
		info->avail_s_clk[i].voltage_index =
			le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageIndex);
		info->avail_s_clk[i].voltage_id =
			le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageID);
	}

	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
		info->ext_disp_conn_info.gu_id[i] =
				info_v8->sExtDispConnInfo.ucGuid[i];
	}

	/* copy the external display connection paths verbatim */
	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
		info->ext_disp_conn_info.path[i].device_connector_id =
			object_id_from_bios_object_id(
				le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceConnector));

		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
			object_id_from_bios_object_id(
				le16_to_cpu(
				info_v8->sExtDispConnInfo.sPath[i].usExtEncoderObjId));

		info->ext_disp_conn_info.path[i].device_tag =
			le16_to_cpu(
				info_v8->sExtDispConnInfo.sPath[i].usDeviceTag);
		info->ext_disp_conn_info.path[i].device_acpi_enum =
			le16_to_cpu(
				info_v8->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
			info_v8->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
			info_v8->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
		info->ext_disp_conn_info.path[i].channel_mapping.raw =
			info_v8->sExtDispConnInfo.sPath[i].ucChannelMapping;
	}
	info->ext_disp_conn_info.checksum =
		info_v8->sExtDispConnInfo.ucChecksum;

	return BP_RESULT_OK;
}

/*
 * get_integrated_info_v9
 *
 * @brief
 * Get V9 integrated BIOS information
 *
 * @param
 * bios_parser *bp - [in]BIOS parser handler to get master data table
 * integrated_info *info - [out] store and output integrated info
 *
 * return:
 * enum bp_result - BP_RESULT_OK if information is available,
 * BP_RESULT_BADBIOSTABLE otherwise.
*/
static enum bp_result get_integrated_info_v9(
	struct bios_parser *bp,
	struct integrated_info *info)
{
	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info_v9;
	uint32_t i;

	/* Map the V1.9 integrated-system-info table out of the VBIOS image */
	info_v9 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_9,
			bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
	if (!info_v9)
		return BP_RESULT_BADBIOSTABLE;

	/* BIOS stores clocks in 10KHz units; convert to KHz */
	info->boot_up_engine_clock =
		le32_to_cpu(info_v9->ulBootUpEngineClock) * 10;
	info->dentist_vco_freq = le32_to_cpu(info_v9->ulDentistVCOFreq) * 10;
	info->boot_up_uma_clock = le32_to_cpu(info_v9->ulBootUpUMAClock) * 10;

	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		/* Convert [10KHz] into [KHz] */
		info->disp_clk_voltage[i].max_supported_clk =
			le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulMaximumSupportedCLK) * 10;
		info->disp_clk_voltage[i].voltage_index =
			le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulVoltageIndex);
	}

	info->boot_up_req_display_vector =
		le32_to_cpu(info_v9->ulBootUpReqDisplayVector);
	info->gpu_cap_info = le32_to_cpu(info_v9->ulGPUCapInfo);

	/*
	 * system_config: Bit[0] = 0 : PCIE power gating disabled
	 *                       = 1 : PCIE power gating enabled
	 *                Bit[1] = 0 : DDR-PLL shut down disabled
	 *                       = 1 : DDR-PLL shut down enabled
	 *                Bit[2] = 0 : DDR-PLL power down disabled
	 *                       = 1 : DDR-PLL power down enabled
	 */
	info->system_config = le32_to_cpu(info_v9->ulSystemConfig);
	info->cpu_cap_info = le32_to_cpu(info_v9->ulCPUCapInfo);
	info->boot_up_nb_voltage = le16_to_cpu(info_v9->usBootUpNBVoltage);
	info->ext_disp_conn_info_offset =
		le16_to_cpu(info_v9->usExtDispConnInfoOffset);
	info->memory_type = info_v9->ucMemoryType;
	info->ma_channel_number = info_v9->ucUMAChannelNumber;
	info->gmc_restore_reset_time =
		le32_to_cpu(info_v9->ulGMCRestoreResetTime);

	/* minimum_n_clk = minimum of the four NB P-state NCLK frequencies */
	info->minimum_n_clk = le32_to_cpu(info_v9->ulNbpStateNClkFreq[0]);
	for (i = 1; i < 4; ++i)
		info->minimum_n_clk =
			info->minimum_n_clk < le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]) ?
			info->minimum_n_clk : le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]);

	info->idle_n_clk = le32_to_cpu(info_v9->ulIdleNClk);
	info->ddr_dll_power_up_time =
		le32_to_cpu(info_v9->ulDDR_DLL_PowerUpTime);
	info->ddr_pll_power_up_time =
		le32_to_cpu(info_v9->ulDDR_PLL_PowerUpTime);
	info->pcie_clk_ss_type = le16_to_cpu(info_v9->usPCIEClkSSType);

	/* spread-spectrum settings per output type */
	info->lvds_ss_percentage = le16_to_cpu(info_v9->usLvdsSSPercentage);
	info->lvds_sspread_rate_in_10hz =
		le16_to_cpu(info_v9->usLvdsSSpreadRateIn10Hz);
	info->hdmi_ss_percentage = le16_to_cpu(info_v9->usHDMISSPercentage);
	info->hdmi_sspread_rate_in_10hz =
		le16_to_cpu(info_v9->usHDMISSpreadRateIn10Hz);
	info->dvi_ss_percentage = le16_to_cpu(info_v9->usDVISSPercentage);
	info->dvi_sspread_rate_in_10_hz =
		le16_to_cpu(info_v9->usDVISSpreadRateIn10Hz);
	info->max_lvds_pclk_freq_in_single_link =
		le16_to_cpu(info_v9->usMaxLVDSPclkFreqInSingleLink);
	info->lvds_misc = info_v9->ucLvdsMisc;

	/* LVDS power-sequencing delays, stored in units of 4 ms */
	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
		info_v9->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
		info_v9->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
		info_v9->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
		info_v9->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
		info_v9->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
		info_v9->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
	info->lvds_off_to_on_delay_in_4ms =
		info_v9->ucLVDSOffToOnDelay_in4Ms;
	info->lvds_bit_depth_control_val =
		le32_to_cpu(info_v9->ulLCDBitDepthControlVal);

	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
		/* Convert [10KHz] into [KHz] */
		info->avail_s_clk[i].supported_s_clk =
			le32_to_cpu(info_v9->sAvail_SCLK[i].ulSupportedSCLK) * 10;
		info->avail_s_clk[i].voltage_index =
			le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageIndex);
		info->avail_s_clk[i].voltage_id =
			le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageID);
	}

	/* external display connection info: GUID bytes copied verbatim */
	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
		info->ext_disp_conn_info.gu_id[i] =
			info_v9->sExtDispConnInfo.ucGuid[i];
	}

	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
		info->ext_disp_conn_info.path[i].device_connector_id =
			object_id_from_bios_object_id(
				le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceConnector));

		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
			object_id_from_bios_object_id(
				le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usExtEncoderObjId));

		info->ext_disp_conn_info.path[i].device_tag =
			le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceTag);
		info->ext_disp_conn_info.path[i].device_acpi_enum =
			le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
			info_v9->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
			info_v9->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
		info->ext_disp_conn_info.path[i].channel_mapping.raw =
			info_v9->sExtDispConnInfo.sPath[i].ucChannelMapping;
	}

	info->ext_disp_conn_info.checksum =
		info_v9->sExtDispConnInfo.ucChecksum;

	return BP_RESULT_OK;
}

/*
 * construct_integrated_info
 *
 * @brief
 * Get integrated BIOS information based on table revision
 *
 * @param
 * bios_parser *bp - [in]BIOS parser handler to get master data table
 * integrated_info *info - [out] store and output integrated info
 *
 * return:
 * enum bp_result - BP_RESULT_OK if information is available,
 * BP_RESULT_BADBIOSTABLE otherwise.
*/
static enum bp_result construct_integrated_info(
	struct bios_parser *bp,
	struct integrated_info *info)
{
	enum bp_result result = BP_RESULT_BADBIOSTABLE;

	ATOM_COMMON_TABLE_HEADER *header;
	struct atom_data_revision revision;

	if (bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo) {
		header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
				bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);

		get_atom_data_table_revision(header, &revision);

		/* Don't need to check major revision as they are all 1 */
		switch (revision.minor) {
		case 8:
			result = get_integrated_info_v8(bp, info);
			break;
		case 9:
			result = get_integrated_info_v9(bp, info);
			break;
		default:
			return result;
		}
	}

	/* Sort voltage table from low to high*/
	if (result == BP_RESULT_OK) {
		uint32_t i;
		uint32_t j;

		/* insertion sort over the small fixed-size voltage table */
		for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
			for (j = i; j > 0; --j) {
				if (info->disp_clk_voltage[j].max_supported_clk <
					info->disp_clk_voltage[j-1].max_supported_clk) {
					/* swap j and j - 1*/
					swap(info->disp_clk_voltage[j - 1],
					     info->disp_clk_voltage[j]);
				}
			}
		}
	}

	return result;
}

/* Allocate and fill an integrated_info from the VBIOS; NULL on failure
 * (caller owns and must kfree the returned buffer).
 */
static struct integrated_info *bios_parser_create_integrated_info(
	struct dc_bios *dcb)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct integrated_info *info;

	info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);

	if (info == NULL) {
		ASSERT_CRITICAL(0);
		return NULL;
	}

	if (construct_integrated_info(bp, info) == BP_RESULT_OK)
		return info;

	kfree(info);

	return NULL;
}

/* Walk the record list starting at @record_offset looking for a bracket
 * layout record and, if found, copy slot/connector geometry into
 * @slot_layout_info.  Returns BP_RESULT_NORECORD when no such record
 * exists, BP_RESULT_BADBIOSTABLE on a malformed list.
 */
static enum bp_result update_slot_layout_info(struct dc_bios *dcb,
	unsigned int i,
	struct slot_layout_info *slot_layout_info,
	unsigned int record_offset)
{
	unsigned int j;
	struct bios_parser *bp;
	ATOM_BRACKET_LAYOUT_RECORD *record;
	ATOM_COMMON_RECORD_HEADER *record_header;
	enum bp_result result = BP_RESULT_NORECORD;

	bp = BP_FROM_DCB(dcb);
	record = NULL;
	record_header = NULL;

	for (;;) {
		record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
		if (record_header == NULL) {
			result = BP_RESULT_BADBIOSTABLE;
			break;
		}

		/* the end of the list */
		if (record_header->ucRecordType == 0xff ||
			record_header->ucRecordSize == 0) {
			break;
		}

		/* accept only records large enough to hold at least one
		 * connector entry
		 */
		if (record_header->ucRecordType ==
			ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
			struct_size(record, asConnInfo, 1) <=
			record_header->ucRecordSize) {
			record = (ATOM_BRACKET_LAYOUT_RECORD *)(record_header);
			result = BP_RESULT_OK;
			break;
		}

		record_offset += record_header->ucRecordSize;
	}

	/* return if the record not found */
	if (result != BP_RESULT_OK)
		return result;

	/* get slot sizes */
	slot_layout_info->length = record->ucLength;
	slot_layout_info->width = record->ucWidth;

	/* get info for each connector in the slot */
	slot_layout_info->num_of_connectors = record->ucConnNum;
	for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
		slot_layout_info->connectors[j].connector_type =
			(enum connector_layout_type)
			(record->asConnInfo[j].ucConnectorType);
		switch (record->asConnInfo[j].ucConnectorType) {
		case CONNECTOR_TYPE_DVI_D:
			slot_layout_info->connectors[j].connector_type =
				CONNECTOR_LAYOUT_TYPE_DVI_D;
			slot_layout_info->connectors[j].length =
				CONNECTOR_SIZE_DVI;
			break;

		case CONNECTOR_TYPE_HDMI:
			slot_layout_info->connectors[j].connector_type =
				CONNECTOR_LAYOUT_TYPE_HDMI;
			slot_layout_info->connectors[j].length =
				CONNECTOR_SIZE_HDMI;
			break;

		case CONNECTOR_TYPE_DISPLAY_PORT:
			slot_layout_info->connectors[j].connector_type =
				CONNECTOR_LAYOUT_TYPE_DP;
			slot_layout_info->connectors[j].length =
				CONNECTOR_SIZE_DP;
			break;

		case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
			slot_layout_info->connectors[j].connector_type =
				CONNECTOR_LAYOUT_TYPE_MINI_DP;
			slot_layout_info->connectors[j].length =
				CONNECTOR_SIZE_MINI_DP;
			break;

		default:
			slot_layout_info->connectors[j].connector_type =
				CONNECTOR_LAYOUT_TYPE_UNKNOWN;
			slot_layout_info->connectors[j].length =
				CONNECTOR_SIZE_UNKNOWN;
		}

		slot_layout_info->connectors[j].position =
			record->asConnInfo[j].ucPosition;
		slot_layout_info->connectors[j].connector_id =
			object_id_from_bios_object_id(
				record->asConnInfo[j].usConnectorObjectId);
	}
	return result;
}

/* Look up the generic object with @bracket_layout_id in the misc object
 * table and, if present, fill @slot_layout_info from its record list.
 * NOTE(review): dereferences object_info_tbl.v1_3 unconditionally —
 * presumably only reached on v1_3-capable BIOSes; confirm against callers.
 */
static enum bp_result get_bracket_layout_record(struct dc_bios *dcb,
	unsigned int bracket_layout_id,
	struct slot_layout_info *slot_layout_info)
{
	unsigned int i;
	unsigned int record_offset;
	struct bios_parser *bp;
	enum bp_result result;
	ATOM_OBJECT *object;
	ATOM_OBJECT_TABLE *object_table;
	unsigned int genericTableOffset;

	bp = BP_FROM_DCB(dcb);
	object = NULL;
	if (slot_layout_info == NULL) {
		DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
		return BP_RESULT_BADINPUT;
	}

	genericTableOffset = bp->object_info_tbl_offset +
		bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
	object_table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
				genericTableOffset,
				struct_size(object_table, asObjects, 1)));
	if (!object_table)
		return BP_RESULT_FAILURE;

	result = BP_RESULT_NORECORD;
	for (i = 0; i < object_table->ucNumberOfObjects; ++i) {

		if (bracket_layout_id ==
			object_table->asObjects[i].usObjectID) {

			object = &object_table->asObjects[i];
			record_offset = object->usRecordOffset +
				bp->object_info_tbl_offset;

			result = update_slot_layout_info(dcb, i,
				slot_layout_info, record_offset);
			break;
		}
	}
	return result;
}

/* Collect the physical board slot/connector layout published by the VBIOS.
 * A missing record for slot index > 0 just ends the list; any other
 * failure is propagated.
 */
static enum bp_result bios_get_board_layout_info(
	struct dc_bios *dcb,
	struct board_layout_info *board_layout_info)
{
	unsigned int i;
	enum bp_result record_result;

	const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
		GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
		GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
		0, 0
	};

	if (board_layout_info == NULL) {
		DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
		return BP_RESULT_BADINPUT;
	}

	board_layout_info->num_of_slots = 0;

	for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
		record_result = get_bracket_layout_record(dcb,
			slot_index_to_vbios_id[i],
			&board_layout_info->slots[i]);

		if (record_result == BP_RESULT_NORECORD && i > 0)
			break; /* no more slots present in bios */
		else if (record_result != BP_RESULT_OK)
			return record_result;  /* fail */

		++board_layout_info->num_of_slots;
	}

	/* all data is valid */
	board_layout_info->is_number_of_slots_valid = 1;
	board_layout_info->is_slots_size_valid = 1;
	board_layout_info->is_connector_offsets_valid = 1;
	board_layout_info->is_connector_lengths_valid = 1;

	return BP_RESULT_OK;
}

/******************************************************************************/

/* vtable exposed to dc through dc_bios::funcs */
static const struct dc_vbios_funcs vbios_funcs = {
	.get_connectors_number = bios_parser_get_connectors_number,

	.get_connector_id = bios_parser_get_connector_id,

	.get_src_obj = bios_parser_get_src_obj,

	.get_i2c_info = bios_parser_get_i2c_info,

	.get_hpd_info = bios_parser_get_hpd_info,

	.get_device_tag = bios_parser_get_device_tag,

	.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,

	.get_ss_entry_number = bios_parser_get_ss_entry_number,

	.get_embedded_panel_info = bios_parser_get_embedded_panel_info,

	.get_gpio_pin_info = bios_parser_get_gpio_pin_info,

	.get_encoder_cap_info = bios_parser_get_encoder_cap_info,

	/* bios scratch register communication */
	.is_accelerated_mode = bios_is_accelerated_mode,

	.set_scratch_critical_state = bios_parser_set_scratch_critical_state,

	.is_device_id_supported = bios_parser_is_device_id_supported,

	/* COMMANDS */
	.encoder_control = bios_parser_encoder_control,

	.transmitter_control = bios_parser_transmitter_control,

	.enable_crtc = bios_parser_enable_crtc,

	.adjust_pixel_clock = bios_parser_adjust_pixel_clock,

	.set_pixel_clock = bios_parser_set_pixel_clock,

	.set_dce_clock = bios_parser_set_dce_clock,

	.enable_spread_spectrum_on_ppll = bios_parser_enable_spread_spectrum_on_ppll,

	.program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */

	.program_display_engine_pll = bios_parser_program_display_engine_pll,

	.enable_disp_power_gating = bios_parser_enable_disp_power_gating,

	/* SW init and patch */

	.bios_parser_destroy = bios_parser_destroy,

	.get_board_layout_info = bios_get_board_layout_info,

	.get_atom_dc_golden_table = NULL
};

/* Initialize the legacy (atombios) parser: locate the ROM header, master
 * data table and object info table, then hook up command tables and cache
 * integrated/firmware info.  Returns false on any validation failure.
 */
static bool bios_parser_construct(
	struct bios_parser *bp,
	struct bp_init_data *init,
	enum dce_version dce_version)
{
	uint16_t *rom_header_offset = NULL;
	ATOM_ROM_HEADER *rom_header = NULL;
	ATOM_OBJECT_HEADER *object_info_tbl;
	struct atom_data_revision tbl_rev = {0};

	if (!init)
		return false;

	if (!init->bios)
		return false;

	bp->base.funcs = &vbios_funcs;
	bp->base.bios = init->bios;
	/* image size byte is stored in 512-byte units */
	bp->base.bios_size = bp->base.bios[BIOS_IMAGE_SIZE_OFFSET] * BIOS_IMAGE_SIZE_UNIT;

	bp->base.ctx = init->ctx;
	bp->base.bios_local_image = NULL;

	rom_header_offset =
		GET_IMAGE(uint16_t, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);

	if (!rom_header_offset)
		return false;

	rom_header = GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);

	if (!rom_header)
		return false;

	get_atom_data_table_revision(&rom_header->sHeader, &tbl_rev);
	/* v2.2+ tables are atomfirmware format, not handled by this parser */
	if (tbl_rev.major >= 2 && tbl_rev.minor >= 2)
		return false;

	bp->master_data_tbl =
		GET_IMAGE(ATOM_MASTER_DATA_TABLE,
			rom_header->usMasterDataTableOffset);

	if (!bp->master_data_tbl)
		return false;

	bp->object_info_tbl_offset = DATA_TABLES(Object_Header);

	if (!bp->object_info_tbl_offset)
		return false;

	object_info_tbl =
		GET_IMAGE(ATOM_OBJECT_HEADER, bp->object_info_tbl_offset);

	if (!object_info_tbl)
		return false;

	get_atom_data_table_revision(&object_info_tbl->sHeader,
		&bp->object_info_tbl.revision);

	if (bp->object_info_tbl.revision.major == 1
		&& bp->object_info_tbl.revision.minor >= 3) {
		ATOM_OBJECT_HEADER_V3 *tbl_v3;

		tbl_v3 = GET_IMAGE(ATOM_OBJECT_HEADER_V3,
			bp->object_info_tbl_offset);
		if (!tbl_v3)
			return false;

		bp->object_info_tbl.v1_3 = tbl_v3;
	} else if (bp->object_info_tbl.revision.major == 1
		&& bp->object_info_tbl.revision.minor >= 1)
		bp->object_info_tbl.v1_1 = object_info_tbl;
	else
		return false;

	dal_bios_parser_init_cmd_tbl(bp);
	dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);

	bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
	bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;

	return true;
}

/******************************************************************************/
linux-master
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "atom.h"

#include "include/bios_parser_types.h"

#include "command_table_helper.h"

/* Select the DCE-generation-specific command table helper for @dce and
 * store it in *@h.  Returns false for generations this legacy helper
 * does not cover.
 */
bool dal_bios_parser_init_cmd_tbl_helper(
	const struct command_table_helper **h,
	enum dce_version dce)
{
	switch (dce) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case DCE_VERSION_6_0:
	case DCE_VERSION_6_1:
	case DCE_VERSION_6_4:
		*h = dal_cmd_tbl_helper_dce60_get_table();
		return true;
#endif

	case DCE_VERSION_8_0:
	case DCE_VERSION_8_1:
	case DCE_VERSION_8_3:
		*h = dal_cmd_tbl_helper_dce80_get_table();
		return true;

	case DCE_VERSION_10_0:
		*h = dal_cmd_tbl_helper_dce110_get_table();
		return true;

	case DCE_VERSION_11_0:
		*h = dal_cmd_tbl_helper_dce110_get_table();
		return true;

	case DCE_VERSION_11_2:
	case DCE_VERSION_11_22:
		*h = dal_cmd_tbl_helper_dce112_get_table();
		return true;

	default:
		/* Unsupported DCE */
		BREAK_TO_DEBUGGER();
		return false;
	}
}

/* real implementations */

/* Translate a driver controller id into the matching ATOM CRTC id;
 * false (plus debugger break) on NULL output pointer or unknown id.
 */
bool dal_cmd_table_helper_controller_id_to_atom(
	enum controller_id id,
	uint8_t *atom_id)
{
	if (atom_id == NULL) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	switch (id) {
	case CONTROLLER_ID_D0:
		*atom_id = ATOM_CRTC1;
		return true;
	case CONTROLLER_ID_D1:
		*atom_id = ATOM_CRTC2;
		return true;
	case CONTROLLER_ID_D2:
		*atom_id = ATOM_CRTC3;
		return true;
	case CONTROLLER_ID_D3:
		*atom_id = ATOM_CRTC4;
		return true;
	case CONTROLLER_ID_D4:
		*atom_id = ATOM_CRTC5;
		return true;
	case CONTROLLER_ID_D5:
		*atom_id = ATOM_CRTC6;
		return true;
	case CONTROLLER_ID_UNDERLAY0:
		*atom_id = ATOM_UNDERLAY_PIPE0;
		return true;
	case CONTROLLER_ID_UNDEFINED:
		*atom_id = ATOM_CRTC_INVALID;
		return true;
	default:
		/* Wrong controller id */
		BREAK_TO_DEBUGGER();
		return false;
	}
}

/**
 * dal_cmd_table_helper_transmitter_bp_to_atom - Translate the Transmitter to the
 *                                               corresponding ATOM BIOS value
 * @t: transmitter
 * returns: output digitalTransmitter
 *    // =00: Digital Transmitter1 ( UNIPHY linkAB )
 *    // =01: Digital Transmitter2 ( UNIPHY linkCD )
 *    // =02: Digital Transmitter3 ( UNIPHY linkEF )
 */
uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
	enum transmitter t)
{
	switch (t) {
	case TRANSMITTER_UNIPHY_A:
	case TRANSMITTER_UNIPHY_B:
	case TRANSMITTER_TRAVIS_LCD:
		return 0;
	case TRANSMITTER_UNIPHY_C:
	case TRANSMITTER_UNIPHY_D:
		return 1;
	case TRANSMITTER_UNIPHY_E:
	case TRANSMITTER_UNIPHY_F:
		return 2;
	default:
		/* Invalid Transmitter Type! */
		BREAK_TO_DEBUGGER();
		return 0;
	}
}

/* Map a driver signal type (plus DP audio flag) to an ATOM encoder mode;
 * unknown signals fall back to CRT mode.
 */
uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
	enum signal_type s,
	bool enable_dp_audio)
{
	switch (s) {
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
		return ATOM_ENCODER_MODE_DVI;
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return ATOM_ENCODER_MODE_HDMI;
	case SIGNAL_TYPE_LVDS:
		return ATOM_ENCODER_MODE_LVDS;
	case SIGNAL_TYPE_EDP:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_VIRTUAL:
		if (enable_dp_audio)
			return ATOM_ENCODER_MODE_DP_AUDIO;
		else
			return ATOM_ENCODER_MODE_DP;
	case SIGNAL_TYPE_RGB:
		return ATOM_ENCODER_MODE_CRT;
	default:
		return ATOM_ENCODER_MODE_CRT;
	}
}

/* Fill a V2 DIG encoder control parameter block from @control using the
 * per-generation translation callbacks in @h.
 */
void dal_cmd_table_helper_assign_control_parameter(
	const struct command_table_helper *h,
	struct bp_encoder_control *control,
	DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param)
{
	/* there are three transmitter blocks, each one has two links 4-lanes
	 * each, A+B, C+D, E+F, Uniphy A, C and E are enumerated as link 0 in
	 * each transmitter block B, D and F as link 1, third transmitter block
	 * has non splitable links (UniphyE and UniphyF can not be configured
	 * separately to drive two different streams)
	 */
	if ((control->transmitter == TRANSMITTER_UNIPHY_B) ||
		(control->transmitter == TRANSMITTER_UNIPHY_D) ||
		(control->transmitter == TRANSMITTER_UNIPHY_F)) {
		/* Bit2: Link Select
		 * =0: PHY linkA/C/E
		 * =1: PHY linkB/D/F
		 */
		ctrl_param->acConfig.ucLinkSel = 1;
	}

	/* Bit[4:3]: Transmitter Selection
	 * =00: Digital Transmitter1 ( UNIPHY linkAB )
	 * =01: Digital Transmitter2 ( UNIPHY linkCD )
	 * =02: Digital Transmitter3 ( UNIPHY linkEF )
	 * =03: Reserved
	 */
	ctrl_param->acConfig.ucTransmitterSel =
		(uint8_t)(h->transmitter_bp_to_atom(control->transmitter));

	/* We need to convert from KHz units into 10KHz units */
	ctrl_param->ucAction = h->encoder_action_to_atom(control->action);
	ctrl_param->usPixelClock = cpu_to_le16((uint16_t)(control->pixel_clock / 10));
	ctrl_param->ucEncoderMode =
		(uint8_t)(h->encoder_mode_bp_to_atom(
			control->signal, control->enable_dp_audio));
	ctrl_param->ucLaneNum = (uint8_t)(control->lanes_number);
}

/* Map a clock source id to the ATOM encoder reference-clock source id. */
bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
	enum clock_source_id id,
	uint32_t *ref_clk_src_id)
{
	if (ref_clk_src_id == NULL) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	switch (id) {
	case CLOCK_SOURCE_ID_PLL1:
		*ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
		return true;
	case CLOCK_SOURCE_ID_PLL2:
		*ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
		return true;
	case CLOCK_SOURCE_ID_DCPLL:
		*ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
		return true;
	case CLOCK_SOURCE_ID_EXTERNAL:
		*ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
		return true;
	case CLOCK_SOURCE_ID_UNDEFINED:
		*ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
		return true;
	default:
		/* Unsupported clock source id */
		BREAK_TO_DEBUGGER();
		return false;
	}
}

/* Map a driver encoder id to the ATOM encoder object id;
 * ENCODER_OBJECT_ID_NONE for unknown ids.
 */
uint8_t dal_cmd_table_helper_encoder_id_to_atom(
	enum encoder_id id)
{
	switch (id) {
	case ENCODER_ID_INTERNAL_LVDS:
		return ENCODER_OBJECT_ID_INTERNAL_LVDS;
	case ENCODER_ID_INTERNAL_TMDS1:
		return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
	case ENCODER_ID_INTERNAL_TMDS2:
		return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
	case ENCODER_ID_INTERNAL_DAC1:
		return ENCODER_OBJECT_ID_INTERNAL_DAC1;
	case ENCODER_ID_INTERNAL_DAC2:
		return ENCODER_OBJECT_ID_INTERNAL_DAC2;
	case ENCODER_ID_INTERNAL_LVTM1:
		return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
	case ENCODER_ID_INTERNAL_HDMI:
		return ENCODER_OBJECT_ID_HDMI_INTERNAL;
	case ENCODER_ID_EXTERNAL_TRAVIS:
		return ENCODER_OBJECT_ID_TRAVIS;
	case ENCODER_ID_EXTERNAL_NUTMEG:
		return ENCODER_OBJECT_ID_NUTMEG;
	case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
	case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
	case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
	case ENCODER_ID_EXTERNAL_MVPU_FPGA:
		return ENCODER_OBJECT_ID_MVPU_FPGA;
	case ENCODER_ID_INTERNAL_DDI:
		return ENCODER_OBJECT_ID_INTERNAL_DDI;
	case ENCODER_ID_INTERNAL_UNIPHY:
		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
	case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
	case ENCODER_ID_INTERNAL_UNIPHY1:
		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
	case ENCODER_ID_INTERNAL_UNIPHY2:
		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
	case ENCODER_ID_INTERNAL_UNIPHY3:
		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
	case ENCODER_ID_INTERNAL_WIRELESS:
		return ENCODER_OBJECT_ID_INTERNAL_VCE;
	case ENCODER_ID_UNKNOWN:
		return ENCODER_OBJECT_ID_NONE;
	default:
		/* Invalid encoder id */
		BREAK_TO_DEBUGGER();
		return ENCODER_OBJECT_ID_NONE;
	}
}
linux-master
drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "atom.h" #include "include/bios_parser_types.h" #include "bios_parser_helper.h" #include "command_table_helper.h" #include "command_table.h" #include "bios_parser_types_internal.h" uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset, uint32_t size) { if (bp->bios && offset + size < bp->bios_size) return bp->bios + offset; else return NULL; } #include "reg_helper.h" #define CTX \ bios->ctx #define REG(reg)\ (bios->regs->reg) #undef FN #define FN(reg_name, field_name) \ ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name bool bios_is_accelerated_mode( struct dc_bios *bios) { uint32_t acc_mode; REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode); return (acc_mode == 1); } void bios_set_scratch_acc_mode_change( struct dc_bios *bios, uint32_t state) { REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, state); } void bios_set_scratch_critical_state( struct dc_bios *bios, bool state) { uint32_t critial_state = state ? 1 : 0; REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state); } uint32_t bios_get_vga_enabled_displays( struct dc_bios *bios) { return REG_READ(BIOS_SCRATCH_3) & 0XFFFF; }
linux-master
drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "ObjectID.h"
#include "atomfirmware.h"

#include "atom.h"

#include "include/bios_parser_interface.h"

#include "command_table2.h"
#include "command_table_helper2.h"
#include "bios_parser_helper.h"
#include "bios_parser_types_internal2.h"
#include "amdgpu.h"

#include "dc_dmub_srv.h"
#include "dc.h"

#define DC_LOGGER \
	bp->base.ctx->logger

#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
	(offsetof(struct atom_master_list_of_##MasterOrData##_functions_v2_1, FieldName) / sizeof(uint16_t))

#define EXEC_BIOS_CMD_TABLE(fname, params)\
	(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
		GET_INDEX_INTO_MASTER_TABLE(command, fname), \
		(uint32_t *)&params) == 0)

#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
	amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
		GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)

#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
	bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
			GET_INDEX_INTO_MASTER_TABLE(command, fname))

/* Return the parameter revision (crev) of the command table at @index,
 * or 0 when the table header cannot be parsed.
 */
static uint32_t bios_cmd_table_para_revision(void *dev,
					     uint32_t index)
{
	struct amdgpu_device *adev = dev;
	uint8_t frev, crev;

	if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
					index,
					&frev, &crev))
		return crev;
	else
		return 0;
}

/******************************************************************************
 ******************************************************************************
 **
 **                  D I G E N C O D E R C O N T R O L
 **
 ******************************************************************************
 *****************************************************************************/

static enum bp_result encoder_control_digx_v1_5(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl);

static enum bp_result encoder_control_fallback(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl);

/* Bind the DIG encoder control entry point matching the VBIOS command
 * table revision; unknown revisions fall back to the DMUB-only path.
 */
static void init_dig_encoder_control(struct bios_parser *bp)
{
	uint32_t version =
		BIOS_CMD_TABLE_PARA_REVISION(digxencodercontrol);

	switch (version) {
	case 5:
		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
		break;
	default:
		dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
		bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
		break;
	}
}

/* Send a DIG encoder stream setup to the DMUB firmware instead of
 * executing the VBIOS command table directly.
 */
static void encoder_control_dmcub(
		struct dc_dmub_srv *dmcub,
		struct dig_encoder_stream_setup_parameters_v1_5 *dig)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.digx_encoder_control.header.type = DMUB_CMD__VBIOS;
	cmd.digx_encoder_control.header.sub_type =
			DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
	cmd.digx_encoder_control.header.payload_bytes =
		sizeof(cmd.digx_encoder_control) -
		sizeof(cmd.digx_encoder_control.header);
	cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;

	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

/* v1.5 DIG encoder control: translate @cntl into table parameters,
 * scaling the pixel clock for HDMI deep color, then dispatch via DMUB
 * when enabled or via the VBIOS command table otherwise.
 */
static enum bp_result encoder_control_digx_v1_5(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	struct dig_encoder_stream_setup_parameters_v1_5 params = {0};

	params.digid = (uint8_t)(cntl->engine_id);
	params.action = bp->cmd_helper->encoder_action_to_atom(cntl->action);

	params.pclk_10khz = cntl->pixel_clock / 10;
	params.digmode =
			(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
					cntl->signal,
					cntl->enable_dp_audio));
	params.lanenum = (uint8_t)(cntl->lanes_number);

	switch (cntl->color_depth) {
	case COLOR_DEPTH_888:
		params.bitpercolor = PANEL_8BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_101010:
		params.bitpercolor = PANEL_10BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_121212:
		params.bitpercolor = PANEL_12BIT_PER_COLOR;
		break;
	case COLOR_DEPTH_161616:
		params.bitpercolor = PANEL_16BIT_PER_COLOR;
		break;
	default:
		break;
	}

	/* HDMI deep color: scale pclk by bits-per-color ratio over 24bpp */
	if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		switch (cntl->color_depth) {
		case COLOR_DEPTH_101010:
			params.pclk_10khz =
				(params.pclk_10khz * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			params.pclk_10khz =
				(params.pclk_10khz * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			params.pclk_10khz =
				(params.pclk_10khz * 48) / 24;
			break;
		default:
			break;
		}

	if (bp->base.ctx->dc->ctx->dmub_srv &&
	    bp->base.ctx->dc->debug.dmub_command_table) {
		encoder_control_dmcub(bp->base.ctx->dmub_srv, &params);
		return BP_RESULT_OK;
	}

	if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
		result = BP_RESULT_OK;

	return result;
}

/* Fallback used when the table revision is unknown: only the DMUB path
 * is attempted; fails otherwise.
 */
static enum bp_result encoder_control_fallback(
	struct bios_parser *bp,
	struct bp_encoder_control *cntl)
{
	if (bp->base.ctx->dc->ctx->dmub_srv &&
	    bp->base.ctx->dc->debug.dmub_command_table) {
		return encoder_control_digx_v1_5(bp, cntl);
	}

	return BP_RESULT_FAILURE;
}

/*****************************************************************************
 ******************************************************************************
 **
 **                  TRANSMITTER CONTROL
 **
 ******************************************************************************
 *****************************************************************************/

static enum bp_result transmitter_control_v1_6(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl);

static enum bp_result transmitter_control_v1_7(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl);

static enum bp_result transmitter_control_fallback(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl);

/* Bind the transmitter control entry point matching the VBIOS command
 * table revision (crev); unknown revisions use the fallback.
 */
static void init_transmitter_control(struct bios_parser *bp)
{
	uint8_t frev;
	uint8_t crev;

	BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);

	switch (crev) {
	case 6:
		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
		break;
	case 7:
		bp->cmd_tbl.transmitter_control = transmitter_control_v1_7;
		break;
	default:
		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
		bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
		break;
	}
}

/* Send a v1.6 transmitter control request to the DMUB firmware. */
static void transmitter_control_dmcub(
		struct dc_dmub_srv *dmcub,
		struct dig_transmitter_control_parameters_v1_6 *dig)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS;
	cmd.dig1_transmitter_control.header.sub_type =
			DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
	cmd.dig1_transmitter_control.header.payload_bytes =
		sizeof(cmd.dig1_transmitter_control) -
		sizeof(cmd.dig1_transmitter_control.header);
	cmd.dig1_transmitter_control.transmitter_control.dig = *dig;

	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

/* v1.6 transmitter control: fill the parameter block from @cntl and
 * dispatch via DMUB when enabled, else execute the VBIOS command table.
 */
static enum bp_result transmitter_control_v1_6(
	struct bios_parser *bp,
	struct bp_transmitter_control *cntl)
{
	enum bp_result result = BP_RESULT_FAILURE;
	const struct command_table_helper *cmd = bp->cmd_helper;
	struct dig_transmitter_control_ps_allocation_v1_6 ps = { { 0 } };

	ps.param.phyid = cmd->phy_id_to_atom(cntl->transmitter);
	ps.param.action = (uint8_t)cntl->action;

	/* voltage/pre-emphasis actions carry a lane-settings payload
	 * instead of a dig mode
	 */
	if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
		ps.param.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings;
	else
		ps.param.mode_laneset.digmode =
				cmd->signal_type_to_atom_dig_mode(cntl->signal);

	ps.param.lanenum = (uint8_t)cntl->lanes_number;
	ps.param.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
	ps.param.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
	ps.param.connobj_id = (uint8_t)cntl->connector_obj_id.id;
	ps.param.symclk_10khz = cntl->pixel_clock/10;


	if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
		cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
		cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
		DC_LOG_BIOS("%s:ps.param.symclk_10khz = %d\n",\
		__func__, ps.param.symclk_10khz);
	}

	if (bp->base.ctx->dc->ctx->dmub_srv &&
	    bp->base.ctx->dc->debug.dmub_command_table) {
		transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param);
		return BP_RESULT_OK;
	}

/*color_depth not used any more, driver has deep color factor in the Phyclk*/
	if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
		result = BP_RESULT_OK;

	return result;
}

static void transmitter_control_dmcub_v1_7(
		struct dc_dmub_srv *dmcub,
		struct dmub_dig_transmitter_control_data_v1_7 *dig)
{
	union
dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS; cmd.dig1_transmitter_control.header.sub_type = DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL; cmd.dig1_transmitter_control.header.payload_bytes = sizeof(cmd.dig1_transmitter_control) - sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig; dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_7( struct bios_parser *bp, struct bp_transmitter_control *cntl) { enum bp_result result = BP_RESULT_FAILURE; const struct command_table_helper *cmd = bp->cmd_helper; struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0}; uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0; if (dc_is_dp_signal(cntl->signal)) hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0; dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter); dig_v1_7.action = (uint8_t)cntl->action; if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS) dig_v1_7.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings; else dig_v1_7.mode_laneset.digmode = cmd->signal_type_to_atom_dig_mode(cntl->signal); dig_v1_7.lanenum = (uint8_t)cntl->lanes_number; dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel); dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id); dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id; dig_v1_7.HPO_instance = hpo_instance; dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10; if (cntl->action == TRANSMITTER_CONTROL_ENABLE || cntl->action == TRANSMITTER_CONTROL_ACTIAVATE || cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) { DC_LOG_BIOS("%s:dig_v1_7.symclk_units.symclk_10khz = %d\n", __func__, dig_v1_7.symclk_units.symclk_10khz); } if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { transmitter_control_dmcub_v1_7(bp->base.ctx->dmub_srv, &dig_v1_7); return BP_RESULT_OK; } /*color_depth 
not used any more, driver has deep color factor in the Phyclk*/ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, dig_v1_7)) result = BP_RESULT_OK; return result; } static enum bp_result transmitter_control_fallback( struct bios_parser *bp, struct bp_transmitter_control *cntl) { if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { return transmitter_control_v1_7(bp, cntl); } return BP_RESULT_FAILURE; } /****************************************************************************** ****************************************************************************** ** ** SET PIXEL CLOCK ** ****************************************************************************** *****************************************************************************/ static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static enum bp_result set_pixel_clock_fallback( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); static void init_set_pixel_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) { case 7: bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7; break; default: dm_output_to_console("Don't have set_pixel_clock for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)); bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback; break; } } static void set_pixel_clock_dmcub( struct dc_dmub_srv *dmcub, struct set_pixel_clock_parameter_v1_7 *clk) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.set_pixel_clock.header.type = DMUB_CMD__VBIOS; cmd.set_pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK; cmd.set_pixel_clock.header.payload_bytes = sizeof(cmd.set_pixel_clock) - sizeof(cmd.set_pixel_clock.header); cmd.set_pixel_clock.pixel_clock.clk = *clk; dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params) { enum bp_result 
result = BP_RESULT_FAILURE; struct set_pixel_clock_parameter_v1_7 clk; uint8_t controller_id; uint32_t pll_id; memset(&clk, 0, sizeof(clk)); if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id) && bp->cmd_helper->controller_id_to_atom(bp_params-> controller_id, &controller_id)) { /* Note: VBIOS still wants to use ucCRTC name which is now * 1 byte in ULONG *typedef struct _CRTC_PIXEL_CLOCK_FREQ *{ * target the pixel clock to drive the CRTC timing. * ULONG ulPixelClock:24; * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to * previous version. * ATOM_CRTC1~6, indicate the CRTC controller to * ULONG ucCRTC:8; * drive the pixel clock. not used for DCPLL case. *}CRTC_PIXEL_CLOCK_FREQ; *union *{ * pixel clock and CRTC id frequency * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; * ULONG ulDispEngClkFreq; dispclk frequency *}; */ clk.crtc_id = controller_id; clk.pll_id = (uint8_t) pll_id; clk.encoderobjid = bp->cmd_helper->encoder_id_to_atom( dal_graphics_object_id_get_encoder_id( bp_params->encoder_object_id)); clk.encoder_mode = (uint8_t) bp-> cmd_helper->encoder_mode_bp_to_atom( bp_params->signal_type, false); clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock_100hz); clk.deep_color_ratio = (uint8_t) bp->cmd_helper-> transmitter_color_depth_to_atom( bp_params->color_depth); DC_LOG_BIOS("%s:program display clock = %d, tg = %d, pll = %d, "\ "colorDepth = %d\n", __func__, bp_params->target_pixel_clock_100hz, (int)controller_id, pll_id, bp_params->color_depth); if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL; if (bp_params->flags.PROGRAM_PHY_PLL_ONLY) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL; if (bp_params->flags.SUPPORT_YUV_420) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE; if (bp_params->flags.SET_XTALIN_REF_SRC) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN; if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK; if 
(bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN; if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk); return BP_RESULT_OK; } if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk)) result = BP_RESULT_OK; } return result; } static enum bp_result set_pixel_clock_fallback( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params) { if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { return set_pixel_clock_v7(bp, bp_params); } return BP_RESULT_FAILURE; } /****************************************************************************** ****************************************************************************** ** ** SET CRTC TIMING ** ****************************************************************************** *****************************************************************************/ static enum bp_result set_crtc_using_dtd_timing_v3( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); static void init_set_crtc_timing(struct bios_parser *bp) { uint32_t dtd_version = BIOS_CMD_TABLE_PARA_REVISION(setcrtc_usingdtdtiming); switch (dtd_version) { case 3: bp->cmd_tbl.set_crtc_timing = set_crtc_using_dtd_timing_v3; break; default: dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version); bp->cmd_tbl.set_crtc_timing = NULL; break; } } static enum bp_result set_crtc_using_dtd_timing_v3( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; struct set_crtc_using_dtd_timing_parameters params = {0}; uint8_t atom_controller_id; if (bp->cmd_helper->controller_id_to_atom( bp_params->controller_id, &atom_controller_id)) params.crtc_id = atom_controller_id; /* bios usH_Size wants h addressable size */ params.h_size = cpu_to_le16((uint16_t)bp_params->h_addressable); /* bios usH_Blanking_Time wants borders 
included in blanking */ params.h_blanking_time = cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable)); /* bios usV_Size wants v addressable size */ params.v_size = cpu_to_le16((uint16_t)bp_params->v_addressable); /* bios usV_Blanking_Time wants borders included in blanking */ params.v_blanking_time = cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable)); /* bios usHSyncOffset is the offset from the end of h addressable, * our horizontalSyncStart is the offset from the beginning * of h addressable */ params.h_syncoffset = cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable)); params.h_syncwidth = cpu_to_le16((uint16_t)bp_params->h_sync_width); /* bios usHSyncOffset is the offset from the end of v addressable, * our verticalSyncStart is the offset from the beginning of * v addressable */ params.v_syncoffset = cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable)); params.v_syncwidth = cpu_to_le16((uint16_t)bp_params->v_sync_width); /* we assume that overscan from original timing does not get bigger * than 255 * we will program all the borders in the Set CRTC Overscan call below */ if (bp_params->flags.HSYNC_POSITIVE_POLARITY == 0) params.modemiscinfo = cpu_to_le16(le16_to_cpu(params.modemiscinfo) | ATOM_HSYNC_POLARITY); if (bp_params->flags.VSYNC_POSITIVE_POLARITY == 0) params.modemiscinfo = cpu_to_le16(le16_to_cpu(params.modemiscinfo) | ATOM_VSYNC_POLARITY); if (bp_params->flags.INTERLACE) { params.modemiscinfo = cpu_to_le16(le16_to_cpu(params.modemiscinfo) | ATOM_INTERLACE); /* original DAL code has this condition to apply this * for non-TV/CV only * due to complex MV testing for possible impact * if ( pACParameters->signal != SignalType_YPbPr && * pACParameters->signal != SignalType_Composite && * pACParameters->signal != SignalType_SVideo) */ { /* HW will deduct 0.5 line from 2nd feild. * i.e. for 1080i, it is 2 lines for 1st field, * 2.5 lines for the 2nd feild. 
we need input as 5 * instead of 4. * but it is 4 either from Edid data (spec CEA 861) * or CEA timing table. */ le16_add_cpu(&params.v_syncoffset, 1); } } if (bp_params->flags.HORZ_COUNT_BY_TWO) params.modemiscinfo = cpu_to_le16(le16_to_cpu(params.modemiscinfo) | 0x100); /* ATOM_DOUBLE_CLOCK_MODE */ if (EXEC_BIOS_CMD_TABLE(setcrtc_usingdtdtiming, params)) result = BP_RESULT_OK; return result; } /****************************************************************************** ****************************************************************************** ** ** ENABLE CRTC ** ****************************************************************************** *****************************************************************************/ static enum bp_result enable_crtc_v1( struct bios_parser *bp, enum controller_id controller_id, bool enable); static void init_enable_crtc(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(enablecrtc)) { case 1: bp->cmd_tbl.enable_crtc = enable_crtc_v1; break; default: dm_output_to_console("Don't have enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(enablecrtc)); bp->cmd_tbl.enable_crtc = NULL; break; } } static enum bp_result enable_crtc_v1( struct bios_parser *bp, enum controller_id controller_id, bool enable) { bool result = BP_RESULT_FAILURE; struct enable_crtc_parameters params = {0}; uint8_t id; if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) params.crtc_id = id; else return BP_RESULT_BADINPUT; if (enable) params.enable = ATOM_ENABLE; else params.enable = ATOM_DISABLE; if (EXEC_BIOS_CMD_TABLE(enablecrtc, params)) result = BP_RESULT_OK; return result; } /****************************************************************************** ****************************************************************************** ** ** DISPLAY PLL ** ****************************************************************************** *****************************************************************************/ 
/****************************************************************************** ****************************************************************************** ** ** EXTERNAL ENCODER CONTROL ** ****************************************************************************** *****************************************************************************/ static enum bp_result external_encoder_control_v3( struct bios_parser *bp, struct bp_external_encoder_control *cntl); static void init_external_encoder_control( struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(externalencodercontrol)) { case 3: bp->cmd_tbl.external_encoder_control = external_encoder_control_v3; break; default: bp->cmd_tbl.external_encoder_control = NULL; break; } } static enum bp_result external_encoder_control_v3( struct bios_parser *bp, struct bp_external_encoder_control *cntl) { /* TODO */ return BP_RESULT_OK; } /****************************************************************************** ****************************************************************************** ** ** ENABLE DISPLAY POWER GATING ** ****************************************************************************** *****************************************************************************/ static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, enum bp_pipe_control_action action); static enum bp_result enable_disp_power_gating_fallback( struct bios_parser *bp, enum controller_id crtc_id, enum bp_pipe_control_action action); static void init_enable_disp_power_gating( struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)) { case 1: bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_v2_1; break; default: dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)); bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback; break; } } static void 
enable_disp_power_gating_dmcub( struct dc_dmub_srv *dmcub, struct enable_disp_power_gating_parameters_v2_1 *pwr) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.enable_disp_power_gating.header.type = DMUB_CMD__VBIOS; cmd.enable_disp_power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; cmd.enable_disp_power_gating.header.payload_bytes = sizeof(cmd.enable_disp_power_gating) - sizeof(cmd.enable_disp_power_gating.header); cmd.enable_disp_power_gating.power_gating.pwr = *pwr; dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, enum bp_pipe_control_action action) { enum bp_result result = BP_RESULT_FAILURE; struct enable_disp_power_gating_ps_allocation ps = { { 0 } }; uint8_t atom_crtc_id; if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id)) ps.param.disp_pipe_id = atom_crtc_id; else return BP_RESULT_BADINPUT; ps.param.enable = bp->cmd_helper->disp_power_gating_action_to_atom(action); if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv, &ps.param); return BP_RESULT_OK; } if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param)) result = BP_RESULT_OK; return result; } static enum bp_result enable_disp_power_gating_fallback( struct bios_parser *bp, enum controller_id crtc_id, enum bp_pipe_control_action action) { if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { return enable_disp_power_gating_v2_1(bp, crtc_id, action); } return BP_RESULT_FAILURE; } /****************************************************************************** ******************************************************************************* ** ** SET DCE CLOCK ** ******************************************************************************* *******************************************************************************/ static 
enum bp_result set_dce_clock_v2_1( struct bios_parser *bp, struct bp_set_dce_clock_parameters *bp_params); static void init_set_dce_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(setdceclock)) { case 1: bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1; break; default: dm_output_to_console("Don't have set_dce_clock for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(setdceclock)); bp->cmd_tbl.set_dce_clock = NULL; break; } } static enum bp_result set_dce_clock_v2_1( struct bios_parser *bp, struct bp_set_dce_clock_parameters *bp_params) { enum bp_result result = BP_RESULT_FAILURE; struct set_dce_clock_ps_allocation_v2_1 params; uint32_t atom_pll_id; uint32_t atom_clock_type; const struct command_table_helper *cmd = bp->cmd_helper; memset(&params, 0, sizeof(params)); if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) || !cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type)) return BP_RESULT_BADINPUT; params.param.dceclksrc = atom_pll_id; params.param.dceclktype = atom_clock_type; if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) { if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK) params.param.dceclkflag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK; if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK) params.param.dceclkflag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE; if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK) params.param.dceclkflag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN; if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK) params.param.dceclkflag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA; } else /* only program clock frequency if display clock is used; * VBIOS will program DPREFCLK * We need to convert from KHz units into 10KHz units */ params.param.dceclk_10khz = cpu_to_le32( bp_params->target_clock_frequency / 10); DC_LOG_BIOS("%s:target_clock_frequency = %d"\ "clock_type = %d \n", __func__,\ bp_params->target_clock_frequency,\ bp_params->clock_type); if (EXEC_BIOS_CMD_TABLE(setdceclock, params)) { /* 
Convert from 10KHz units back to KHz */ bp_params->target_clock_frequency = le32_to_cpu( params.param.dceclk_10khz) * 10; result = BP_RESULT_OK; } return result; } /****************************************************************************** ****************************************************************************** ** ** GET SMU CLOCK INFO ** ****************************************************************************** *****************************************************************************/ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id); static void init_get_smu_clock_info(struct bios_parser *bp) { /* TODO add switch for table vrsion */ bp->cmd_tbl.get_smu_clock_info = get_smu_clock_info_v3_1; } static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id) { struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0}; struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output; smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ; smu_input.syspll_id = id; /* Get Specific Clock */ if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) { memmove(&smu_output, &smu_input, sizeof( struct atom_get_smu_clock_info_parameters_v3_1)); return smu_output.atom_smu_outputclkfreq.syspllvcofreq_10khz; } return 0; } /****************************************************************************** ****************************************************************************** ** ** LVTMA CONTROL ** ****************************************************************************** *****************************************************************************/ static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, uint8_t panel_instance, uint8_t bypass_panel_control_wait); static void init_enable_lvtma_control(struct bios_parser *bp) { /* TODO add switch for table vrsion */ bp->cmd_tbl.enable_lvtma_control = enable_lvtma_control; } static void enable_lvtma_control_dmcub( struct 
dc_dmub_srv *dmcub, uint8_t uc_pwr_on, uint8_t panel_instance, uint8_t bypass_panel_control_wait) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.lvtma_control.header.type = DMUB_CMD__VBIOS; cmd.lvtma_control.header.sub_type = DMUB_CMD__VBIOS_LVTMA_CONTROL; cmd.lvtma_control.data.uc_pwr_action = uc_pwr_on; cmd.lvtma_control.data.panel_inst = panel_instance; cmd.lvtma_control.data.bypass_panel_control_wait = bypass_panel_control_wait; dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, uint8_t panel_instance, uint8_t bypass_panel_control_wait) { enum bp_result result = BP_RESULT_FAILURE; if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv, uc_pwr_on, panel_instance, bypass_panel_control_wait); return BP_RESULT_OK; } return result; } void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp) { init_dig_encoder_control(bp); init_transmitter_control(bp); init_set_pixel_clock(bp); init_set_crtc_timing(bp); init_enable_crtc(bp); init_external_encoder_control(bp); init_enable_disp_power_gating(bp); init_set_dce_clock(bp); init_get_smu_clock_info(bp); init_enable_lvtma_control(bp); }
linux-master
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "bios_parser_common.h"
#include "include/grph_object_ctrl_defs.h"

/* Decode the object-type field of a packed BIOS graphics object id. */
static enum object_type object_type_from_bios_object_id(uint32_t bios_object_id)
{
	uint32_t bios_object_type = (bios_object_id & OBJECT_TYPE_MASK)
				>> OBJECT_TYPE_SHIFT;
	enum object_type object_type;

	switch (bios_object_type) {
	case GRAPH_OBJECT_TYPE_GPU:
		object_type = OBJECT_TYPE_GPU;
		break;
	case GRAPH_OBJECT_TYPE_ENCODER:
		object_type = OBJECT_TYPE_ENCODER;
		break;
	case GRAPH_OBJECT_TYPE_CONNECTOR:
		object_type = OBJECT_TYPE_CONNECTOR;
		break;
	case GRAPH_OBJECT_TYPE_ROUTER:
		object_type = OBJECT_TYPE_ROUTER;
		break;
	case GRAPH_OBJECT_TYPE_GENERIC:
		object_type = OBJECT_TYPE_GENERIC;
		break;
	default:
		object_type = OBJECT_TYPE_UNKNOWN;
		break;
	}

	return object_type;
}

/* Decode the enum-id (instance) field of a packed BIOS graphics object id. */
static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id)
{
	uint32_t bios_enum_id =
			(bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	enum object_enum_id id;

	switch (bios_enum_id) {
	case GRAPH_OBJECT_ENUM_ID1:
		id = ENUM_ID_1;
		break;
	case GRAPH_OBJECT_ENUM_ID2:
		id = ENUM_ID_2;
		break;
	case GRAPH_OBJECT_ENUM_ID3:
		id = ENUM_ID_3;
		break;
	case GRAPH_OBJECT_ENUM_ID4:
		id = ENUM_ID_4;
		break;
	case GRAPH_OBJECT_ENUM_ID5:
		id = ENUM_ID_5;
		break;
	case GRAPH_OBJECT_ENUM_ID6:
		id = ENUM_ID_6;
		break;
	case GRAPH_OBJECT_ENUM_ID7:
		id = ENUM_ID_7;
		break;
	default:
		id = ENUM_ID_UNKNOWN;
		break;
	}

	return id;
}

/* Extract the raw object-id field; also used as the input for the
 * encoder/connector/generic decoders below.
 */
static uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id)
{
	return (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
}

/* Map a BIOS encoder object id to the driver's encoder_id enum.
 * Unrecognized ids assert: an unknown encoder in the BIOS object table
 * indicates a parser/table mismatch worth catching in debug builds.
 */
static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id)
{
	uint32_t bios_encoder_id = gpu_id_from_bios_object_id(bios_object_id);
	enum encoder_id id;

	switch (bios_encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		id = ENCODER_ID_INTERNAL_LVDS;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
		id = ENCODER_ID_INTERNAL_TMDS1;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_TMDS2:
		id = ENCODER_ID_INTERNAL_TMDS2;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
		id = ENCODER_ID_INTERNAL_DAC1;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
		id = ENCODER_ID_INTERNAL_DAC2;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		id = ENCODER_ID_INTERNAL_LVTM1;
		break;
	case ENCODER_OBJECT_ID_HDMI_INTERNAL:
		id = ENCODER_ID_INTERNAL_HDMI;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		id = ENCODER_ID_INTERNAL_KLDSCP_TMDS1;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		id = ENCODER_ID_INTERNAL_KLDSCP_DAC1;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		id = ENCODER_ID_INTERNAL_KLDSCP_DAC2;
		break;
	case ENCODER_OBJECT_ID_MVPU_FPGA:
		id = ENCODER_ID_EXTERNAL_MVPU_FPGA;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		id = ENCODER_ID_INTERNAL_DDI;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		id = ENCODER_ID_INTERNAL_UNIPHY;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		id = ENCODER_ID_INTERNAL_KLDSCP_LVTMA;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		id = ENCODER_ID_INTERNAL_UNIPHY1;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		id = ENCODER_ID_INTERNAL_UNIPHY2;
		break;
	case ENCODER_OBJECT_ID_ALMOND: /* ENCODER_OBJECT_ID_NUTMEG */
		id = ENCODER_ID_EXTERNAL_NUTMEG;
		break;
	case ENCODER_OBJECT_ID_TRAVIS:
		id = ENCODER_ID_EXTERNAL_TRAVIS;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		id = ENCODER_ID_INTERNAL_UNIPHY3;
		break;
	default:
		id = ENCODER_ID_UNKNOWN;
		ASSERT(0);
		break;
	}

	return id;
}

/* Map a BIOS connector object id to the driver's connector_id enum. */
static enum connector_id connector_id_from_bios_object_id(
	uint32_t bios_object_id)
{
	uint32_t bios_connector_id =
			gpu_id_from_bios_object_id(bios_object_id);

	enum connector_id id;

	switch (bios_connector_id) {
	case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
		id = CONNECTOR_ID_SINGLE_LINK_DVII;
		break;
	case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
		id = CONNECTOR_ID_DUAL_LINK_DVII;
		break;
	case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
		id = CONNECTOR_ID_SINGLE_LINK_DVID;
		break;
	case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
		id = CONNECTOR_ID_DUAL_LINK_DVID;
		break;
	case CONNECTOR_OBJECT_ID_VGA:
		id = CONNECTOR_ID_VGA;
		break;
	case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
		id = CONNECTOR_ID_HDMI_TYPE_A;
		break;
	case CONNECTOR_OBJECT_ID_LVDS:
		id = CONNECTOR_ID_LVDS;
		break;
	case CONNECTOR_OBJECT_ID_PCIE_CONNECTOR:
		id = CONNECTOR_ID_PCIE;
		break;
	case CONNECTOR_OBJECT_ID_HARDCODE_DVI:
		id = CONNECTOR_ID_HARDCODE_DVI;
		break;
	case CONNECTOR_OBJECT_ID_DISPLAYPORT:
		id = CONNECTOR_ID_DISPLAY_PORT;
		break;
	case CONNECTOR_OBJECT_ID_eDP:
		id = CONNECTOR_ID_EDP;
		break;
	case CONNECTOR_OBJECT_ID_MXM:
		id = CONNECTOR_ID_MXM;
		break;
	case CONNECTOR_OBJECT_ID_USBC:
		id = CONNECTOR_ID_USBC;
		break;
	default:
		id = CONNECTOR_ID_UNKNOWN;
		break;
	}

	return id;
}

/* Map a BIOS generic object id to the driver's generic_id enum. */
static enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id)
{
	uint32_t bios_generic_id = gpu_id_from_bios_object_id(bios_object_id);

	enum generic_id id;

	switch (bios_generic_id) {
	case GENERIC_OBJECT_ID_MXM_OPM:
		id = GENERIC_ID_MXM_OPM;
		break;
	case GENERIC_OBJECT_ID_GLSYNC:
		id = GENERIC_ID_GLSYNC;
		break;
	case GENERIC_OBJECT_ID_STEREO_PIN:
		id = GENERIC_ID_STEREO;
		break;
	default:
		id = GENERIC_ID_UNKNOWN;
		break;
	}

	return id;
}

/* Dispatch to the per-type decoder; 0 for types with no id namespace. */
static uint32_t id_from_bios_object_id(enum object_type type,
	uint32_t bios_object_id)
{
	switch (type) {
	case OBJECT_TYPE_GPU:
		return gpu_id_from_bios_object_id(bios_object_id);
	case OBJECT_TYPE_ENCODER:
		return (uint32_t)encoder_id_from_bios_object_id(bios_object_id);
	case OBJECT_TYPE_CONNECTOR:
		return (uint32_t)connector_id_from_bios_object_id(
				bios_object_id);
	case OBJECT_TYPE_GENERIC:
		return generic_id_from_bios_object_id(bios_object_id);
	default:
		return 0;
	}
}

/* Unpack a raw BIOS graphics object id into a graphics_object_id struct.
 * Returns a zeroed (unknown) id if either the type or the enum-id field
 * cannot be decoded.
 */
struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id)
{
	enum object_type type;
	enum object_enum_id enum_id;
	struct graphics_object_id go_id = { 0 };

	type = object_type_from_bios_object_id(bios_object_id);

	if (OBJECT_TYPE_UNKNOWN == type)
		return go_id;

	enum_id = enum_id_from_bios_object_id(bios_object_id);

	if (ENUM_ID_UNKNOWN == enum_id)
		return go_id;

	go_id = dal_graphics_object_id_init(
			id_from_bios_object_id(type, bios_object_id),
			enum_id, type);

	return go_id;
}
linux-master
drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "ObjectID.h" #include "atomfirmware.h" #include "include/bios_parser_types.h" #include "command_table_helper2.h" bool dal_bios_parser_init_cmd_tbl_helper2( const struct command_table_helper **h, enum dce_version dce) { switch (dce) { #if defined(CONFIG_DRM_AMD_DC_SI) case DCE_VERSION_6_0: case DCE_VERSION_6_1: case DCE_VERSION_6_4: *h = dal_cmd_tbl_helper_dce60_get_table(); return true; #endif case DCE_VERSION_8_0: case DCE_VERSION_8_1: case DCE_VERSION_8_3: *h = dal_cmd_tbl_helper_dce80_get_table(); return true; case DCE_VERSION_10_0: *h = dal_cmd_tbl_helper_dce110_get_table(); return true; case DCE_VERSION_11_0: *h = dal_cmd_tbl_helper_dce110_get_table(); return true; case DCE_VERSION_11_2: case DCE_VERSION_11_22: case DCE_VERSION_12_0: case DCE_VERSION_12_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; case DCN_VERSION_1_0: case DCN_VERSION_1_01: case DCN_VERSION_2_0: case DCN_VERSION_2_1: case DCN_VERSION_2_01: case DCN_VERSION_3_0: case DCN_VERSION_3_01: case DCN_VERSION_3_02: case DCN_VERSION_3_03: case DCN_VERSION_3_1: case DCN_VERSION_3_14: case DCN_VERSION_3_15: case DCN_VERSION_3_16: case DCN_VERSION_3_2: case DCN_VERSION_3_21: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; default: /* Unsupported DCE */ BREAK_TO_DEBUGGER(); return false; } } /* real implementations */ bool dal_cmd_table_helper_controller_id_to_atom2( enum controller_id id, uint8_t *atom_id) { if (atom_id == NULL) { BREAK_TO_DEBUGGER(); return false; } switch (id) { case CONTROLLER_ID_D0: *atom_id = ATOM_CRTC1; return true; case CONTROLLER_ID_D1: *atom_id = ATOM_CRTC2; return true; case CONTROLLER_ID_D2: *atom_id = ATOM_CRTC3; return true; case CONTROLLER_ID_D3: *atom_id = ATOM_CRTC4; return true; case CONTROLLER_ID_D4: *atom_id = ATOM_CRTC5; return true; case CONTROLLER_ID_D5: *atom_id = ATOM_CRTC6; return true; /* TODO :case CONTROLLER_ID_UNDERLAY0: *atom_id = ATOM_UNDERLAY_PIPE0; return true; */ case 
CONTROLLER_ID_UNDEFINED: *atom_id = ATOM_CRTC_INVALID; return true; default: /* Wrong controller id */ BREAK_TO_DEBUGGER(); return false; } } /** * dal_cmd_table_helper_transmitter_bp_to_atom2 - Translate the Transmitter to the * corresponding ATOM BIOS value * @t: transmitter * returns: digitalTransmitter * // =00: Digital Transmitter1 ( UNIPHY linkAB ) * // =01: Digital Transmitter2 ( UNIPHY linkCD ) * // =02: Digital Transmitter3 ( UNIPHY linkEF ) */ uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2( enum transmitter t) { switch (t) { case TRANSMITTER_UNIPHY_A: case TRANSMITTER_UNIPHY_B: case TRANSMITTER_TRAVIS_LCD: return 0; case TRANSMITTER_UNIPHY_C: case TRANSMITTER_UNIPHY_D: return 1; case TRANSMITTER_UNIPHY_E: case TRANSMITTER_UNIPHY_F: return 2; default: /* Invalid Transmitter Type! */ BREAK_TO_DEBUGGER(); return 0; } } uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2( enum signal_type s, bool enable_dp_audio) { switch (s) { case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: return ATOM_ENCODER_MODE_DVI; case SIGNAL_TYPE_HDMI_TYPE_A: return ATOM_ENCODER_MODE_HDMI; case SIGNAL_TYPE_LVDS: return ATOM_ENCODER_MODE_LVDS; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_VIRTUAL: if (enable_dp_audio) return ATOM_ENCODER_MODE_DP_AUDIO; else return ATOM_ENCODER_MODE_DP; case SIGNAL_TYPE_RGB: return ATOM_ENCODER_MODE_CRT; default: return ATOM_ENCODER_MODE_CRT; } } bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2( enum clock_source_id id, uint32_t *ref_clk_src_id) { if (ref_clk_src_id == NULL) { BREAK_TO_DEBUGGER(); return false; } switch (id) { case CLOCK_SOURCE_ID_PLL1: *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL; return true; case CLOCK_SOURCE_ID_PLL2: *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL; return true; /*TODO:case CLOCK_SOURCE_ID_DCPLL: *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL; return true; */ case CLOCK_SOURCE_ID_EXTERNAL: *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK; 
return true; case CLOCK_SOURCE_ID_UNDEFINED: *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID; return true; default: /* Unsupported clock source id */ BREAK_TO_DEBUGGER(); return false; } } uint8_t dal_cmd_table_helper_encoder_id_to_atom2( enum encoder_id id) { switch (id) { case ENCODER_ID_INTERNAL_LVDS: return ENCODER_OBJECT_ID_INTERNAL_LVDS; case ENCODER_ID_INTERNAL_TMDS1: return ENCODER_OBJECT_ID_INTERNAL_TMDS1; case ENCODER_ID_INTERNAL_TMDS2: return ENCODER_OBJECT_ID_INTERNAL_TMDS2; case ENCODER_ID_INTERNAL_DAC1: return ENCODER_OBJECT_ID_INTERNAL_DAC1; case ENCODER_ID_INTERNAL_DAC2: return ENCODER_OBJECT_ID_INTERNAL_DAC2; case ENCODER_ID_INTERNAL_LVTM1: return ENCODER_OBJECT_ID_INTERNAL_LVTM1; case ENCODER_ID_INTERNAL_HDMI: return ENCODER_OBJECT_ID_HDMI_INTERNAL; case ENCODER_ID_EXTERNAL_TRAVIS: return ENCODER_OBJECT_ID_TRAVIS; case ENCODER_ID_EXTERNAL_NUTMEG: return ENCODER_OBJECT_ID_NUTMEG; case ENCODER_ID_INTERNAL_KLDSCP_TMDS1: return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1; case ENCODER_ID_INTERNAL_KLDSCP_DAC1: return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1; case ENCODER_ID_INTERNAL_KLDSCP_DAC2: return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2; case ENCODER_ID_EXTERNAL_MVPU_FPGA: return ENCODER_OBJECT_ID_MVPU_FPGA; case ENCODER_ID_INTERNAL_DDI: return ENCODER_OBJECT_ID_INTERNAL_DDI; case ENCODER_ID_INTERNAL_UNIPHY: return ENCODER_OBJECT_ID_INTERNAL_UNIPHY; case ENCODER_ID_INTERNAL_KLDSCP_LVTMA: return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA; case ENCODER_ID_INTERNAL_UNIPHY1: return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1; case ENCODER_ID_INTERNAL_UNIPHY2: return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2; case ENCODER_ID_INTERNAL_UNIPHY3: return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3; case ENCODER_ID_INTERNAL_WIRELESS: return ENCODER_OBJECT_ID_INTERNAL_VCE; case ENCODER_ID_INTERNAL_VIRTUAL: return ENCODER_OBJECT_ID_NONE; case ENCODER_ID_UNKNOWN: return ENCODER_OBJECT_ID_NONE; default: /* Invalid encoder id */ BREAK_TO_DEBUGGER(); return ENCODER_OBJECT_ID_NONE; } }
linux-master
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "atom.h"

#include "include/grph_object_id.h"
#include "include/grph_object_defs.h"
#include "include/bios_parser_types.h"

#include "../command_table_helper.h"

/* Map a BIOS-parser encoder control action to its ATOM command value. */
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
	uint8_t atom_code = 0;

	switch (action) {
	case ENCODER_CONTROL_ENABLE:
		atom_code = ATOM_ENABLE;
		break;
	case ENCODER_CONTROL_DISABLE:
		atom_code = ATOM_DISABLE;
		break;
	case ENCODER_CONTROL_SETUP:
		atom_code = ATOM_ENCODER_CMD_SETUP;
		break;
	case ENCODER_CONTROL_INIT:
		atom_code = ATOM_ENCODER_INIT;
		break;
	default:
		/* Unhandled action in driver. */
		BREAK_TO_DEBUGGER();
		break;
	}

	return atom_code;
}

/*
 * Map a DIG/DAC engine id to the ASIC internal encoder id.
 * Returns false when the output pointer is NULL or the engine is unknown.
 */
static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
{
	bool mapped = false;

	if (atom_engine_id != NULL)
		switch (id) {
		case ENGINE_ID_DIGA:
			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
			mapped = true;
			break;
		case ENGINE_ID_DIGB:
			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
			mapped = true;
			break;
		case ENGINE_ID_DIGC:
			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
			mapped = true;
			break;
		case ENGINE_ID_DIGD:
			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
			mapped = true;
			break;
		case ENGINE_ID_DIGE:
			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
			mapped = true;
			break;
		case ENGINE_ID_DIGF:
			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
			mapped = true;
			break;
		case ENGINE_ID_DIGG:
			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
			mapped = true;
			break;
		case ENGINE_ID_DACA:
			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
			mapped = true;
			break;
		default:
			break;
		}

	return mapped;
}

/*
 * Map a clock source id to the ATOM PLL id.
 * NOTE(review): with a NULL output pointer this still returns true,
 * matching the original behavior — confirm callers never pass NULL.
 */
static bool clock_source_id_to_atom(
	enum clock_source_id id,
	uint32_t *atom_pll_id)
{
	bool ok = true;

	if (atom_pll_id != NULL)
		switch (id) {
		case CLOCK_SOURCE_ID_PLL0:
			*atom_pll_id = ATOM_PPLL0;
			break;
		case CLOCK_SOURCE_ID_PLL1:
			*atom_pll_id = ATOM_PPLL1;
			break;
		case CLOCK_SOURCE_ID_PLL2:
			*atom_pll_id = ATOM_PPLL2;
			break;
		case CLOCK_SOURCE_ID_EXTERNAL:
			*atom_pll_id = ATOM_PPLL_INVALID;
			break;
		case CLOCK_SOURCE_ID_DFS:
			*atom_pll_id = ATOM_EXT_PLL1;
			break;
		case CLOCK_SOURCE_ID_VCE:
			/* for VCE encoding,
			 * we need to pass in ATOM_PPLL_INVALID
			 */
			*atom_pll_id = ATOM_PPLL_INVALID;
			break;
		case CLOCK_SOURCE_ID_DP_DTO:
			/* When programming DP DTO PLL ID should be invalid */
			*atom_pll_id = ATOM_PPLL_INVALID;
			break;
		case CLOCK_SOURCE_ID_UNDEFINED:
			/* check when this will happen! */
			BREAK_TO_DEBUGGER();
			*atom_pll_id = ATOM_PPLL_INVALID;
			ok = false;
			break;
		default:
			ok = false;
			break;
		}

	return ok;
}

/* Map a clock source id to the transmitter-config PHY clock source field. */
static uint8_t clock_source_id_to_atom_phy_clk_src_id(
	enum clock_source_id id)
{
	uint8_t src_id = 0;

	switch (id) {
	case CLOCK_SOURCE_ID_PLL0:
		src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
		break;
	case CLOCK_SOURCE_ID_PLL1:
		src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
		break;
	case CLOCK_SOURCE_ID_PLL2:
		src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
		break;
	case CLOCK_SOURCE_ID_EXTERNAL:
		src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
		break;
	default:
		src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
		break;
	}

	/* Shift the config bits down into the field position. */
	return src_id >> 2;
}

/* Map a signal type to the V5 DIG transmitter mode; default is DVI. */
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
	uint8_t dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;

	switch (s) {
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_EDP:
		dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
		break;
	case SIGNAL_TYPE_LVDS:
		dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
		break;
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
		dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
		break;
	case SIGNAL_TYPE_HDMI_TYPE_A:
		dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
		break;
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
		break;
	default:
		dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
		break;
	}

	return dig_mode;
}

/* Map an HPD source id to the V5 transmitter-config HPD select field. */
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
	uint8_t hpd_sel = 0;

	switch (id) {
	case HPD_SOURCEID1:
		hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
		break;
	case HPD_SOURCEID2:
		hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
		break;
	case HPD_SOURCEID3:
		hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
		break;
	case HPD_SOURCEID4:
		hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
		break;
	case HPD_SOURCEID5:
		hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
		break;
	case HPD_SOURCEID6:
		hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
		break;
	case HPD_SOURCEID_UNKNOWN:
	default:
		hpd_sel = 0;
		break;
	}

	/* Shift the config bits down into the field position. */
	return hpd_sel >> 4;
}

/* Map a DIG engine id to the V5 transmitter DIG-encoder select value. */
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
	uint8_t dig_sel = 0;

	switch (id) {
	case ENGINE_ID_DIGA:
		dig_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
		break;
	case ENGINE_ID_DIGB:
		dig_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
		break;
	case ENGINE_ID_DIGC:
		dig_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
		break;
	case ENGINE_ID_DIGD:
		dig_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
		break;
	case ENGINE_ID_DIGE:
		dig_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
		break;
	case ENGINE_ID_DIGF:
		dig_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
		break;
	case ENGINE_ID_DIGG:
		dig_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
		break;
	default:
		dig_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
		break;
	}

	return dig_sel;
}

/* Map a transmitter to the ATOM PHY id; unknown transmitters → UNIPHYA. */
static uint8_t phy_id_to_atom(enum transmitter t)
{
	uint8_t phy_id;

	switch (t) {
	case TRANSMITTER_UNIPHY_A:
		phy_id = ATOM_PHY_ID_UNIPHYA;
		break;
	case TRANSMITTER_UNIPHY_B:
		phy_id = ATOM_PHY_ID_UNIPHYB;
		break;
	case TRANSMITTER_UNIPHY_C:
		phy_id = ATOM_PHY_ID_UNIPHYC;
		break;
	case TRANSMITTER_UNIPHY_D:
		phy_id = ATOM_PHY_ID_UNIPHYD;
		break;
	case TRANSMITTER_UNIPHY_E:
		phy_id = ATOM_PHY_ID_UNIPHYE;
		break;
	case TRANSMITTER_UNIPHY_F:
		phy_id = ATOM_PHY_ID_UNIPHYF;
		break;
	case TRANSMITTER_UNIPHY_G:
		phy_id = ATOM_PHY_ID_UNIPHYG;
		break;
	default:
		phy_id = ATOM_PHY_ID_UNIPHYA;
		break;
	}

	return phy_id;
}

/* Map a display-pipe power-gating action to its ATOM command value. */
static uint8_t disp_power_gating_action_to_atom(
	enum bp_pipe_control_action action)
{
	uint8_t pipe_action = 0;

	switch (action) {
	case ASIC_PIPE_DISABLE:
		pipe_action = ATOM_DISABLE;
		break;
	case ASIC_PIPE_ENABLE:
		pipe_action = ATOM_ENABLE;
		break;
	case ASIC_PIPE_INIT:
		pipe_action = ATOM_INIT;
		break;
	default:
		/* Unhandled action in driver. */
		BREAK_TO_DEBUGGER();
		break;
	}

	return pipe_action;
}

/* DCE 8.x helper function table. */
static const struct command_table_helper command_table_helper_funcs = {
	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
	.encoder_action_to_atom = encoder_action_to_atom,
	.engine_bp_to_atom = engine_bp_to_atom,
	.clock_source_id_to_atom = clock_source_id_to_atom,
	.clock_source_id_to_atom_phy_clk_src_id =
			clock_source_id_to_atom_phy_clk_src_id,
	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
	.hpd_sel_to_atom = hpd_sel_to_atom,
	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
	.phy_id_to_atom = phy_id_to_atom,
	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
	.assign_control_parameter =
			dal_cmd_table_helper_assign_control_parameter,
	.clock_source_id_to_ref_clk_src =
			dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
	.transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
	.encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
};

/* Return the DCE 8.x command-table helper function set. */
const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void)
{
	return &command_table_helper_funcs;
}
linux-master
drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "atom.h" #include "include/bios_parser_types.h" #include "../command_table_helper2.h" static uint8_t phy_id_to_atom(enum transmitter t) { uint8_t atom_phy_id; switch (t) { case TRANSMITTER_UNIPHY_A: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break; case TRANSMITTER_UNIPHY_B: atom_phy_id = ATOM_PHY_ID_UNIPHYB; break; case TRANSMITTER_UNIPHY_C: atom_phy_id = ATOM_PHY_ID_UNIPHYC; break; case TRANSMITTER_UNIPHY_D: atom_phy_id = ATOM_PHY_ID_UNIPHYD; break; case TRANSMITTER_UNIPHY_E: atom_phy_id = ATOM_PHY_ID_UNIPHYE; break; case TRANSMITTER_UNIPHY_F: atom_phy_id = ATOM_PHY_ID_UNIPHYF; break; case TRANSMITTER_UNIPHY_G: atom_phy_id = ATOM_PHY_ID_UNIPHYG; break; default: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break; } return atom_phy_id; } static uint8_t signal_type_to_atom_dig_mode(enum signal_type s) { uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP; switch (s) { case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_EDP: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP; break; case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI; break; case SIGNAL_TYPE_HDMI_TYPE_A: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI; break; case SIGNAL_TYPE_DISPLAY_PORT_MST: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST; break; default: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI; break; } return atom_dig_mode; } static uint8_t clock_source_id_to_atom_phy_clk_src_id( enum clock_source_id id) { uint8_t atom_phy_clk_src_id = 0; switch (id) { case CLOCK_SOURCE_ID_PLL0: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL; break; case CLOCK_SOURCE_ID_PLL1: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break; case CLOCK_SOURCE_ID_PLL2: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL; break; case CLOCK_SOURCE_ID_EXTERNAL: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT; break; default: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break; } 
return atom_phy_clk_src_id >> 2; } static uint8_t hpd_sel_to_atom(enum hpd_source_id id) { uint8_t atom_hpd_sel = 0; switch (id) { case HPD_SOURCEID1: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL; break; case HPD_SOURCEID2: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL; break; case HPD_SOURCEID3: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL; break; case HPD_SOURCEID4: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL; break; case HPD_SOURCEID5: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL; break; case HPD_SOURCEID6: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL; break; case HPD_SOURCEID_UNKNOWN: default: atom_hpd_sel = 0; break; } return atom_hpd_sel; } static uint8_t dig_encoder_sel_to_atom(enum engine_id id) { /* On any ASIC after DCE80, we manually program the DIG_FE * selection (see connect_dig_be_to_fe function of the link * encoder), so translation should always return 0 (no FE). */ return 0; } static bool clock_source_id_to_atom( enum clock_source_id id, uint32_t *atom_pll_id) { bool result = true; if (atom_pll_id != NULL) switch (id) { case CLOCK_SOURCE_COMBO_PHY_PLL0: *atom_pll_id = ATOM_COMBOPHY_PLL0; break; case CLOCK_SOURCE_COMBO_PHY_PLL1: *atom_pll_id = ATOM_COMBOPHY_PLL1; break; case CLOCK_SOURCE_COMBO_PHY_PLL2: *atom_pll_id = ATOM_COMBOPHY_PLL2; break; case CLOCK_SOURCE_COMBO_PHY_PLL3: *atom_pll_id = ATOM_COMBOPHY_PLL3; break; case CLOCK_SOURCE_COMBO_PHY_PLL4: *atom_pll_id = ATOM_COMBOPHY_PLL4; break; case CLOCK_SOURCE_COMBO_PHY_PLL5: *atom_pll_id = ATOM_COMBOPHY_PLL5; break; case CLOCK_SOURCE_COMBO_DISPLAY_PLL0: *atom_pll_id = ATOM_PPLL0; break; case CLOCK_SOURCE_ID_DFS: *atom_pll_id = ATOM_GCK_DFS; break; case CLOCK_SOURCE_ID_VCE: *atom_pll_id = ATOM_DP_DTO; break; case CLOCK_SOURCE_ID_DP_DTO: *atom_pll_id = ATOM_DP_DTO; break; case CLOCK_SOURCE_ID_UNDEFINED: /* Should not happen */ *atom_pll_id = ATOM_PPLL_INVALID; result = false; break; default: result = false; break; } return result; } static bool engine_bp_to_atom(enum engine_id id, uint32_t 
*atom_engine_id) { bool result = false; if (atom_engine_id != NULL) switch (id) { case ENGINE_ID_DIGA: *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID; result = true; break; case ENGINE_ID_DIGB: *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID; result = true; break; case ENGINE_ID_DIGC: *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID; result = true; break; case ENGINE_ID_DIGD: *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID; result = true; break; case ENGINE_ID_DIGE: *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID; result = true; break; case ENGINE_ID_DIGF: *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID; result = true; break; case ENGINE_ID_DIGG: *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID; result = true; break; case ENGINE_ID_DACA: *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID; result = true; break; default: break; } return result; } static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action) { uint8_t atom_action = 0; switch (action) { case ENCODER_CONTROL_ENABLE: atom_action = ATOM_ENABLE; break; case ENCODER_CONTROL_DISABLE: atom_action = ATOM_DISABLE; break; case ENCODER_CONTROL_SETUP: atom_action = ATOM_ENCODER_CMD_STREAM_SETUP; break; case ENCODER_CONTROL_INIT: atom_action = ATOM_ENCODER_INIT; break; default: BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */ break; } return atom_action; } static uint8_t disp_power_gating_action_to_atom( enum bp_pipe_control_action action) { uint8_t atom_pipe_action = 0; switch (action) { case ASIC_PIPE_DISABLE: atom_pipe_action = ATOM_DISABLE; break; case ASIC_PIPE_ENABLE: atom_pipe_action = ATOM_ENABLE; break; case ASIC_PIPE_INIT: atom_pipe_action = ATOM_INIT; break; default: ASSERT_CRITICAL(false); /* Unhandle action in driver! 
*/ break; } return atom_pipe_action; } static bool dc_clock_type_to_atom( enum bp_dce_clock_type id, uint32_t *atom_clock_type) { bool retCode = true; if (atom_clock_type != NULL) { switch (id) { case DCECLOCK_TYPE_DISPLAY_CLOCK: *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK; break; case DCECLOCK_TYPE_DPREFCLK: *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK; break; default: ASSERT_CRITICAL(false); /* Unhandle action in driver! */ break; } } return retCode; } static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id) { uint8_t atomColorDepth = 0; switch (id) { case TRANSMITTER_COLOR_DEPTH_24: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS; break; case TRANSMITTER_COLOR_DEPTH_30: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4; break; case TRANSMITTER_COLOR_DEPTH_36: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2; break; case TRANSMITTER_COLOR_DEPTH_48: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1; break; default: ASSERT_CRITICAL(false); /* Unhandle action in driver! 
*/ break; } return atomColorDepth; } /* function table */ static const struct command_table_helper command_table_helper_funcs = { .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom2, .encoder_action_to_atom = encoder_action_to_atom, .engine_bp_to_atom = engine_bp_to_atom, .clock_source_id_to_atom = clock_source_id_to_atom, .clock_source_id_to_atom_phy_clk_src_id = clock_source_id_to_atom_phy_clk_src_id, .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode, .hpd_sel_to_atom = hpd_sel_to_atom, .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom, .phy_id_to_atom = phy_id_to_atom, .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom, .clock_source_id_to_ref_clk_src = NULL, .transmitter_bp_to_atom = NULL, .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom2, .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom2, .dc_clock_type_to_atom = dc_clock_type_to_atom, .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom, }; /* * dal_cmd_tbl_helper_dce110_get_table * * @brief * Initialize command table helper functions * * @param * const struct command_table_helper **h - [out] struct of functions * */ const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void) { return &command_table_helper_funcs; }
linux-master
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "atom.h" #include "include/bios_parser_types.h" #include "../command_table_helper.h" static uint8_t phy_id_to_atom(enum transmitter t) { uint8_t atom_phy_id; switch (t) { case TRANSMITTER_UNIPHY_A: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break; case TRANSMITTER_UNIPHY_B: atom_phy_id = ATOM_PHY_ID_UNIPHYB; break; case TRANSMITTER_UNIPHY_C: atom_phy_id = ATOM_PHY_ID_UNIPHYC; break; case TRANSMITTER_UNIPHY_D: atom_phy_id = ATOM_PHY_ID_UNIPHYD; break; case TRANSMITTER_UNIPHY_E: atom_phy_id = ATOM_PHY_ID_UNIPHYE; break; case TRANSMITTER_UNIPHY_F: atom_phy_id = ATOM_PHY_ID_UNIPHYF; break; case TRANSMITTER_UNIPHY_G: atom_phy_id = ATOM_PHY_ID_UNIPHYG; break; default: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break; } return atom_phy_id; } static uint8_t signal_type_to_atom_dig_mode(enum signal_type s) { uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP; switch (s) { case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_EDP: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP; break; case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI; break; case SIGNAL_TYPE_HDMI_TYPE_A: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI; break; case SIGNAL_TYPE_DISPLAY_PORT_MST: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST; break; default: atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI; break; } return atom_dig_mode; } static uint8_t clock_source_id_to_atom_phy_clk_src_id( enum clock_source_id id) { uint8_t atom_phy_clk_src_id = 0; switch (id) { case CLOCK_SOURCE_ID_PLL0: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL; break; case CLOCK_SOURCE_ID_PLL1: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break; case CLOCK_SOURCE_ID_PLL2: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL; break; case CLOCK_SOURCE_ID_EXTERNAL: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT; break; default: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break; } 
return atom_phy_clk_src_id >> 2; } static uint8_t hpd_sel_to_atom(enum hpd_source_id id) { uint8_t atom_hpd_sel = 0; switch (id) { case HPD_SOURCEID1: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL; break; case HPD_SOURCEID2: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL; break; case HPD_SOURCEID3: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL; break; case HPD_SOURCEID4: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL; break; case HPD_SOURCEID5: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL; break; case HPD_SOURCEID6: atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL; break; case HPD_SOURCEID_UNKNOWN: default: atom_hpd_sel = 0; break; } return atom_hpd_sel; } static uint8_t dig_encoder_sel_to_atom(enum engine_id id) { /* On any ASIC after DCE80, we manually program the DIG_FE * selection (see connect_dig_be_to_fe function of the link * encoder), so translation should always return 0 (no FE). */ return 0; } static bool clock_source_id_to_atom( enum clock_source_id id, uint32_t *atom_pll_id) { bool result = true; if (atom_pll_id != NULL) switch (id) { case CLOCK_SOURCE_COMBO_PHY_PLL0: *atom_pll_id = ATOM_COMBOPHY_PLL0; break; case CLOCK_SOURCE_COMBO_PHY_PLL1: *atom_pll_id = ATOM_COMBOPHY_PLL1; break; case CLOCK_SOURCE_COMBO_PHY_PLL2: *atom_pll_id = ATOM_COMBOPHY_PLL2; break; case CLOCK_SOURCE_COMBO_PHY_PLL3: *atom_pll_id = ATOM_COMBOPHY_PLL3; break; case CLOCK_SOURCE_COMBO_PHY_PLL4: *atom_pll_id = ATOM_COMBOPHY_PLL4; break; case CLOCK_SOURCE_COMBO_PHY_PLL5: *atom_pll_id = ATOM_COMBOPHY_PLL5; break; case CLOCK_SOURCE_COMBO_DISPLAY_PLL0: *atom_pll_id = ATOM_PPLL0; break; case CLOCK_SOURCE_ID_DFS: *atom_pll_id = ATOM_GCK_DFS; break; case CLOCK_SOURCE_ID_VCE: *atom_pll_id = ATOM_DP_DTO; break; case CLOCK_SOURCE_ID_DP_DTO: *atom_pll_id = ATOM_DP_DTO; break; case CLOCK_SOURCE_ID_UNDEFINED: /* Should not happen */ *atom_pll_id = ATOM_PPLL_INVALID; result = false; break; default: result = false; break; } return result; } static bool engine_bp_to_atom(enum engine_id id, uint32_t 
*atom_engine_id) { bool result = false; if (atom_engine_id != NULL) switch (id) { case ENGINE_ID_DIGA: *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID; result = true; break; case ENGINE_ID_DIGB: *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID; result = true; break; case ENGINE_ID_DIGC: *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID; result = true; break; case ENGINE_ID_DIGD: *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID; result = true; break; case ENGINE_ID_DIGE: *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID; result = true; break; case ENGINE_ID_DIGF: *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID; result = true; break; case ENGINE_ID_DIGG: *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID; result = true; break; case ENGINE_ID_DACA: *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID; result = true; break; default: break; } return result; } static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action) { uint8_t atom_action = 0; switch (action) { case ENCODER_CONTROL_ENABLE: atom_action = ATOM_ENABLE; break; case ENCODER_CONTROL_DISABLE: atom_action = ATOM_DISABLE; break; case ENCODER_CONTROL_SETUP: atom_action = ATOM_ENCODER_CMD_STREAM_SETUP; break; case ENCODER_CONTROL_INIT: atom_action = ATOM_ENCODER_INIT; break; default: BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */ break; } return atom_action; } static uint8_t disp_power_gating_action_to_atom( enum bp_pipe_control_action action) { uint8_t atom_pipe_action = 0; switch (action) { case ASIC_PIPE_DISABLE: atom_pipe_action = ATOM_DISABLE; break; case ASIC_PIPE_ENABLE: atom_pipe_action = ATOM_ENABLE; break; case ASIC_PIPE_INIT: atom_pipe_action = ATOM_INIT; break; default: ASSERT_CRITICAL(false); /* Unhandle action in driver! 
*/ break; } return atom_pipe_action; } static bool dc_clock_type_to_atom( enum bp_dce_clock_type id, uint32_t *atom_clock_type) { bool retCode = true; if (atom_clock_type != NULL) { switch (id) { case DCECLOCK_TYPE_DISPLAY_CLOCK: *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK; break; case DCECLOCK_TYPE_DPREFCLK: *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK; break; default: ASSERT_CRITICAL(false); /* Unhandle action in driver! */ break; } } return retCode; } static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id) { uint8_t atomColorDepth = 0; switch (id) { case TRANSMITTER_COLOR_DEPTH_24: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS; break; case TRANSMITTER_COLOR_DEPTH_30: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4; break; case TRANSMITTER_COLOR_DEPTH_36: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2; break; case TRANSMITTER_COLOR_DEPTH_48: atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1; break; default: ASSERT_CRITICAL(false); /* Unhandle action in driver! 
*/ break; } return atomColorDepth; } /* function table */ static const struct command_table_helper command_table_helper_funcs = { .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom, .encoder_action_to_atom = encoder_action_to_atom, .engine_bp_to_atom = engine_bp_to_atom, .clock_source_id_to_atom = clock_source_id_to_atom, .clock_source_id_to_atom_phy_clk_src_id = clock_source_id_to_atom_phy_clk_src_id, .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode, .hpd_sel_to_atom = hpd_sel_to_atom, .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom, .phy_id_to_atom = phy_id_to_atom, .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom, .assign_control_parameter = NULL, .clock_source_id_to_ref_clk_src = NULL, .transmitter_bp_to_atom = NULL, .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom, .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom, .dc_clock_type_to_atom = dc_clock_type_to_atom, .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom, }; /* * dal_cmd_tbl_helper_dce110_get_table * * @brief * Initialize command table helper functions * * @param * const struct command_table_helper **h - [out] struct of functions * */ const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void) { return &command_table_helper_funcs; }
linux-master
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
/*
 * Copyright 2020 Mauro Rossi <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/*
 * DCE6 command-table helper: translates driver-side enums (encoder control
 * actions, engine/clock-source/HPD ids, signal types, PHY ids, power-gating
 * actions) into the numeric tokens the AtomBIOS command tables expect.
 */

#include "dm_services.h"

#include "atom.h"

#include "include/grph_object_id.h"
#include "include/grph_object_defs.h"
#include "include/bios_parser_types.h"

#include "../command_table_helper.h"

/* Map a BIOS-parser encoder control action onto the matching ATOM opcode. */
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
	uint8_t atom_action = 0;

	switch (action) {
	case ENCODER_CONTROL_ENABLE: atom_action = ATOM_ENABLE; break;
	case ENCODER_CONTROL_DISABLE: atom_action = ATOM_DISABLE; break;
	case ENCODER_CONTROL_SETUP: atom_action = ATOM_ENCODER_CMD_SETUP; break;
	case ENCODER_CONTROL_INIT: atom_action = ATOM_ENCODER_INIT; break;
	default:
		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
		break;
	}

	return atom_action;
}

/*
 * Translate an internal engine id into the ATOM internal-encoder object id.
 * Returns true and writes *atom_engine_id on success; false for engines
 * with no ATOM equivalent or a NULL out-pointer.
 */
static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
{
	bool result = false;

	if (atom_engine_id != NULL)
		switch (id) {
		case ENGINE_ID_DIGA: *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGB: *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGC: *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGD: *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGE: *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGF: *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGG: *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID; result = true; break;
		case ENGINE_ID_DACA: *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID; result = true; break;
		default:
			break;
		}

	return result;
}

/*
 * Translate a clock-source id into the ATOM PLL id.
 * Returns false (and, for UNDEFINED, still writes ATOM_PPLL_INVALID) when
 * the id has no valid mapping.
 */
static bool clock_source_id_to_atom(
	enum clock_source_id id,
	uint32_t *atom_pll_id)
{
	bool result = true;

	if (atom_pll_id != NULL)
		switch (id) {
		case CLOCK_SOURCE_ID_PLL0: *atom_pll_id = ATOM_PPLL0; break;
		case CLOCK_SOURCE_ID_PLL1: *atom_pll_id = ATOM_PPLL1; break;
		case CLOCK_SOURCE_ID_PLL2: *atom_pll_id = ATOM_PPLL2; break;
		case CLOCK_SOURCE_ID_EXTERNAL: *atom_pll_id = ATOM_PPLL_INVALID; break;
		case CLOCK_SOURCE_ID_DFS: *atom_pll_id = ATOM_EXT_PLL1; break;
		case CLOCK_SOURCE_ID_VCE:
			/* for VCE encoding,
			 * we need to pass in ATOM_PPLL_INVALID
			 */
			*atom_pll_id = ATOM_PPLL_INVALID;
			break;
		case CLOCK_SOURCE_ID_DP_DTO:
			/* When programming DP DTO PLL ID should be invalid */
			*atom_pll_id = ATOM_PPLL_INVALID;
			break;
		case CLOCK_SOURCE_ID_UNDEFINED:
			BREAK_TO_DEBUGGER(); /* check when this will happen! */
			*atom_pll_id = ATOM_PPLL_INVALID;
			result = false;
			break;
		default:
			result = false;
			break;
		}

	return result;
}

/*
 * Translate a clock-source id into the transmitter-config PHY clock source
 * field. The CONFIG_V5 defines appear to carry the field pre-shifted
 * (bits [3:2]); >> 2 yields the raw 2-bit value — NOTE(review): same
 * convention as the >> 4 in hpd_sel_to_atom below, confirm against atombios.h.
 */
static uint8_t clock_source_id_to_atom_phy_clk_src_id(
		enum clock_source_id id)
{
	uint8_t atom_phy_clk_src_id = 0;

	switch (id) {
	case CLOCK_SOURCE_ID_PLL0: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL; break;
	case CLOCK_SOURCE_ID_PLL1: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break;
	case CLOCK_SOURCE_ID_PLL2: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL; break;
	case CLOCK_SOURCE_ID_EXTERNAL: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT; break;
	default: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break;
	}

	return atom_phy_clk_src_id >> 2;
}

/* Map a dc signal type onto the ATOM V5 DIG mode; unknown types fall back
 * to DVI mode. */
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;

	switch (s) {
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_EDP:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
		break;
	case SIGNAL_TYPE_LVDS:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
		break;
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
		break;
	case SIGNAL_TYPE_HDMI_TYPE_A:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
		break;
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
		break;
	default:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
		break;
	}

	return atom_dig_mode;
}

/* Map an HPD source id onto the transmitter-config HPD select field;
 * unknown sources map to 0. The >> 4 strips the pre-shifted field offset
 * of the CONFIG_V5 defines. */
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
	uint8_t atom_hpd_sel = 0;

	switch (id) {
	case HPD_SOURCEID1: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL; break;
	case HPD_SOURCEID2: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL; break;
	case HPD_SOURCEID3: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL; break;
	case HPD_SOURCEID4: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL; break;
	case HPD_SOURCEID5: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL; break;
	case HPD_SOURCEID6: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL; break;
	case HPD_SOURCEID_UNKNOWN:
	default:
		atom_hpd_sel = 0;
		break;
	}
	return atom_hpd_sel >> 4;
}

/* Map a DIG front-end engine id onto the transmitter DIG select token.
 * Unlike the DCE11x helper (which always returns 0), DCE6 selects the FE
 * through the command table, defaulting to DIGA for unknown engines. */
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
	uint8_t atom_dig_encoder_sel = 0;

	switch (id) {
	case ENGINE_ID_DIGA: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL; break;
	case ENGINE_ID_DIGB: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL; break;
	case ENGINE_ID_DIGC: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL; break;
	case ENGINE_ID_DIGD: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL; break;
	case ENGINE_ID_DIGE: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL; break;
	case ENGINE_ID_DIGF: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL; break;
	case ENGINE_ID_DIGG: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL; break;
	default: atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL; break;
	}

	return atom_dig_encoder_sel;
}

/* Map a transmitter (UNIPHY instance) onto its ATOM PHY id; unknown
 * transmitters default to UNIPHYA. */
static uint8_t phy_id_to_atom(enum transmitter t)
{
	uint8_t atom_phy_id;

	switch (t) {
	case TRANSMITTER_UNIPHY_A: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break;
	case TRANSMITTER_UNIPHY_B: atom_phy_id = ATOM_PHY_ID_UNIPHYB; break;
	case TRANSMITTER_UNIPHY_C: atom_phy_id = ATOM_PHY_ID_UNIPHYC; break;
	case TRANSMITTER_UNIPHY_D: atom_phy_id = ATOM_PHY_ID_UNIPHYD; break;
	case TRANSMITTER_UNIPHY_E: atom_phy_id = ATOM_PHY_ID_UNIPHYE; break;
	case TRANSMITTER_UNIPHY_F: atom_phy_id = ATOM_PHY_ID_UNIPHYF; break;
	case TRANSMITTER_UNIPHY_G: atom_phy_id = ATOM_PHY_ID_UNIPHYG; break;
	default: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break;
	}
	return atom_phy_id;
}

/* Map a pipe power-gating action onto the ATOM enable/disable/init token. */
static uint8_t disp_power_gating_action_to_atom(
	enum bp_pipe_control_action action)
{
	uint8_t atom_pipe_action = 0;

	switch (action) {
	case ASIC_PIPE_DISABLE: atom_pipe_action = ATOM_DISABLE; break;
	case ASIC_PIPE_ENABLE: atom_pipe_action = ATOM_ENABLE; break;
	case ASIC_PIPE_INIT: atom_pipe_action = ATOM_INIT; break;
	default:
		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
		break;
	}

	return atom_pipe_action;
}

/* function table — DCE6 uses the shared dal_cmd_table_helper_* callbacks
 * for the generic translations and the local helpers above for the rest. */
static const struct command_table_helper command_table_helper_funcs = {
	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
	.encoder_action_to_atom = encoder_action_to_atom,
	.engine_bp_to_atom = engine_bp_to_atom,
	.clock_source_id_to_atom = clock_source_id_to_atom,
	.clock_source_id_to_atom_phy_clk_src_id =
			clock_source_id_to_atom_phy_clk_src_id,
	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
	.hpd_sel_to_atom = hpd_sel_to_atom,
	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
	.phy_id_to_atom = phy_id_to_atom,
	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
	.assign_control_parameter =
			dal_cmd_table_helper_assign_control_parameter,
	.clock_source_id_to_ref_clk_src =
			dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
	.transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
	.encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
};

/* Return the DCE6 command-table helper vtable. */
const struct command_table_helper *dal_cmd_tbl_helper_dce60_get_table(void)
{
	return &command_table_helper_funcs;
}
linux-master
drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/*
 * DCE11.0 command-table helper: translates driver-side enums into the
 * numeric tokens the AtomBIOS command tables expect.
 */

#include "dm_services.h"

#include "atom.h"

#include "include/bios_parser_types.h"

#include "../command_table_helper.h"

/* Map a transmitter (UNIPHY instance) onto its ATOM PHY id; unknown
 * transmitters default to UNIPHYA. */
static uint8_t phy_id_to_atom(enum transmitter t)
{
	uint8_t atom_phy_id;

	switch (t) {
	case TRANSMITTER_UNIPHY_A: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break;
	case TRANSMITTER_UNIPHY_B: atom_phy_id = ATOM_PHY_ID_UNIPHYB; break;
	case TRANSMITTER_UNIPHY_C: atom_phy_id = ATOM_PHY_ID_UNIPHYC; break;
	case TRANSMITTER_UNIPHY_D: atom_phy_id = ATOM_PHY_ID_UNIPHYD; break;
	case TRANSMITTER_UNIPHY_E: atom_phy_id = ATOM_PHY_ID_UNIPHYE; break;
	case TRANSMITTER_UNIPHY_F: atom_phy_id = ATOM_PHY_ID_UNIPHYF; break;
	case TRANSMITTER_UNIPHY_G: atom_phy_id = ATOM_PHY_ID_UNIPHYG; break;
	default: atom_phy_id = ATOM_PHY_ID_UNIPHYA; break;
	}
	return atom_phy_id;
}

/* Map a dc signal type onto the ATOM V5 DIG mode; unknown types fall back
 * to DVI mode. */
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;

	switch (s) {
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_EDP:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
		break;
	case SIGNAL_TYPE_LVDS:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
		break;
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
		break;
	case SIGNAL_TYPE_HDMI_TYPE_A:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
		break;
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
		break;
	default:
		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
		break;
	}

	return atom_dig_mode;
}

/*
 * Translate a clock-source id into the transmitter-config PHY clock source
 * field. NOTE(review): the CONFIG_V5 defines appear to carry the field
 * pre-shifted; >> 2 extracts the raw 2-bit value (cf. the >> 4 in
 * hpd_sel_to_atom) — confirm against atombios.h.
 */
static uint8_t clock_source_id_to_atom_phy_clk_src_id(
		enum clock_source_id id)
{
	uint8_t atom_phy_clk_src_id = 0;

	switch (id) {
	case CLOCK_SOURCE_ID_PLL0: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL; break;
	case CLOCK_SOURCE_ID_PLL1: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break;
	case CLOCK_SOURCE_ID_PLL2: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL; break;
	case CLOCK_SOURCE_ID_EXTERNAL: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT; break;
	default: atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; break;
	}

	return atom_phy_clk_src_id >> 2;
}

/* Map an HPD source id onto the transmitter-config HPD select field;
 * unknown sources map to 0. */
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
	uint8_t atom_hpd_sel = 0;

	switch (id) {
	case HPD_SOURCEID1: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL; break;
	case HPD_SOURCEID2: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL; break;
	case HPD_SOURCEID3: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL; break;
	case HPD_SOURCEID4: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL; break;
	case HPD_SOURCEID5: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL; break;
	case HPD_SOURCEID6: atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL; break;
	case HPD_SOURCEID_UNKNOWN:
	default:
		atom_hpd_sel = 0;
		break;
	}
	return atom_hpd_sel >> 4;
}

static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
	/* On any ASIC after DCE80, we manually program the DIG_FE
	 * selection (see connect_dig_be_to_fe function of the link
	 * encoder), so translation should always return 0 (no FE).
	 */
	return 0;
}

/*
 * Translate a clock-source id into the ATOM PLL id.
 * Returns false (and, for UNDEFINED, still writes ATOM_PPLL_INVALID) when
 * the id has no valid mapping.
 */
static bool clock_source_id_to_atom(
	enum clock_source_id id,
	uint32_t *atom_pll_id)
{
	bool result = true;

	if (atom_pll_id != NULL)
		switch (id) {
		case CLOCK_SOURCE_ID_PLL0: *atom_pll_id = ATOM_PPLL0; break;
		case CLOCK_SOURCE_ID_PLL1: *atom_pll_id = ATOM_PPLL1; break;
		case CLOCK_SOURCE_ID_PLL2: *atom_pll_id = ATOM_PPLL2; break;
		case CLOCK_SOURCE_ID_EXTERNAL: *atom_pll_id = ATOM_PPLL_INVALID; break;
		case CLOCK_SOURCE_ID_DFS: *atom_pll_id = ATOM_EXT_PLL1; break;
		case CLOCK_SOURCE_ID_VCE:
			/* for VCE encoding,
			 * we need to pass in ATOM_PPLL_INVALID
			 */
			*atom_pll_id = ATOM_PPLL_INVALID;
			break;
		case CLOCK_SOURCE_ID_DP_DTO:
			/* When programming DP DTO PLL ID should be invalid */
			*atom_pll_id = ATOM_PPLL_INVALID;
			break;
		case CLOCK_SOURCE_ID_UNDEFINED:
			/* Should not happen */
			*atom_pll_id = ATOM_PPLL_INVALID;
			result = false;
			break;
		default:
			result = false;
			break;
		}

	return result;
}

/*
 * Translate an internal engine id into the ATOM internal-encoder object id.
 * Returns true and writes *atom_engine_id on success; false for engines
 * with no ATOM equivalent or a NULL out-pointer.
 */
static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
{
	bool result = false;

	if (atom_engine_id != NULL)
		switch (id) {
		case ENGINE_ID_DIGA: *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGB: *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGC: *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGD: *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGE: *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGF: *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID; result = true; break;
		case ENGINE_ID_DIGG: *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID; result = true; break;
		case ENGINE_ID_DACA: *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID; result = true; break;
		default:
			break;
		}

	return result;
}

/* Map a BIOS-parser encoder control action onto the matching ATOM opcode. */
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
	uint8_t atom_action = 0;

	switch (action) {
	case ENCODER_CONTROL_ENABLE: atom_action = ATOM_ENABLE; break;
	case ENCODER_CONTROL_DISABLE: atom_action = ATOM_DISABLE; break;
	case ENCODER_CONTROL_SETUP: atom_action = ATOM_ENCODER_CMD_SETUP; break;
	case ENCODER_CONTROL_INIT: atom_action = ATOM_ENCODER_INIT; break;
	default:
		BREAK_TO_DEBUGGER(); /* Unhandled action in driver.!! */
		break;
	}

	return atom_action;
}

/* Map a pipe power-gating action onto the ATOM enable/disable/init token. */
static uint8_t disp_power_gating_action_to_atom(
	enum bp_pipe_control_action action)
{
	uint8_t atom_pipe_action = 0;

	switch (action) {
	case ASIC_PIPE_DISABLE: atom_pipe_action = ATOM_DISABLE; break;
	case ASIC_PIPE_ENABLE: atom_pipe_action = ATOM_ENABLE; break;
	case ASIC_PIPE_INIT: atom_pipe_action = ATOM_INIT; break;
	default:
		ASSERT_CRITICAL(false); /* Unhandled action in driver! */
		break;
	}

	return atom_pipe_action;
}

/* function table — generic translations come from the shared
 * dal_cmd_table_helper_* callbacks; entries left NULL are not used on
 * this ASIC generation. */
static const struct command_table_helper command_table_helper_funcs = {
	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
	.encoder_action_to_atom = encoder_action_to_atom,
	.engine_bp_to_atom = engine_bp_to_atom,
	.clock_source_id_to_atom = clock_source_id_to_atom,
	.clock_source_id_to_atom_phy_clk_src_id =
			clock_source_id_to_atom_phy_clk_src_id,
	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
	.hpd_sel_to_atom = hpd_sel_to_atom,
	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
	.phy_id_to_atom = phy_id_to_atom,
	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
	.assign_control_parameter = NULL,
	.clock_source_id_to_ref_clk_src = NULL,
	.transmitter_bp_to_atom = NULL,
	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
	.encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
};

/*
 * dal_cmd_tbl_helper_dce110_get_table
 *
 * @brief
 * Initialize command table helper functions
 *
 * @param
 * const struct command_table_helper **h - [out] struct of functions
 *
 */
const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void)
{
	return &command_table_helper_funcs;
}
linux-master
drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/*
 * DCN2.0 stream encoder: programs the DIG front-end for HDMI/DP info
 * packets, DSC PPS transmission, dynamic metadata and DP stream unblank.
 * Builds on the DCN1.0 implementation (enc1_*) and overrides what changed
 * on DCN2.0.
 */

#include <linux/delay.h>

#include "dc_bios_types.h"
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
#include "link.h"
#include "dpcd_defs.h"

#define DC_LOGGER \
		enc1->base.ctx->logger

#define REG(reg)\
	(enc1->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	enc1->se_shift->field_name, enc1->se_mask->field_name

#define CTX \
	enc1->base.ctx

/*
 * Program one HDMI generic info packet slot (0..7). A valid packet is
 * written to the generic packet RAM and enabled for continuous per-frame
 * transmission on line 2; an invalid packet disables the slot.
 */
static void enc2_update_hdmi_info_packet(
	struct dcn10_stream_encoder *enc1,
	uint32_t packet_index,
	const struct dc_info_packet *info_packet)
{
	uint32_t cont, send, line;

	if (info_packet->valid) {
		enc1_update_generic_info_packet(
			enc1,
			packet_index,
			info_packet);

		/* enable transmission of packet(s) -
		 * packet transmission begins on the next frame */
		cont = 1;
		/* send packet(s) every frame */
		send = 1;
		/* select line number to send packets on */
		line = 2;
	} else {
		cont = 0;
		send = 0;
		line = 0;
	}

	/* DP_SEC_GSP[x]_LINE_REFERENCE - keep default value REFER_TO_DP_SOF */

	/* choose which generic packet control to use */
	switch (packet_index) {
	case 0:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC0_CONT, cont, HDMI_GENERIC0_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
				HDMI_GENERIC0_LINE, line);
		break;
	case 1:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC1_CONT, cont, HDMI_GENERIC1_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
				HDMI_GENERIC1_LINE, line);
		break;
	case 2:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC2_CONT, cont, HDMI_GENERIC2_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
				HDMI_GENERIC2_LINE, line);
		break;
	case 3:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC3_CONT, cont, HDMI_GENERIC3_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
				HDMI_GENERIC3_LINE, line);
		break;
	case 4:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC4_CONT, cont, HDMI_GENERIC4_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
				HDMI_GENERIC4_LINE, line);
		break;
	case 5:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC5_CONT, cont, HDMI_GENERIC5_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
				HDMI_GENERIC5_LINE, line);
		break;
	case 6:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC6_CONT, cont, HDMI_GENERIC6_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
				HDMI_GENERIC6_LINE, line);
		break;
	case 7:
		REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC7_CONT, cont, HDMI_GENERIC7_SEND, send);
		REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
				HDMI_GENERIC7_LINE, line);
		break;
	default:
		/* invalid HW packet index */
		DC_LOG_WARNING(
			"Invalid HW packet index: %s()\n",
			__func__);
		return;
	}
}

/* Program the full set of HDMI info frames into generic packet slots 0-6. */
static void enc2_stream_encoder_update_hdmi_info_packets(
	struct stream_encoder *enc,
	const struct encoder_info_frame *info_frame)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	/* for bring up, disable dp double  TODO */
	REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);

	/*Always add mandatory packets first followed by optional ones*/
	enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
	enc2_update_hdmi_info_packet(enc1, 1, &info_frame->hfvsif);
	enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
	enc2_update_hdmi_info_packet(enc1, 3, &info_frame->vendor);
	enc2_update_hdmi_info_packet(enc1, 4, &info_frame->spd);
	enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hdrsmd);
	enc2_update_hdmi_info_packet(enc1, 6, &info_frame->vtem);
}

/* Disable transmission of all eight HDMI generic packet slots. */
static void enc2_stream_encoder_stop_hdmi_info_packets(
	struct stream_encoder *enc)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	/* stop generic packets 0,1 on HDMI */
	REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
		HDMI_GENERIC0_CONT, 0,
		HDMI_GENERIC0_SEND, 0,
		HDMI_GENERIC1_CONT, 0,
		HDMI_GENERIC1_SEND, 0);
	REG_SET_2(HDMI_GENERIC_PACKET_CONTROL1, 0,
		HDMI_GENERIC0_LINE, 0,
		HDMI_GENERIC1_LINE, 0);

	/* stop generic packets 2,3 on HDMI */
	REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
		HDMI_GENERIC2_CONT, 0,
		HDMI_GENERIC2_SEND, 0,
		HDMI_GENERIC3_CONT, 0,
		HDMI_GENERIC3_SEND, 0);
	REG_SET_2(HDMI_GENERIC_PACKET_CONTROL2, 0,
		HDMI_GENERIC2_LINE, 0,
		HDMI_GENERIC3_LINE, 0);

	/* stop generic packets 4,5 on HDMI */
	REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
		HDMI_GENERIC4_CONT, 0,
		HDMI_GENERIC4_SEND, 0,
		HDMI_GENERIC5_CONT, 0,
		HDMI_GENERIC5_SEND, 0);
	REG_SET_2(HDMI_GENERIC_PACKET_CONTROL3, 0,
		HDMI_GENERIC4_LINE, 0,
		HDMI_GENERIC5_LINE, 0);

	/* stop generic packets 6,7 on HDMI */
	REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
		HDMI_GENERIC6_CONT, 0,
		HDMI_GENERIC6_SEND, 0,
		HDMI_GENERIC7_CONT, 0,
		HDMI_GENERIC7_SEND, 0);
	REG_SET_2(HDMI_GENERIC_PACKET_CONTROL4, 0,
		HDMI_GENERIC6_LINE, 0,
		HDMI_GENERIC7_LINE, 0);
}

/* Update GSP7 SDP 128 byte long — used for the DSC PPS SDP. The payload
 * is written across generic packet indexes 7..10 (32 bytes each). */
static void enc2_update_gsp7_128_info_packet(
	struct dcn10_stream_encoder *enc1,
	const struct dc_info_packet_128 *info_packet,
	bool immediate_update)
{
	uint32_t i;

	/* TODOFPGA Figure out a proper number for max_retries polling for lock
	 * use 50 for now.
	 */
	uint32_t max_retries = 50;
	const uint32_t *content = (const uint32_t *) &info_packet->sb[0];

	ASSERT(info_packet->hb1 == DC_DP_INFOFRAME_TYPE_PPS);

	/* Configure for PPS packet size (128 bytes) */
	REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 1);

	/* We need turn on clock before programming AFMT block*/
	REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);

	/* Poll dig_update_lock is not locked -> asic internal signal
	 * assumes otg master lock will unlock it
	 */
	/*REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, 0, 10, max_retries);*/

	/* Wait for HW/SW GSP memory access conflict to go away */
	REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
			0, 10, max_retries);

	/* Clear HW/SW memory access conflict flag */
	REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);

	/* write generic packet header */
	REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, 7);
	REG_SET_4(AFMT_GENERIC_HDR, 0,
			AFMT_GENERIC_HB0, info_packet->hb0,
			AFMT_GENERIC_HB1, info_packet->hb1,
			AFMT_GENERIC_HB2, info_packet->hb2,
			AFMT_GENERIC_HB3, info_packet->hb3);

	/* Write generic packet content 128 bytes long. Four sets are used
	 * (indexes 7 through 10) to fit 128 bytes.
	 */
	for (i = 0; i < 4; i++) {
		uint32_t packet_index = 7 + i;

		REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, packet_index);
		REG_WRITE(AFMT_GENERIC_0, *content++);
		REG_WRITE(AFMT_GENERIC_1, *content++);
		REG_WRITE(AFMT_GENERIC_2, *content++);
		REG_WRITE(AFMT_GENERIC_3, *content++);
		REG_WRITE(AFMT_GENERIC_4, *content++);
		REG_WRITE(AFMT_GENERIC_5, *content++);
		REG_WRITE(AFMT_GENERIC_6, *content++);
		REG_WRITE(AFMT_GENERIC_7, *content++);
	}

	/* Latch either on next frame or immediately, depending on caller. */
	REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL1,
			AFMT_GENERIC7_FRAME_UPDATE, !immediate_update,
			AFMT_GENERIC7_IMMEDIATE_UPDATE, immediate_update);
}

/* Set DSC-related configuration.
 *   dsc_mode: 0 disables DSC, other values enable DSC in specified format
 *   sc_bytes_per_pixel: Bytes per pixel in u3.28 format
 *   dsc_slice_width: Slice width in pixels
 */
static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
					enum optc_dsc_mode dsc_mode,
					uint32_t dsc_bytes_per_pixel,
					uint32_t dsc_slice_width)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	REG_UPDATE_2(DP_DSC_CNTL,
			DP_DSC_MODE, dsc_mode,
			DP_DSC_SLICE_WIDTH, dsc_slice_width);

	REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
		DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
}

/* Load (or stop) the 128-byte DSC PPS SDP via GSP7 and enable/disable its
 * transmission. */
static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
					bool enable,
					uint8_t *dsc_packed_pps,
					bool immediate_update)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (enable) {
		struct dc_info_packet_128 pps_sdp;

		ASSERT(dsc_packed_pps);

		/* Load PPS into infoframe (SDP) registers */
		pps_sdp.valid = true;
		pps_sdp.hb0 = 0;
		pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
		pps_sdp.hb2 = 127;
		pps_sdp.hb3 = 0;
		memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
		enc2_update_gsp7_128_info_packet(enc1, &pps_sdp, immediate_update);

		/* Enable Generic Stream Packet 7 (GSP) transmission */
		//REG_UPDATE(DP_SEC_CNTL,
		//	DP_SEC_GSP7_ENABLE, 1);

		/* SW should make sure VBID[6] update line number is bigger
		 * than PPS transmit line number
		 */
		REG_UPDATE(DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, 2);
		REG_UPDATE_2(DP_MSA_VBID_MISC,
				DP_VBID6_LINE_REFERENCE, 0,
				DP_VBID6_LINE_NUM, 3);

		/* Send PPS data at the line number specified above.
		 * DP spec requires PPS to be sent only when it changes, however since
		 * decoder has to be able to handle its change on every frame, we're
		 * sending it always (i.e. on every frame) to reduce the chance it'd be
		 * missed by decoder. If it turns out required to send PPS only when it
		 * changes, we can use DP_SEC_GSP7_SEND register.
		 */
		REG_UPDATE_2(DP_SEC_CNTL,
			DP_SEC_GSP7_ENABLE, 1,
			DP_SEC_STREAM_ENABLE, 1);
	} else {
		/* Disable Generic Stream Packet 7 (GSP) transmission */
		REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, 0);
		REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 0);
	}
}

/* this function read dsc related register fields to be logged later in
 * dcn10_log_hw_state into a dcn_dsc_state struct.
 */
static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	//if dsc is enabled, continue to read
	REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
	if (s->dsc_mode) {
		REG_GET(DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, &s->dsc_slice_width);
		REG_GET(DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, &s->sec_gsp_pps_line_num);

		REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
		REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);

		REG_GET(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, &s->sec_gsp_pps_enable);
		REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
	}
}

/* Set Dynamic Metadata-configuration.
 *   enable_dme:         TRUE: enables Dynamic Metadata Engine, FALSE: disables DME
 *   hubp_requestor_id:  HUBP physical instance that is the source of dynamic metadata
 *                       only needs to be set when enable_dme is TRUE
 *   dmdata_mode:        dynamic metadata packet type: DP, HDMI, or Dolby Vision
 *
 * Ensure the OTG master update lock is set when changing DME configuration.
 */
void enc2_set_dynamic_metadata(struct stream_encoder *enc,
		bool enable_dme,
		uint32_t hubp_requestor_id,
		enum dynamic_metadata_mode dmdata_mode)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (enable_dme) {
		REG_UPDATE_2(DME_CONTROL,
				METADATA_HUBP_REQUESTOR_ID, hubp_requestor_id,
				METADATA_STREAM_TYPE, (dmdata_mode == dmdata_dolby_vision) ? 1 : 0);

		/* Use default line reference DP_SOF for bringup.
		 * Should use OTG_SOF for DRR cases
		 */
		if (dmdata_mode == dmdata_dp)
			REG_UPDATE_3(DP_SEC_METADATA_TRANSMISSION,
					DP_SEC_METADATA_PACKET_ENABLE, 1,
					DP_SEC_METADATA_PACKET_LINE_REFERENCE, 0,
					DP_SEC_METADATA_PACKET_LINE, 20);
		else {
			REG_UPDATE_3(HDMI_METADATA_PACKET_CONTROL,
					HDMI_METADATA_PACKET_ENABLE, 1,
					HDMI_METADATA_PACKET_LINE_REFERENCE, 0,
					HDMI_METADATA_PACKET_LINE, 2);

			if (dmdata_mode == dmdata_dolby_vision)
				REG_UPDATE(DIG_FE_CNTL,
						DOLBY_VISION_EN, 1);
		}

		REG_UPDATE(DME_CONTROL,
				METADATA_ENGINE_EN, 1);
	} else {
		REG_UPDATE(DME_CONTROL,
				METADATA_ENGINE_EN, 0);

		if (dmdata_mode == dmdata_dp)
			REG_UPDATE(DP_SEC_METADATA_TRANSMISSION,
					DP_SEC_METADATA_PACKET_ENABLE, 0);
		else {
			REG_UPDATE(HDMI_METADATA_PACKET_CONTROL,
					HDMI_METADATA_PACKET_ENABLE, 0);
			REG_UPDATE(DIG_FE_CNTL,
					DOLBY_VISION_EN, 0);
		}
	}
}

/* Program the adaptive-sync SDP (GSP5) transmit line number, referenced to
 * OTG SOF, when the caller supplies a valid line number. */
static void enc2_stream_encoder_update_dp_info_packets_sdp_line_num(
		struct stream_encoder *enc,
		struct encoder_info_frame *info_frame)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (info_frame->adaptive_sync.valid == true &&
		info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
		//00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
		REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1);

		REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM,
					info_frame->sdp_line_num.adaptive_sync_line_num);
	}
}

/* Program DP info packets via the DCN1.0 path, then keep the SEC stream
 * enabled if dynamic-metadata transmission is active. */
static void enc2_stream_encoder_update_dp_info_packets(
	struct stream_encoder *enc,
	const struct encoder_info_frame *info_frame)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	uint32_t dmdata_packet_enabled = 0;

	enc1_stream_encoder_update_dp_info_packets(enc, info_frame);

	/* check if dynamic metadata packet transmission is enabled */
	REG_GET(DP_SEC_METADATA_TRANSMISSION,
			DP_SEC_METADATA_PACKET_ENABLE, &dmdata_packet_enabled);

	if (dmdata_packet_enabled)
		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
}

/* True when two pixels share one container: YCbCr 4:2:0, or non-simple
 * YCbCr 4:2:2 with DSC. */
static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
	bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;

	two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
			&& !timing->dsc_cfg.ycbcr422_simple);
	return two_pix;
}

/*
 * Unblank the DP stream: seed the Mvid/Nvid timing generator, reset the
 * DIG FIFO and steer FIFO, then enable VID_STREAM (takes effect at the
 * next frame boundary).
 */
void enc2_stream_encoder_dp_unblank(
		struct dc_link *link,
		struct stream_encoder *enc,
		const struct encoder_unblank_param *param)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
		uint32_t n_vid = 0x8000;
		uint32_t m_vid;
		uint32_t n_multiply = 0;
		uint64_t m_vid_l = n_vid;

		/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
		if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1) {
			/*this logic should be the same in get_pixel_clock_parameters() */
			n_multiply = 1;
		}
		/* M / N = Fstream / Flink
		 * m_vid / n_vid = pixel rate / link rate
		 */
		m_vid_l *= param->timing.pix_clk_100hz / 10;
		m_vid_l = div_u64(m_vid_l,
			param->link_settings.link_rate
				* LINK_RATE_REF_FREQ_IN_KHZ);

		m_vid = (uint32_t) m_vid_l;

		/* enable auto measurement */
		REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);

		/* auto measurement need 1 full 0x8000 symbol cycle to kick in,
		 * therefore program initial value for Mvid and Nvid
		 */
		REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
		REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);

		REG_UPDATE_2(DP_VID_TIMING,
				DP_VID_M_N_GEN_EN, 1,
				DP_VID_N_MUL, n_multiply);
	}

	/* make sure stream is disabled before resetting steer fifo */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);

	/* set DIG_START to 0x1 to reset FIFO */
	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
	udelay(1);

	/* write 0 to take the FIFO out of reset */
	REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);

	/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
	 * that it overflows during mode transition, and sometimes doesn't recover.
	 */
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
	udelay(10);

	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);

	/* wait 100us for DIG/DP logic to prime
	 * (i.e. a few video lines)
	 */
	udelay(100);

	/* the hardware would start sending video at the start of the next DP
	 * frame (i.e. rising edge of the vblank).
	 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
	 * register has no effect on enable transition! HW always guarantees
	 * VID_STREAM enable at start of next frame, and this is not
	 * programmable
	 */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);

	link->dc->link_srv->dp_trace_source_sequence(link,
			DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}

/* Enable/disable ODM pixel combining on the DP pixel format. */
static void enc2_dp_set_odm_combine(
	struct stream_encoder *enc,
	bool odm_combine)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, odm_combine);
}

/* Program DP stream attributes via the DCN1.0 path, then apply the
 * DCN2.0-only SST SDP-splitting control. */
void enc2_stream_encoder_dp_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	enum dc_color_space output_color_space,
	bool use_vsc_sdp_for_colorimetry,
	uint32_t enable_sdp_splitting)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	enc1_stream_encoder_dp_set_stream_attribute(enc,
			crtc_timing,
			output_color_space,
			use_vsc_sdp_for_colorimetry,
			enable_sdp_splitting);

	REG_UPDATE(DP_SEC_FRAMING4,
		DP_SST_SDP_SPLITTING, enable_sdp_splitting);
}

/* Read back the DIG FIFO calibrated average level. */
uint32_t enc2_get_fifo_cal_average_level(
		struct stream_encoder *enc)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	uint32_t fifo_level;

	REG_GET(DIG_FIFO_STATUS,
			DIG_FIFO_CAL_AVERAGE_LEVEL, &fifo_level);
	return fifo_level;
}

/* DCN2.0 stream-encoder vtable: DCN2.0-specific entry points (enc2_*) plus
 * inherited DCN1.0 implementations (enc1_*). */
static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
	.dp_set_odm_combine =
		enc2_dp_set_odm_combine,
	.dp_set_stream_attribute =
		enc2_stream_encoder_dp_set_stream_attribute,
	.hdmi_set_stream_attribute =
		enc1_stream_encoder_hdmi_set_stream_attribute,
	.dvi_set_stream_attribute =
		enc1_stream_encoder_dvi_set_stream_attribute,
	.set_throttled_vcp_size =
		enc1_stream_encoder_set_throttled_vcp_size,
	.update_hdmi_info_packets =
		enc2_stream_encoder_update_hdmi_info_packets,
	.stop_hdmi_info_packets =
		enc2_stream_encoder_stop_hdmi_info_packets,
	.update_dp_info_packets_sdp_line_num =
		enc2_stream_encoder_update_dp_info_packets_sdp_line_num,
	.update_dp_info_packets =
		enc2_stream_encoder_update_dp_info_packets,
	.send_immediate_sdp_message =
		enc1_stream_encoder_send_immediate_sdp_message,
	.stop_dp_info_packets =
		enc1_stream_encoder_stop_dp_info_packets,
	.dp_blank =
		enc1_stream_encoder_dp_blank,
	.dp_unblank =
		enc2_stream_encoder_dp_unblank,
	.audio_mute_control = enc1_se_audio_mute_control,

	.dp_audio_setup = enc1_se_dp_audio_setup,
	.dp_audio_enable = enc1_se_dp_audio_enable,
	.dp_audio_disable = enc1_se_dp_audio_disable,

	.hdmi_audio_setup = enc1_se_hdmi_audio_setup,
	.hdmi_audio_disable = enc1_se_hdmi_audio_disable,
	.setup_stereo_sync = enc1_setup_stereo_sync,
	.set_avmute = enc1_stream_encoder_set_avmute,
	.dig_connect_to_otg = enc1_dig_connect_to_otg,
	.dig_source_otg = enc1_dig_source_otg,

	.dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,

	.enc_read_state = enc2_read_state,
	.dp_set_dsc_config = enc2_dp_set_dsc_config,
	.dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
	.set_dynamic_metadata = enc2_set_dynamic_metadata,
	.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
	.get_fifo_cal_average_level = enc2_get_fifo_cal_average_level,
};

/* Wire up a DCN2.0 stream encoder instance: vtable, context, register
 * blocks, shift/mask tables, and the instance index derived from the
 * engine id. */
void dcn20_stream_encoder_construct(
	struct dcn10_stream_encoder *enc1,
	struct dc_context *ctx,
	struct dc_bios *bp,
	enum engine_id eng_id,
	const struct dcn10_stream_enc_registers *regs,
	const struct dcn10_stream_encoder_shift *se_shift,
	const struct dcn10_stream_encoder_mask *se_mask)
{
	enc1->base.funcs = &dcn20_str_enc_funcs;
	enc1->base.ctx = ctx;
	enc1->base.id = eng_id;
	enc1->base.bp = bp;
	enc1->regs = regs;
	enc1->se_shift = se_shift;
	enc1->se_mask = se_mask;
	enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dcn20_opp.h"
#include "reg_helper.h"

#define REG(reg) \
	(oppn20->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	oppn20->opp_shift->field_name, oppn20->opp_mask->field_name

#define CTX \
	oppn20->base.ctx

/* Program the OPP Display Pattern Generator (DPG) to emit the requested
 * DP test pattern (color squares, bars, ramp, solid color) at the given
 * bit depth and dimensions, or hand the pipe back to live video
 * (CONTROLLER_DP_TEST_PATTERN_VIDEOMODE clears all DPG registers).
 * @offset programs the DPG X offset; register writes take effect on the
 * DPG double-buffer boundary (see opp2_dpg_is_blanked).
 */
void opp2_set_disp_pattern_generator(
		struct output_pixel_processor *opp,
		enum controller_dp_test_pattern test_pattern,
		enum controller_dp_color_space color_space,
		enum dc_color_depth color_depth,
		const struct tg_color *solid_color,
		int width,
		int height,
		int offset)
{
	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
	enum test_pattern_color_format bit_depth;
	enum test_pattern_dyn_range dyn_range;
	enum test_pattern_mode mode;

	/* color ramp generator mixes 16-bits color */
	uint32_t src_bpc = 16;
	/* requested bpc */
	uint32_t dst_bpc;
	uint32_t index;
	/* RGB values of the color bars.
	 * Produce two RGB colors: RGB0 - white (all Fs)
	 * and RGB1 - black (all 0s)
	 * (three RGB components for two colors)
	 */
	uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000};
	/* dest color (converted to the specified color format) */
	uint16_t dst_color[6];
	uint32_t inc_base;

	/* translate to bit depth */
	switch (color_depth) {
	case COLOR_DEPTH_666:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
	break;
	case COLOR_DEPTH_888:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
	break;
	case COLOR_DEPTH_101010:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
	break;
	case COLOR_DEPTH_121212:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
	break;
	default:
		bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
	break;
	}

	/* set DPG dimensions */
	REG_SET_2(DPG_DIMENSIONS, 0,
		DPG_ACTIVE_WIDTH, width,
		DPG_ACTIVE_HEIGHT, height);

	/* set DPG offset */
	REG_SET_2(DPG_OFFSET_SEGMENT, 0,
		DPG_X_OFFSET, offset,
		DPG_SEGMENT_WIDTH, 0);

	switch (test_pattern) {
	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
	{
		dyn_range = (test_pattern ==
				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
				TEST_PATTERN_DYN_RANGE_CEA :
				TEST_PATTERN_DYN_RANGE_VESA);

		switch (color_space) {
		case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
			mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
		break;
		case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
			mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
		break;
		case CONTROLLER_DP_COLOR_SPACE_RGB:
		default:
			mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
		break;
		}

		REG_UPDATE_6(DPG_CONTROL,
			DPG_EN, 1,
			DPG_MODE, mode,
			DPG_DYNAMIC_RANGE, dyn_range,
			DPG_BIT_DEPTH, bit_depth,
			DPG_VRES, 6,
			DPG_HRES, 6);
	}
	break;

	case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
	case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
	{
		mode = (test_pattern ==
			CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
			TEST_PATTERN_MODE_VERTICALBARS :
			TEST_PATTERN_MODE_HORIZONTALBARS);

		/* NOTE(review): no BPC_12 case here, so a 12-bpc request
		 * falls back to the 8-bpc shift — confirm intentional.
		 */
		switch (bit_depth) {
		case TEST_PATTERN_COLOR_FORMAT_BPC_6:
			dst_bpc = 6;
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_8:
			dst_bpc = 8;
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_10:
			dst_bpc = 10;
		break;
		default:
			dst_bpc = 8;
		break;
		}

		/* adjust color to the required colorFormat */
		for (index = 0; index < 6; index++) {
			/* dst = 2^dstBpc * src / 2^srcBpc = src >>
			 * (srcBpc - dstBpc);
			 */
			dst_color[index] =
				src_color[index] >> (src_bpc - dst_bpc);
			/* DPG_COLOUR registers are 16-bit MSB aligned value with bits 3:0 hardwired to ZERO.
			 * XXXXXXXXXX000000 for 10 bit,
			 * XXXXXXXX00000000 for 8 bit,
			 * XXXXXX0000000000 for 6 bits
			 */
			dst_color[index] <<= (16 - dst_bpc);
		}

		REG_SET_2(DPG_COLOUR_R_CR, 0,
				DPG_COLOUR1_R_CR, dst_color[0],
				DPG_COLOUR0_R_CR, dst_color[3]);
		REG_SET_2(DPG_COLOUR_G_Y, 0,
				DPG_COLOUR1_G_Y, dst_color[1],
				DPG_COLOUR0_G_Y, dst_color[4]);
		REG_SET_2(DPG_COLOUR_B_CB, 0,
				DPG_COLOUR1_B_CB, dst_color[2],
				DPG_COLOUR0_B_CB, dst_color[5]);

		/* enable test pattern */
		REG_UPDATE_6(DPG_CONTROL,
			DPG_EN, 1,
			DPG_MODE, mode,
			DPG_DYNAMIC_RANGE, 0,
			DPG_BIT_DEPTH, bit_depth,
			DPG_VRES, 0,
			DPG_HRES, 0);
	}
	break;

	case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
	{
		/* 10-bpc uses a dual ramp; everything else a single ramp */
		mode = (bit_depth ==
			TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
			TEST_PATTERN_MODE_DUALRAMP_RGB :
			TEST_PATTERN_MODE_SINGLERAMP_RGB);

		switch (bit_depth) {
		case TEST_PATTERN_COLOR_FORMAT_BPC_6:
			dst_bpc = 6;
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_8:
			dst_bpc = 8;
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_10:
			dst_bpc = 10;
		break;
		default:
			dst_bpc = 8;
		break;
		}

		/* increment for the first ramp for one color gradation
		 * 1 gradation for 6-bit color is 2^10
		 * gradations in 16-bit color
		 */
		inc_base = (src_bpc - dst_bpc);

		switch (bit_depth) {
		case TEST_PATTERN_COLOR_FORMAT_BPC_6:
		{
			REG_SET_3(DPG_RAMP_CONTROL, 0,
				DPG_RAMP0_OFFSET, 0,
				DPG_INC0, inc_base,
				DPG_INC1, 0);
			REG_UPDATE_2(DPG_CONTROL,
				DPG_VRES, 6,
				DPG_HRES, 6);
		}
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_8:
		{
			REG_SET_3(DPG_RAMP_CONTROL, 0,
				DPG_RAMP0_OFFSET, 0,
				DPG_INC0, inc_base,
				DPG_INC1, 0);
			REG_UPDATE_2(DPG_CONTROL,
				DPG_VRES, 6,
				DPG_HRES, 8);
		}
		break;
		case TEST_PATTERN_COLOR_FORMAT_BPC_10:
		{
			REG_SET_3(DPG_RAMP_CONTROL, 0,
				DPG_RAMP0_OFFSET, 384 << 6,
				DPG_INC0, inc_base,
				DPG_INC1, inc_base + 2);
			REG_UPDATE_2(DPG_CONTROL,
				DPG_VRES, 5,
				DPG_HRES, 8);
		}
		break;
		default:
		break;
		}

		/* enable test pattern */
		REG_UPDATE_4(DPG_CONTROL,
			DPG_EN, 1,
			DPG_MODE, mode,
			DPG_DYNAMIC_RANGE, 0,
			DPG_BIT_DEPTH, bit_depth);
	}
	break;
	case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
	{
		/* restore live video: clear every DPG register */
		REG_WRITE(DPG_CONTROL, 0);
		REG_WRITE(DPG_COLOUR_R_CR, 0);
		REG_WRITE(DPG_COLOUR_G_Y, 0);
		REG_WRITE(DPG_COLOUR_B_CB, 0);
		REG_WRITE(DPG_RAMP_CONTROL, 0);
	}
	break;
	case CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR:
	{
		opp2_dpg_set_blank_color(opp, solid_color);
		REG_UPDATE_2(DPG_CONTROL,
				DPG_EN, 1,
				DPG_MODE, TEST_PATTERN_MODE_HORIZONTALBARS);

		REG_SET_2(DPG_DIMENSIONS, 0,
				DPG_ACTIVE_WIDTH, width,
				DPG_ACTIVE_HEIGHT, height);
	}
	break;
	default:
		break;
	}
}

/* Reprogram only the DPG active width/height (pattern type untouched). */
void opp2_program_dpg_dimensions(
		struct output_pixel_processor *opp,
		int width, int height)
{
	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);

	REG_SET_2(DPG_DIMENSIONS, 0,
		DPG_ACTIVE_WIDTH, width,
		DPG_ACTIVE_HEIGHT, height);
}

void opp2_dpg_set_blank_color(
		struct output_pixel_processor *opp,
		const
struct tg_color *color)
{
	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);

	/* 16-bit MSB aligned value. Bits 3:0 of this field are hardwired to ZERO
	 * — hence the << 6 to align the 10-bit component into the MSBs.
	 * Same color is written to both DPG_COLOUR0 and DPG_COLOUR1 so the
	 * generated pattern is a solid fill.
	 */
	ASSERT(color);
	REG_SET_2(DPG_COLOUR_B_CB, 0,
			DPG_COLOUR1_B_CB, color->color_b_cb << 6,
			DPG_COLOUR0_B_CB, color->color_b_cb << 6);
	REG_SET_2(DPG_COLOUR_G_Y, 0,
			DPG_COLOUR1_G_Y, color->color_g_y << 6,
			DPG_COLOUR0_G_Y, color->color_g_y << 6);
	REG_SET_2(DPG_COLOUR_R_CR, 0,
			DPG_COLOUR1_R_CR, color->color_r_cr << 6,
			DPG_COLOUR0_R_CR, color->color_r_cr << 6);
}

/* Returns true when the DPG is actively generating (DPG_EN set) and its
 * double-buffered programming has latched (no update pending).
 * NOTE(review): dpg_mode is read but not used in the result — appears to be
 * informational only; confirm before removing.
 */
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
{
	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
	uint32_t dpg_en, dpg_mode;
	uint32_t double_buffer_pending;

	REG_GET_2(DPG_CONTROL,
			DPG_EN, &dpg_en,
			DPG_MODE, &dpg_mode);

	REG_GET(DPG_STATUS,
			DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);

	return (dpg_en == 1) &&
		(double_buffer_pending == 0);
}

/* Program FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT (0 or 1 extra pixel). */
void opp2_program_left_edge_extra_pixel (
		struct output_pixel_processor *opp,
		bool count)
{
	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);

	/* Specifies the number of extra left edge pixels that are supplied to
	 * the 422 horizontal chroma sub-sample filter.
	 * Note that when left edge pixel is not "0", fmt pixel encoding can be in either 420 or 422 mode
	 * */
	REG_UPDATE(FMT_422_CONTROL, FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT, count);
}

/*****************************************/
/* Constructor, Destructor               */
/*****************************************/

/* DCN20 OPP vtable: opp1_* entries reuse DCN10 helpers, opp2_* entries
 * provide the DPG-based pattern generation new to this block.
 */
static struct opp_funcs dcn20_opp_funcs = {
		.opp_set_dyn_expansion = opp1_set_dyn_expansion,
		.opp_program_fmt = opp1_program_fmt,
		.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
		.opp_program_stereo = opp1_program_stereo,
		.opp_pipe_clock_control = opp1_pipe_clock_control,
		.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
		.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
		.dpg_is_blanked = opp2_dpg_is_blanked,
		.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
		.opp_destroy = opp1_destroy,
		.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
};

/* Initialize a DCN20 OPP instance: wires the vtable and stores the
 * per-instance register block and SHIFT/MASK tables (referenced, not
 * copied). No hardware access.
 */
void dcn20_opp_construct(struct dcn20_opp *oppn20,
	struct dc_context *ctx,
	uint32_t inst,
	const struct dcn20_opp_registers *regs,
	const struct dcn20_opp_shift *opp_shift,
	const struct dcn20_opp_mask *opp_mask)
{
	oppn20->base.ctx = ctx;
	oppn20->base.inst = inst;
	oppn20->base.funcs = &dcn20_opp_funcs;

	oppn20->regs = regs;
	oppn20->opp_shift = opp_shift;
	oppn20->opp_mask = opp_mask;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
/*
 * Copyright 2012-17 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "fixed31_32.h"
#include "resource.h"
#include "dwb.h"
#include "dcn20_dwb.h"

#define NUM_PHASES    16
#define HORZ_MAX_TAPS 12
#define VERT_MAX_TAPS 12

#define REG(reg)\
	dwbc20->dwbc_regs->reg

#define CTX \
	dwbc20->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dwbc20->dwbc_shift->field_name, dwbc20->dwbc_mask->field_name

#define TO_DCN20_DWBC(dwbc_base) \
	container_of(dwbc_base, struct dcn20_dwbc, base)

/* WBSCL (writeback scaler) polyphase coefficient tables.
 * One table per tap count (3..12) and per scaling-ratio band:
 *   _upscale : ratio < 1.0
 *   _117     : 1.0 <= ratio < 4/3
 *   _150     : 4/3 <= ratio < 5/3
 *   _183     : ratio >= 5/3
 * Each table holds (NUM_PHASES/2 + 1) = 9 phases x taps coefficients
 * (the remaining phases are mirrored by hardware).
 * NOTE(review): values >= ~15000 look like 14-bit two's-complement
 * negatives (e.g. 16348 == -36) — confirm against the WBSCL coef RAM
 * format before editing any value.
 */
static const uint16_t filter_3tap_16p_upscale[27] = {
	2048, 2048, 0,
	1708, 2424, 16348,
	1372, 2796, 16308,
	1056, 3148, 16272,
	768, 3464, 16244,
	512, 3728, 16236,
	296, 3928, 16252,
	124, 4052, 16296,
	0, 4096, 0
};

static const uint16_t filter_3tap_16p_117[27] = {
	2048, 2048, 0,
	1824, 2276, 16376,
	1600, 2496, 16380,
	1376, 2700, 16,
	1156, 2880, 52,
	948, 3032, 108,
	756, 3144, 192,
	580, 3212, 296,
	428, 3236, 428
};

static const uint16_t filter_3tap_16p_150[27] = {
	2048, 2048, 0,
	1872, 2184, 36,
	1692, 2308, 88,
	1516, 2420, 156,
	1340, 2516, 236,
	1168, 2592, 328,
	1004, 2648, 440,
	844, 2684, 560,
	696, 2696, 696
};

static const uint16_t filter_3tap_16p_183[27] = {
	2048, 2048, 0,
	1892, 2104, 92,
	1744, 2152, 196,
	1592, 2196, 300,
	1448, 2232, 412,
	1304, 2256, 528,
	1168, 2276, 648,
	1032, 2288, 772,
	900, 2292, 900
};

static const uint16_t filter_4tap_16p_upscale[36] = {
	0, 4096, 0, 0,
	16240, 4056, 180, 16380,
	16136, 3952, 404, 16364,
	16072, 3780, 664, 16344,
	16040, 3556, 952, 16312,
	16036, 3284, 1268, 16272,
	16052, 2980, 1604, 16224,
	16084, 2648, 1952, 16176,
	16128, 2304, 2304, 16128
};

static const uint16_t filter_4tap_16p_117[36] = {
	428, 3236, 428, 0,
	276, 3232, 604, 16364,
	148, 3184, 800, 16340,
	44, 3104, 1016, 16312,
	16344, 2984, 1244, 16284,
	16284, 2832, 1488, 16256,
	16244, 2648, 1732, 16236,
	16220, 2440, 1976, 16220,
	16212, 2216, 2216, 16212
};

static const uint16_t filter_4tap_16p_150[36] = {
	696, 2700, 696, 0,
	560, 2700, 848, 16364,
	436, 2676, 1008, 16348,
	328, 2628, 1180, 16336,
	232, 2556, 1356, 16328,
	152, 2460, 1536, 16328,
	84, 2344, 1716, 16332,
	28, 2208, 1888, 16348,
	16376, 2052, 2052, 16376
};

static const uint16_t filter_4tap_16p_183[36] = {
	940, 2208, 940, 0,
	832, 2200, 1052, 4,
	728, 2180, 1164, 16,
	628, 2148, 1280, 36,
	536, 2100, 1392, 60,
	448, 2044, 1504, 92,
	368, 1976, 1612, 132,
	296, 1900, 1716, 176,
	232, 1812, 1812, 232
};

static const uint16_t filter_5tap_16p_upscale[45] = {
	15936, 2496, 2496, 15936, 0,
	15992, 2128, 2832, 15896, 12,
	16056, 1760, 3140, 15876, 24,
	16120, 1404, 3420, 15876, 36,
	16188, 1060, 3652, 15908, 44,
	16248, 744, 3844, 15972, 44,
	16304, 460, 3980, 16072, 40,
	16348, 212, 4064, 16208, 24,
	0, 0, 4096, 0, 0,
};

static const uint16_t filter_5tap_16p_117[45] = {
	16056, 2372, 2372, 16056, 0,
	16052, 2124, 2600, 16076, 0,
	16060, 1868, 2808, 16120, 0,
	16080, 1612, 2992, 16180, 16376,
	16112, 1356, 3144, 16268, 16364,
	16144, 1108, 3268, 16376, 16344,
	16184, 872, 3356, 124, 16320,
	16220, 656, 3412, 276, 16292,
	16256, 456, 3428, 456, 16256,
};

static const uint16_t filter_5tap_16p_150[45] = {
	16368, 2064, 2064, 16368, 0,
	16316, 1924, 2204, 44, 16372,
	16280, 1772, 2328, 116, 16356,
	16256, 1616, 2440, 204, 16340,
	16240, 1456, 2536, 304, 16320,
	16232, 1296, 2612, 416, 16300,
	16232, 1132, 2664, 544, 16284,
	16240, 976, 2700, 680, 16264,
	16248, 824, 2708, 824, 16248,
};

static const uint16_t filter_5tap_16p_183[45] = {
	228, 1816, 1816, 228, 0,
	168, 1728, 1904, 300, 16372,
	116, 1632, 1988, 376, 16360,
	72, 1528, 2060, 460, 16348,
	36, 1424, 2120, 552, 16340,
	4, 1312, 2168, 652, 16336,
	16368, 1200, 2204, 752, 16332,
	16352, 1084, 2224, 860, 16332,
	16340, 972, 2232, 972, 16340,
};

static const uint16_t filter_6tap_16p_upscale[54] = {
	0, 0, 4092, 0, 0, 0,
	44, 16188, 4064, 228, 16324, 0,
	80, 16036, 3980, 492, 16256, 4,
	108, 15916, 3844, 788, 16184, 16,
	120, 15836, 3656, 1108, 16104, 28,
	128, 15792, 3420, 1448, 16024, 44,
	124, 15776, 3144, 1800, 15948, 64,
	112, 15792, 2836, 2152, 15880, 80,
	100, 15828, 2504, 2504, 15828, 100,
};

static const uint16_t filter_6tap_16p_117[54] = {
	16168, 476, 3568, 476, 16168, 0,
	16216, 280, 3540, 692, 16116, 8,
	16264, 104, 3472, 924, 16068, 16,
	16304, 16340, 3372, 1168, 16024, 28,
	16344, 16212, 3236, 1424, 15988, 36,
	16372, 16112, 3072, 1680, 15956, 44,
	12, 16036, 2880, 1936, 15940, 48,
	28, 15984, 2668, 2192, 15936, 48,
	40, 15952, 2436, 2436, 15952, 40,
};

static const uint16_t filter_6tap_16p_150[54] = {
	16148, 920, 2724, 920, 16148, 0,
	16156, 768, 2712, 1072, 16144, 0,
	16172, 628, 2684, 1232, 16148, 16380,
	16192, 492, 2632, 1388, 16160, 16372,
	16212, 368, 2564, 1548, 16180, 16364,
	16232, 256, 2480, 1704, 16212, 16352,
	16256, 156, 2380, 1856, 16256, 16336,
	16276, 64, 2268, 2004, 16308, 16320,
	16300, 16372, 2140, 2140, 16372, 16300,
};

static const uint16_t filter_6tap_16p_183[54] = {
	16296, 1032, 2196, 1032, 16296, 0,
	16284, 924, 2196, 1144, 16320, 16376,
	16272, 820, 2180, 1256, 16348, 16364,
	16268, 716, 2156, 1364, 16380, 16352,
	16264, 620, 2116, 1472, 36, 16340,
	16268, 524, 2068, 1576, 88, 16328,
	16272, 436, 2008, 1680, 144, 16316,
	16280, 352, 1940, 1772, 204, 16304,
	16292, 276, 1860, 1860, 276, 16292,
};

static const uint16_t filter_7tap_16p_upscale[63] = {
	176, 15760, 2488, 2488, 15760, 176, 0,
	160, 15812, 2152, 2816, 15728, 192, 16376,
	136, 15884, 1812, 3124, 15720, 196, 16368,
	108, 15964, 1468, 3400, 15740, 196, 16364,
	84, 16048, 1132, 3640, 15792, 180, 16360,
	56, 16140, 812, 3832, 15884, 152, 16360,
	32, 16228, 512, 3976, 16012, 116, 16364,
	12, 16308, 240, 4064, 16180, 60, 16372,
	0, 0, 0, 4096, 0, 0, 0,
};

static const uint16_t filter_7tap_16p_117[63] = {
	92, 15868, 2464, 2464, 15868, 92, 0,
	108, 15852, 2216, 2700, 15904, 72, 0,
	112, 15856, 1960, 2916, 15964, 44, 0,
	116, 15876, 1696, 3108, 16048, 8, 8,
	112, 15908, 1428, 3268, 16156, 16348, 12,
	104, 15952, 1168, 3400, 16288, 16300, 24,
	92, 16004, 916, 3496, 64, 16244, 36,
	80, 16064, 676, 3556, 248, 16184, 48,
	64, 16124, 452, 3576, 452, 16124, 64,
};

static const uint16_t filter_7tap_16p_150[63] = {
	16224, 16380, 2208, 2208, 16380, 16224, 0,
	16252, 16304, 2072, 2324, 84, 16196, 4,
	16276, 16240, 1924, 2432, 184, 16172, 8,
	16300, 16184, 1772, 2524, 296, 16144, 12,
	16324, 16144, 1616, 2600, 416, 16124, 12,
	16344, 16112, 1456, 2660, 548, 16104, 12,
	16360, 16092, 1296, 2704, 688, 16088, 12,
	16372, 16080, 1140, 2732, 832, 16080, 8,
	0, 16076, 984, 2740, 984, 16076, 0,
};

static const uint16_t filter_7tap_16p_183[63] = {
	16216, 324, 1884, 1884, 324, 16216, 0,
	16228, 248, 1804, 1960, 408, 16212, 16380,
	16240, 176, 1716, 2028, 496, 16208, 16376,
	16252, 112, 1624, 2084, 588, 16208, 16372,
	16264, 56, 1524, 2132, 684, 16212, 16364,
	16280, 4, 1424, 2168, 788, 16220, 16356,
	16292, 16344, 1320, 2196, 892, 16232, 16344,
	16308, 16308, 1212, 2212, 996, 16252, 16332,
	16320, 16276, 1104, 2216, 1104, 16276, 16320,
};

static const uint16_t filter_8tap_16p_upscale[72] = {
	0, 0, 0, 4096, 0, 0, 0, 0,
	16360, 76, 16172, 4064, 244, 16296, 24, 16380,
	16340, 136, 15996, 3980, 524, 16204, 56, 16380,
	16328, 188, 15860, 3844, 828, 16104, 92, 16372,
	16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
	16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
	16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
	16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
	16332, 244, 15704, 2532, 2532, 15704, 244, 16332,
};

static const uint16_t filter_8tap_16p_117[72] = {
	116, 16100, 428, 3564, 428, 16100, 116, 0,
	96, 16168, 220, 3548, 656, 16032, 136, 16376,
	76, 16236, 32, 3496, 904, 15968, 152, 16372,
	56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
	36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
	20, 28, 16000, 3124, 1700, 15820, 176, 16364,
	4, 76, 15912, 2940, 1972, 15800, 172, 16364,
	16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
	16372, 140, 15812, 2488, 2488, 15812, 140, 16372,
};

static const uint16_t filter_8tap_16p_150[72] = {
	16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
	12, 16020, 876, 2744, 1184, 16032, 16364, 4,
	24, 16028, 728, 2716, 1344, 16052, 16340, 8,
	36, 16040, 584, 2668, 1500, 16080, 16316, 16,
	40, 16060, 448, 2608, 1652, 16120, 16288, 20,
	44, 16080, 320, 2528, 1804, 16168, 16260, 28,
	48, 16108, 204, 2436, 1948, 16232, 16228, 32,
	44, 16136, 100, 2328, 2084, 16304, 16200, 40,
	44, 16168, 4, 2212, 2212, 4, 16168, 44,
};

static const uint16_t filter_8tap_16p_183[72] = {
	16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
	16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
	16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
	16312, 16184, 844, 2192, 1472, 12, 16216, 4,
	16328, 16172, 740, 2156, 1572, 72, 16200, 0,
	16340, 16160, 640, 2108, 1668, 136, 16188, 0,
	16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
	16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
	16368, 16160, 364, 1920, 1920, 364, 16160, 16368,
};

static const uint16_t filter_9tap_16p_upscale[81] = {
	16284, 296, 15660, 2572, 2572, 15660, 296, 16284, 0,
	16296, 272, 15712, 2228, 2896, 15632, 304, 16276, 4,
	16308, 240, 15788, 1876, 3192, 15632, 304, 16276, 4,
	16320, 204, 15876, 1520, 3452, 15664, 288, 16280, 8,
	16336, 164, 15976, 1176, 3676, 15732, 260, 16288, 12,
	16348, 120, 16080, 844, 3856, 15840, 216, 16300, 12,
	16364, 76, 16188, 532, 3988, 15984, 156, 16324, 8,
	16376, 36, 16288, 252, 4068, 16164, 84, 16352, 4,
	0, 0, 0, 0, 4096, 0, 0, 0, 0,
};

static const uint16_t filter_9tap_16p_117[81] = {
	16356, 172, 15776, 2504, 2504, 15776, 172, 16356, 0,
	16344, 200, 15756, 2252, 2740, 15816, 136, 16372, 16380,
	16336, 216, 15756, 1988, 2956, 15884, 92, 8, 16380,
	16332, 224, 15780, 1720, 3144, 15976, 40, 28, 16376,
	16328, 224, 15816, 1448, 3304, 16096, 16364, 52, 16372,
	16328, 216, 15868, 1180, 3432, 16240, 16296, 80, 16364,
	16332, 200, 15928, 916, 3524, 24, 16224, 108, 16356,
	16336, 184, 15996, 668, 3580, 220, 16148, 132, 16352,
	16344, 160, 16072, 436, 3600, 436, 16072, 160, 16344,
};

static const uint16_t filter_9tap_16p_150[81] = {
	84, 16128, 0, 2216, 2216, 0, 16128, 84, 0,
	80, 16160, 16296, 2088, 2332, 100, 16092, 84, 0,
	76, 16196, 16220, 1956, 2432, 208, 16064, 80, 0,
	72, 16232, 16152, 1812, 2524, 328, 16036, 76, 4,
	64, 16264, 16096, 1664, 2600, 460, 16012, 64, 8,
	56, 16300, 16052, 1508, 2656, 596, 15996, 52, 12,
	48, 16328, 16020, 1356, 2700, 740, 15984, 36, 20,
	40, 16356, 15996, 1196, 2728, 888, 15980, 20, 24,
	32, 0, 15984, 1044, 2736, 1044, 15984, 0, 32,
};

static const uint16_t filter_9tap_16p_183[81] = {
	16356, 16112, 388, 1952, 1952, 388, 16112, 16356, 0,
	16368, 16116, 304, 1876, 2020, 480, 16112, 16344, 4,
	16376, 16124, 224, 1792, 2080, 576, 16116, 16328, 8,
	0, 16136, 148, 1700, 2132, 672, 16124, 16312, 8,
	8, 16148, 80, 1604, 2176, 772, 16140, 16296, 12,
	12, 16164, 16, 1504, 2208, 876, 16156, 16276, 16,
	16, 16180, 16344, 1404, 2232, 980, 16184, 16256, 20,
	20, 16200, 16296, 1300, 2244, 1088, 16212, 16240, 20,
	20, 16220, 16252, 1196, 2252, 1196, 16252, 16220, 20,
};

static const uint16_t filter_10tap_16p_upscale[90] = {
	0, 0, 0, 0, 4096, 0, 0, 0, 0, 0,
	12, 16344, 88, 16160, 4068, 252, 16280, 44, 16368, 0,
	24, 16308, 168, 15976, 3988, 540, 16176, 92, 16348, 0,
	32, 16280, 236, 15828, 3852, 852, 16064, 140, 16328, 4,
	36, 16260, 284, 15720, 3672, 1184, 15956, 188, 16308, 8,
	36, 16244, 320, 15648, 3448, 1528, 15852, 236, 16288, 12,
	36, 16240, 336, 15612, 3184, 1880, 15764, 276, 16272, 20,
	32, 16240, 340, 15608, 2888, 2228, 15688, 308, 16256, 24,
	28, 16244, 332, 15636, 2568, 2568, 15636, 332, 16244, 28,
};

static const uint16_t filter_10tap_16p_117[90] = {
	16308, 196, 16048, 440, 3636, 440, 16048, 196, 16308, 0,
	16316, 164, 16132, 220, 3612, 676, 15972, 220, 16300, 0,
	16324, 132, 16212, 20, 3552, 932, 15900, 240, 16296, 4,
	16336, 100, 16292, 16232, 3456, 1192, 15836, 256, 16296, 4,
	16348, 68, 16364, 16084, 3324, 1464, 15784, 264, 16296, 8,
	16356, 36, 48, 15960, 3164, 1736, 15748, 260, 16304, 4,
	16364, 8, 108, 15864, 2972, 2008, 15728, 252, 16312, 4,
	16372, 16368, 160, 15792, 2756, 2268, 15724, 228, 16328, 0,
	16380, 16344, 200, 15748, 2520, 2520, 15748, 200, 16344, 16380,
};

static const uint16_t filter_10tap_16p_150[90] = {
	64, 0, 15956, 1048, 2716, 1048, 15956, 0, 64, 0,
	52, 24, 15952, 896, 2708, 1204, 15972, 16356, 72, 16380,
	44, 48, 15952, 748, 2684, 1360, 16000, 16320, 84, 16380,
	32, 68, 15964, 604, 2644, 1516, 16032, 16288, 92, 16376,
	24, 88, 15980, 464, 2588, 1668, 16080, 16248, 100, 16376,
	16, 100, 16004, 332, 2516, 1816, 16140, 16212, 108, 16376,
	8, 108, 16032, 212, 2428, 1956, 16208, 16172, 112, 16376,
	4, 116, 16060, 100, 2328, 2092, 16288, 16132, 116, 16380,
	0, 116, 16096, 16380, 2216, 2216, 16380, 16096, 116, 0,
};

static const uint16_t filter_10tap_16p_183[90] = {
	40, 16180, 16240, 1216, 2256, 1216, 16240, 16180, 40, 0,
	44, 16204, 16200, 1112, 2252, 1320, 16288, 16160, 36, 0,
	44, 16224, 16168, 1004, 2236, 1424, 16344, 16144, 28, 4,
	44, 16248, 16136, 900, 2208, 1524, 16, 16124, 24, 8,
	44, 16268, 16116, 796, 2176, 1620, 84, 16108, 12, 12,
	40, 16288, 16100, 692, 2132, 1712, 156, 16096, 4, 16,
	36, 16308, 16088, 592, 2080, 1796, 232, 16088, 16376, 20,
	32, 16328, 16080, 496, 2020, 1876, 316, 16080, 16360, 24,
	28, 16344, 16080, 404, 1952, 1952, 404, 16080, 16344, 28,
};

static const uint16_t filter_11tap_16p_upscale[99] = {
	60, 16216, 356, 15620, 2556, 2556, 15620, 356, 16216, 60, 0,
	52, 16224, 336, 15672, 2224, 2876, 15592, 368, 16208, 64, 16380,
	44, 16244, 304, 15744, 1876, 3176, 15596, 364, 16212, 64, 16376,
	36, 16264, 260, 15836, 1532, 3440, 15636, 340, 16220, 60, 16376,
	28, 16288, 212, 15940, 1188, 3668, 15708, 304, 16236, 56, 16376,
	20, 16312, 160, 16052, 856, 3848, 15820, 248, 16264, 48, 16376,
	12, 16336, 104, 16164, 544, 3984, 15968, 180, 16296, 36, 16376,
	4, 16360, 48, 16276, 256, 4068, 16160, 96, 16336, 16, 16380,
	0, 0, 0, 0, 0, 4096, 0, 0, 0, 0, 0,
};

static const uint16_t filter_11tap_16p_117[99] = {
	16380, 16332, 220, 15728, 2536, 2536, 15728, 220, 16332, 16380, 0,
	4, 16308, 256, 15704, 2280, 2768, 15772, 176, 16360, 16368, 0,
	12, 16292, 280, 15704, 2016, 2984, 15848, 120, 8, 16356, 0,
	20, 16276, 292, 15724, 1744, 3172, 15948, 56, 40, 16340, 4,
	24, 16268, 292, 15760, 1468, 3328, 16072, 16368, 80, 16324, 8,
	24, 16264, 288, 15816, 1196, 3456, 16224, 16288, 116, 16312, 12,
	24, 16264, 272, 15880, 932, 3548, 16, 16208, 152, 16296, 16,
	24, 16268, 248, 15956, 676, 3604, 216, 16120, 188, 16284, 20,
	24, 16276, 220, 16036, 436, 3624, 436, 16036, 220, 16276, 24,
};

static const uint16_t filter_11tap_16p_150[99] = {
	0, 144, 16072, 0, 2212, 2212, 0, 16072, 144, 0, 0,
	16376, 144, 16112, 16288, 2092, 2324, 104, 16036, 140, 8, 16380,
	16368, 144, 16152, 16204, 1960, 2424, 216, 16004, 132, 16, 16376,
	16364, 140, 16192, 16132, 1820, 2512, 340, 15976, 116, 28, 16376,
	16364, 132, 16232, 16072, 1676, 2584, 476, 15952, 100, 40, 16372,
	16360, 124, 16272, 16020, 1528, 2644, 612, 15936, 80, 52, 16368,
	16360, 116, 16312, 15980, 1372, 2684, 760, 15928, 56, 64, 16364,
	16360, 104, 16348, 15952, 1216, 2712, 908, 15928, 28, 76, 16364,
	16360, 92, 0, 15936, 1064, 2720, 1064, 15936, 0, 92, 16360,
};

static const uint16_t filter_11tap_16p_183[99] = {
	60, 16336, 16052, 412, 1948, 1948, 412, 16052, 16336, 60, 0,
	56, 16356, 16052, 324, 1876, 2016, 504, 16056, 16316, 64, 0,
	48, 16372, 16060, 240, 1796, 2072, 604, 16064, 16292, 64, 0,
	44, 4, 16068, 160, 1712, 2124, 700, 16080, 16272, 68, 0,
	40, 20, 16080, 84, 1620, 2164, 804, 16096, 16248, 68, 4,
	32, 32, 16096, 16, 1524, 2200, 908, 16124, 16224, 68, 4,
	28, 40, 16112, 16340, 1428, 2220, 1012, 16152, 16200, 64, 8,
	24, 52, 16132, 16284, 1328, 2236, 1120, 16192, 16176, 64, 12,
	16, 56, 16156, 16236, 1224, 2240, 1224, 16236, 16156, 56, 16,
};

static const uint16_t filter_12tap_16p_upscale[108] = {
	0, 0, 0, 0, 0, 4096, 0, 0, 0, 0, 0, 0,
	16376, 24, 16332, 100, 16156, 4068, 260, 16272, 56, 16356, 8, 0,
	16368, 44, 16284, 188, 15964, 3988, 548, 16156, 112, 16328, 20, 16380,
	16360, 64, 16248, 260, 15812, 3856, 864, 16040, 172, 16296, 32, 16380,
	16360, 76, 16216, 320, 15696, 3672, 1196, 15928, 228, 16268, 44, 16376,
	16356, 84, 16196, 360, 15620, 3448, 1540, 15820, 280, 16240, 56, 16372,
	16356, 88, 16184, 384, 15580, 3188, 1888, 15728, 324, 16216, 68, 16368,
	16360, 88, 16180, 392, 15576, 2892, 2236, 15652, 360, 16200, 80, 16364,
	16360, 84, 16188, 384, 15600, 2576, 2576, 15600, 384, 16188, 84, 16360,
};

static const uint16_t filter_12tap_16p_117[108] = {
	48, 16248, 240, 16028, 436, 3612, 436, 16028, 240, 16248, 48, 0,
	44, 16260, 208, 16116, 212, 3596, 676, 15944, 272, 16240, 48, 16380,
	40, 16276, 168, 16204, 12, 3540, 932, 15868, 296, 16240, 48, 16380,
	36, 16292, 128, 16288, 16220, 3452, 1196, 15800, 312, 16240, 44, 16380,
	28, 16308, 84, 16372, 16064, 3324, 1472, 15748, 316, 16244, 40, 16380,
	24, 16328, 44, 64, 15936, 3168, 1744, 15708, 312, 16256, 32, 16380,
	16, 16344, 8, 132, 15836, 2980, 2016, 15688, 300, 16272, 20, 0,
	12, 16364, 16356, 188, 15760, 2768, 2280, 15688, 272, 16296, 8, 4,
	8, 16380, 16324, 236, 15712, 2532, 2532, 15712, 236, 16324, 16380, 8,
};

static const uint16_t filter_12tap_16p_150[108] = {
	16340, 116, 0, 15916, 1076, 2724, 1076, 15916, 0, 116, 16340, 0,
	16340, 100, 32, 15908, 920, 2716, 1232, 15936, 16344, 128, 16340, 0,
	16344, 84, 64, 15908, 772, 2692, 1388, 15968, 16304, 140, 16344, 16380,
	16344, 68, 92, 15912, 624, 2652, 1540, 16008, 16264, 152, 16344, 16380,
	16348, 52, 112, 15928, 484, 2592, 1688, 16060, 16220, 160, 16348, 16380,
	16352, 40, 132, 15952, 348, 2520, 1836, 16124, 16176, 168, 16356, 16376,
	16356, 24, 148, 15980, 224, 2436, 1976, 16200, 16132, 172, 16364, 16372,
	16360, 12, 160, 16012, 108, 2336, 2104, 16288, 16088, 172, 16372, 16368,
	16364, 0, 168, 16048, 0, 2228, 2228, 0, 16048, 168, 0, 16364,
};

static const uint16_t filter_12tap_16p_183[108] = {
	36, 72, 16132, 16228, 1224, 2224, 1224, 16228, 16132, 72, 36, 0,
	28, 80, 16156, 16184, 1120, 2224, 1328, 16280, 16112, 64, 40, 16380,
	24, 84, 16180, 16144, 1016, 2208, 1428, 16340, 16092, 52, 48, 16380,
	16, 88, 16208, 16112, 912, 2188, 1524, 16, 16072, 36, 56, 16380,
	12, 92, 16232, 16084, 812, 2156, 1620, 88, 16056, 24, 64, 16380,
	8, 92, 16256, 16064, 708, 2116, 1708, 164, 16044, 4, 68, 16380,
	4, 88, 16280, 16048, 608, 2068, 1792, 244, 16036, 16372, 76, 16380,
	0, 88, 16308, 16036,
512, 2008, 1872, 328, 16032, 16352, 80, 16380,
	0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0,
};

/* Per-tap-count ratio-band selectors: return the 16-phase coefficient
 * table matching the scale ratio (ratio = src/dest, fixed31_32).
 * Bands: <1.0 upscale, <4/3 "117", <5/3 "150", else "183".
 */
static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_3tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_3tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_3tap_16p_150;
	else
		return filter_3tap_16p_183;
}

static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_4tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_4tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_4tap_16p_150;
	else
		return filter_4tap_16p_183;
}

static const uint16_t *wbscl_get_filter_5tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_5tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_5tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_5tap_16p_150;
	else
		return filter_5tap_16p_183;
}

static const uint16_t *wbscl_get_filter_6tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_6tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_6tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_6tap_16p_150;
	else
		return filter_6tap_16p_183;
}

static const uint16_t *wbscl_get_filter_7tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_7tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_7tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_7tap_16p_150;
	else
		return filter_7tap_16p_183;
}

static const uint16_t *wbscl_get_filter_8tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_8tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_8tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_8tap_16p_150;
	else
		return filter_8tap_16p_183;
}

static const uint16_t *wbscl_get_filter_9tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_9tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_9tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_9tap_16p_150;
	else
		return filter_9tap_16p_183;
}

static const uint16_t *wbscl_get_filter_10tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_10tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_10tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_10tap_16p_150;
	else
		return filter_10tap_16p_183;
}

static const uint16_t *wbscl_get_filter_11tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_11tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_11tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_11tap_16p_150;
	else
		return filter_11tap_16p_183;
}

static const uint16_t *wbscl_get_filter_12tap_16p(struct fixed31_32 ratio)
{
	if (ratio.value < dc_fixpt_one.value)
		return filter_12tap_16p_upscale;
	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
		return filter_12tap_16p_117;
	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
		return filter_12tap_16p_150;
	else
		return filter_12tap_16p_183;
}

/* Dispatch on tap count (1..12) to the matching ratio-band selector.
 * taps == 2 uses the shared 2-tap table; taps == 1 means no filtering
 * (NULL); any other count is a programming error (BREAK_TO_DEBUGGER).
 */
static const uint16_t *wbscl_get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
{
	if (taps == 12)
		return wbscl_get_filter_12tap_16p(ratio);
	else if (taps == 11)
		return wbscl_get_filter_11tap_16p(ratio);
	else if (taps == 10)
		return wbscl_get_filter_10tap_16p(ratio);
	else if (taps == 9)
		return wbscl_get_filter_9tap_16p(ratio);
	else if (taps == 8)
		return wbscl_get_filter_8tap_16p(ratio);
	else if (taps == 7)
		return wbscl_get_filter_7tap_16p(ratio);
	else if (taps == 6)
		return wbscl_get_filter_6tap_16p(ratio);
	else if (taps == 5)
		return wbscl_get_filter_5tap_16p(ratio);
	else if (taps == 4)
		return wbscl_get_filter_4tap_16p(ratio);
	else if (taps == 3)
		return wbscl_get_filter_3tap_16p(ratio);
	else if (taps == 2)
		return get_filter_2tap_16p();
	else if (taps == 1)
		return NULL;
	else {
		/* should never happen, bug */
		BREAK_TO_DEBUGGER();
		return NULL;
	}
}

/* Upload one coefficient table into the WBSCL coefficient RAM.
 * Coefficients are written one tap-pair at a time for each of the
 * NUM_PHASES/2 + 1 stored phases (remaining phases are mirrored by HW);
 * an odd final tap is padded with a zero odd coefficient.
 */
static void wbscl_set_scaler_filter(
	struct dcn20_dwbc *dwbc20,
	uint32_t taps,
	enum wbscl_coef_filter_type_sel filter_type,
	const uint16_t *filter)
{
	const int tap_pairs = (taps + 1) / 2;
	int phase;
	int pair;
	uint16_t odd_coef, even_coef;

	for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
		for (pair = 0; pair < tap_pairs; pair++) {
			even_coef = filter[phase * taps + 2 * pair];
			if ((pair * 2 + 1) < taps)
				odd_coef = filter[phase * taps + 2 * pair + 1];
			else
				odd_coef = 0;

			REG_SET_3(WBSCL_COEF_RAM_SELECT, 0,
				WBSCL_COEF_RAM_TAP_PAIR_IDX, pair,
				WBSCL_COEF_RAM_PHASE, phase,
				WBSCL_COEF_RAM_FILTER_TYPE, filter_type);

			REG_SET_4(WBSCL_COEF_RAM_TAP_DATA, 0,
				/* Even tap coefficient (bits 1:0 fixed to 0) */
				WBSCL_COEF_RAM_EVEN_TAP_COEF, even_coef,
				/* Write/read control for even coefficient */
				WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, 1,
				/* Odd tap coefficient (bits 1:0 fixed to 0) */
				WBSCL_COEF_RAM_ODD_TAP_COEF, odd_coef,
				/* Write/read control for odd coefficient */
				WBSCL_COEF_RAM_ODD_TAP_COEF_EN, 1);
		}
	}
}

/* Program the WBSCL horizontal scaler: ratio, tap counts and initial
 * phases for luma and chroma. (Definition continues beyond this view.)
 */
bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
		uint32_t src_width,
		uint32_t dest_width,
		struct scaling_taps num_taps)
{
	uint32_t h_ratio_luma = 1;
	uint32_t h_taps_luma = num_taps.h_taps;
	uint32_t h_taps_chroma = num_taps.h_taps_c;
	int32_t h_init_phase_luma = 0;
	int32_t h_init_phase_chroma = 0;
	uint32_t h_init_phase_luma_int = 0;
	uint32_t h_init_phase_luma_frac = 0;
	uint32_t h_init_phase_chroma_int = 0;
	uint32_t h_init_phase_chroma_frac = 0;
	const uint16_t *filter_h = NULL;
	const uint16_t *filter_h_c = NULL;

	struct fixed31_32 tmp_h_init_phase_luma = dc_fixpt_from_int(0);
	struct fixed31_32 tmp_h_init_phase_chroma = dc_fixpt_from_int(0);

	/*Calculate ratio*/
	struct fixed31_32 tmp_h_ratio_luma = dc_fixpt_from_fraction(
		src_width, dest_width);

	/* NOTE(review): ratio of exactly 8 saturates the register to all-ones
	 * (-1 in a uint32_t); otherwise encoded as u3.19 shifted left 5.
	 */
	if (dc_fixpt_floor(tmp_h_ratio_luma) == 8)
		h_ratio_luma = -1;
	else
		h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;

	/*Program ratio*/
	REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);

	/* Program taps*/
	REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, h_taps_luma - 1);
	REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, h_taps_chroma - 1);

	/* Calculate phase*/
	tmp_h_init_phase_luma = dc_fixpt_add_int(tmp_h_ratio_luma, h_taps_luma + 1);
	tmp_h_init_phase_luma = dc_fixpt_div_int(tmp_h_init_phase_luma, 2);
	tmp_h_init_phase_luma = dc_fixpt_sub_int(tmp_h_init_phase_luma, h_taps_luma);

	h_init_phase_luma = dc_fixpt_s4d19(tmp_h_init_phase_luma);
	h_init_phase_luma_int = (h_init_phase_luma >> 19) & 0x1f;
	h_init_phase_luma_frac = (h_init_phase_luma & 0x7ffff) << 5;

	tmp_h_init_phase_chroma = dc_fixpt_mul_int(tmp_h_ratio_luma, 2);
	tmp_h_init_phase_chroma = dc_fixpt_add_int(tmp_h_init_phase_chroma, h_taps_chroma + 1);
	tmp_h_init_phase_chroma = dc_fixpt_div_int(tmp_h_init_phase_chroma, 2);
	tmp_h_init_phase_chroma = dc_fixpt_sub_int(tmp_h_init_phase_chroma, h_taps_chroma);
	tmp_h_init_phase_chroma = dc_fixpt_add(tmp_h_init_phase_chroma,
			dc_fixpt_from_fraction(1, 4));

	h_init_phase_chroma = dc_fixpt_s4d19(tmp_h_init_phase_chroma);
	h_init_phase_chroma_int = (h_init_phase_chroma >> 19) & 0x1f;
	h_init_phase_chroma_frac = (h_init_phase_chroma & 0x7ffff) << 5;

	/* Program phase*/
	REG_UPDATE(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, h_init_phase_luma_int);
	REG_UPDATE(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, h_init_phase_luma_frac);
	REG_UPDATE(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR,
h_init_phase_chroma_int); REG_UPDATE(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, h_init_phase_chroma_frac); /* Program LUT coefficients*/ filter_h = wbscl_get_filter_coeffs_16p( h_taps_luma, tmp_h_ratio_luma); filter_h_c = wbscl_get_filter_coeffs_16p( h_taps_chroma, dc_fixpt_from_int(h_ratio_luma * 2)); wbscl_set_scaler_filter(dwbc20, h_taps_luma, WBSCL_COEF_LUMA_HORZ_FILTER, filter_h); wbscl_set_scaler_filter(dwbc20, h_taps_chroma, WBSCL_COEF_CHROMA_HORZ_FILTER, filter_h_c); return true; } bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, uint32_t src_height, uint32_t dest_height, struct scaling_taps num_taps, enum dwb_subsample_position subsample_position) { uint32_t v_ratio_luma = 1; uint32_t v_taps_luma = num_taps.v_taps; uint32_t v_taps_chroma = num_taps.v_taps_c; int32_t v_init_phase_luma = 0; int32_t v_init_phase_chroma = 0; uint32_t v_init_phase_luma_int = 0; uint32_t v_init_phase_luma_frac = 0; uint32_t v_init_phase_chroma_int = 0; uint32_t v_init_phase_chroma_frac = 0; const uint16_t *filter_v = NULL; const uint16_t *filter_v_c = NULL; struct fixed31_32 tmp_v_init_phase_luma = dc_fixpt_from_int(0); struct fixed31_32 tmp_v_init_phase_chroma = dc_fixpt_from_int(0); /*Calculate ratio*/ struct fixed31_32 tmp_v_ratio_luma = dc_fixpt_from_fraction( src_height, dest_height); if (dc_fixpt_floor(tmp_v_ratio_luma) == 8) v_ratio_luma = -1; else v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5; /*Program ratio*/ REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma); /* Program taps*/ REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, v_taps_luma - 1); REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, v_taps_chroma - 1); /* Calculate phase*/ tmp_v_init_phase_luma = dc_fixpt_add_int(tmp_v_ratio_luma, v_taps_luma + 1); tmp_v_init_phase_luma = dc_fixpt_div_int(tmp_v_init_phase_luma, 2); tmp_v_init_phase_luma = dc_fixpt_sub_int(tmp_v_init_phase_luma, v_taps_luma); v_init_phase_luma = 
dc_fixpt_s4d19(tmp_v_init_phase_luma); v_init_phase_luma_int = (v_init_phase_luma >> 19) & 0x1f; v_init_phase_luma_frac = (v_init_phase_luma & 0x7ffff) << 5; tmp_v_init_phase_chroma = dc_fixpt_mul_int(tmp_v_ratio_luma, 2); tmp_v_init_phase_chroma = dc_fixpt_add_int(tmp_v_init_phase_chroma, v_taps_chroma + 1); tmp_v_init_phase_chroma = dc_fixpt_div_int(tmp_v_init_phase_chroma, 2); tmp_v_init_phase_chroma = dc_fixpt_sub_int(tmp_v_init_phase_chroma, v_taps_chroma); if (subsample_position == DWB_COSITED_SUBSAMPLING) tmp_v_init_phase_chroma = dc_fixpt_add(tmp_v_init_phase_chroma, dc_fixpt_from_fraction(1, 4)); v_init_phase_chroma = dc_fixpt_s4d19(tmp_v_init_phase_chroma); v_init_phase_chroma_int = (v_init_phase_chroma >> 19) & 0x1f; v_init_phase_chroma_frac = (v_init_phase_chroma & 0x7ffff) << 5; /* Program phase*/ REG_UPDATE(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, v_init_phase_luma_int); REG_UPDATE(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, v_init_phase_luma_frac); REG_UPDATE(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, v_init_phase_chroma_int); REG_UPDATE(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, v_init_phase_chroma_frac); /* Program LUT coefficients*/ filter_v = wbscl_get_filter_coeffs_16p( v_taps_luma, tmp_v_ratio_luma); filter_v_c = wbscl_get_filter_coeffs_16p( v_taps_chroma, dc_fixpt_from_int(v_ratio_luma * 2)); wbscl_set_scaler_filter(dwbc20, v_taps_luma, WBSCL_COEF_LUMA_VERT_FILTER, filter_v); wbscl_set_scaler_filter(dwbc20, v_taps_chroma, WBSCL_COEF_CHROMA_VERT_FILTER, filter_v_c); return true; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
/*
 * Copyright 2012-17 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "resource.h"
#include "dwb.h"
#include "dcn20_dwb.h"

/* Register access shorthands bound to the local dwbc20 instance */
#define REG(reg)\
	dwbc20->dwbc_regs->reg

#define CTX \
	dwbc20->base.ctx

#define DC_LOGGER \
	dwbc20->base.ctx->logger
#undef FN
#define FN(reg_name, field_name) \
	dwbc20->dwbc_shift->field_name, dwbc20->dwbc_mask->field_name

enum dwb_outside_pix_strategy {
	DWB_OUTSIDE_PIX_STRATEGY_BLACK = 0,
	DWB_OUTSIDE_PIX_STRATEGY_EDGE = 1
};

/*
 * Report the static writeback capabilities of this DWB instance.
 * Returns false only when the caller passes a NULL caps pointer.
 */
static bool dwb2_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	if (caps) {
		caps->adapter_id = 0;	/* we only support 1 adapter currently */
		caps->hw_version = DCN_VERSION_2_0;
		caps->num_pipes = 1;
		memset(&caps->reserved, 0, sizeof(caps->reserved));
		memset(&caps->reserved2, 0, sizeof(caps->reserved2));
		caps->sw_version = dwb_ver_1_0;
		caps->caps.support_dwb = true;
		caps->caps.support_ogam = false;
		caps->caps.support_wbscl = false;
		caps->caps.support_ocsc = false;
		DC_LOG_DWB("%s SUPPORTED! inst = %d", __func__, dwbc20->base.inst);
		return true;
	} else {
		DC_LOG_DWB("%s NOT SUPPORTED! inst = %d", __func__, dwbc20->base.inst);
		return false;
	}
}

/*
 * Program the CNV (capture/conversion) stage: source size, optional
 * crop window, capture rate, and output pixel depth.
 */
void dwb2_config_dwb_cnv(struct dwbc *dwbc, struct dc_dwb_params *params)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);

	/* Set DWB source size */
	REG_UPDATE_2(CNV_SOURCE_SIZE,
		CNV_SOURCE_WIDTH, params->cnv_params.src_width,
		CNV_SOURCE_HEIGHT, params->cnv_params.src_height);

	/* If the crop window differs from the source size, enable cropping. */
	if (params->cnv_params.crop_en) {
		REG_UPDATE(CNV_MODE, CNV_WINDOW_CROP_EN, 1);
		REG_UPDATE(CNV_WINDOW_START, CNV_WINDOW_START_X, params->cnv_params.crop_x);
		REG_UPDATE(CNV_WINDOW_START, CNV_WINDOW_START_Y, params->cnv_params.crop_y);
		REG_UPDATE(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, params->cnv_params.crop_width);
		REG_UPDATE(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, params->cnv_params.crop_height);
	} else {
		REG_UPDATE(CNV_MODE, CNV_WINDOW_CROP_EN, 0);
	}

	/* Set CAPTURE_RATE */
	REG_UPDATE(CNV_MODE, CNV_FRAME_CAPTURE_RATE, params->capture_rate);

	/* Set CNV output pixel depth */
	REG_UPDATE(CNV_MODE, CNV_OUT_BPC, params->cnv_params.cnv_out_bpc);
}

/*
 * Enable writeback capture: rejects any luma scaling request (DCN2
 * supports chroma sub-sampling only), then enables the pipe, programs
 * CNV and scaler, and turns on double-buffered frame capture.
 */
static bool dwb2_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	/* Only chroma scaling (sub-sampling) is supported in DCN2 */
	if ((params->cnv_params.src_width != params->dest_width) ||
			(params->cnv_params.src_height != params->dest_height)) {
		DC_LOG_DWB("%s inst = %d, FAILED!LUMA SCALING NOT SUPPORTED", __func__, dwbc20->base.inst);
		return false;
	}
	DC_LOG_DWB("%s inst = %d, ENABLED", __func__, dwbc20->base.inst);

	/* disable power gating */
	//REG_UPDATE_5(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, 1,
	//		DISPCLK_G_WB_GATE_DIS, 1, DISPCLK_G_WBSCL_GATE_DIS, 1,
	//		WB_LB_LS_DIS, 1, WB_LUT_LS_DIS, 1);

	/* Set WB_ENABLE (not double buffered; capture not enabled) */
	REG_UPDATE(WB_ENABLE, WB_ENABLE, 1);

	/* Set CNV parameters */
	dwb2_config_dwb_cnv(dwbc, params);

	/* Set scaling parameters */
	dwb2_set_scaler(dwbc, params);

	/* Enable DWB capture enable (double buffered) */
	REG_UPDATE(CNV_MODE, CNV_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_ENABLE);

	// disable warmup
	REG_UPDATE(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, 0);

	return true;
}

/*
 * Disable writeback capture and soft-reset the block.
 * Always returns true.
 */
bool dwb2_disable(struct dwbc *dwbc)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	DC_LOG_DWB("%s inst = %d, Disabled", __func__, dwbc20->base.inst);

	/* disable CNV */
	REG_UPDATE(CNV_MODE, CNV_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_DISABLE);

	/* disable WB */
	REG_UPDATE(WB_ENABLE, WB_ENABLE, 0);

	/* soft reset */
	REG_UPDATE(WB_SOFT_RESET, WB_SOFT_RESET, 1);
	REG_UPDATE(WB_SOFT_RESET, WB_SOFT_RESET, 0);

	/* enable power gating */
	//REG_UPDATE_5(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, 0,
	//		DISPCLK_G_WB_GATE_DIS, 0, DISPCLK_G_WBSCL_GATE_DIS, 0,
	//		WB_LB_LS_DIS, 0, WB_LUT_LS_DIS, 0);

	return true;
}

/*
 * Re-program CNV and scaler for an already-enabled instance, honoring
 * an update lock a caller may already hold.  Rejects luma scaling
 * requests just like dwb2_enable.
 */
static bool dwb2_update(struct dwbc *dwbc, struct dc_dwb_params *params)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
	unsigned int pre_locked;

	/* Only chroma scaling (sub-sampling) is supported in DCN2 */
	if ((params->cnv_params.src_width != params->dest_width) ||
			(params->cnv_params.src_height != params->dest_height)) {
		DC_LOG_DWB("%s inst = %d, FAILED!LUMA SCALING NOT SUPPORTED", __func__, dwbc20->base.inst);
		return false;
	}
	DC_LOG_DWB("%s inst = %d, scaling", __func__, dwbc20->base.inst);

	/*
	 * Check if the caller has already locked CNV registers.
	 * If so: assume the caller will unlock, so don't touch the lock.
	 * If not: lock them for this update, then unlock after the
	 * update is complete.
	 */
	REG_GET(CNV_UPDATE, CNV_UPDATE_LOCK, &pre_locked);
	if (pre_locked == 0) {
		/* Lock DWB registers */
		REG_UPDATE(CNV_UPDATE, CNV_UPDATE_LOCK, 1);
	}

	/* Set CNV parameters */
	dwb2_config_dwb_cnv(dwbc, params);

	/* Set scaling parameters */
	dwb2_set_scaler(dwbc, params);

	if (pre_locked == 0) {
		/* Unlock DWB registers */
		REG_UPDATE(CNV_UPDATE, CNV_UPDATE_LOCK, 0);
	}

	return true;
}

/* True only when both the writeback pipe and frame capture are on. */
bool dwb2_is_enabled(struct dwbc *dwbc)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
	unsigned int wb_enabled = 0;
	unsigned int cnv_frame_capture_en = 0;

	REG_GET(WB_ENABLE, WB_ENABLE, &wb_enabled);
	REG_GET(CNV_MODE, CNV_FRAME_CAPTURE_EN, &cnv_frame_capture_en);

	return ((wb_enabled != 0) && (cnv_frame_capture_en != 0));
}

/*
 * Configure stereo capture (type, eye selection, polarity), or clear
 * the eye selection when stereo is disabled.
 */
void dwb2_set_stereo(struct dwbc *dwbc,
		struct dwb_stereo_params *stereo_params)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	DC_LOG_DWB("%s inst = %d, enabled =%d", __func__,
		dwbc20->base.inst, stereo_params->stereo_enabled);

	if (stereo_params->stereo_enabled) {
		REG_UPDATE(CNV_MODE, CNV_STEREO_TYPE, stereo_params->stereo_type);
		REG_UPDATE(CNV_MODE, CNV_EYE_SELECTION, stereo_params->stereo_eye_select);
		REG_UPDATE(CNV_MODE, CNV_STEREO_POLARITY, stereo_params->stereo_polarity);
	} else {
		REG_UPDATE(CNV_MODE, CNV_EYE_SELECTION, 0);
	}
}

/* Flag whether the next captured frame carries new content. */
void dwb2_set_new_content(struct dwbc *dwbc,
						bool is_new_content)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);

	REG_UPDATE(CNV_MODE, CNV_NEW_CONTENT, is_new_content);
}

/* Program the GMC warm-up machinery from the supplied parameters. */
static void dwb2_set_warmup(struct dwbc *dwbc,
		struct dwb_warmup_params *warmup_params)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);

	REG_UPDATE(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, warmup_params->warmup_en);
	REG_UPDATE(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, warmup_params->warmup_width);
	REG_UPDATE(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, warmup_params->warmup_height);

	REG_UPDATE(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, warmup_params->warmup_data);
	REG_UPDATE(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, warmup_params->warmup_mode);
	REG_UPDATE(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, warmup_params->warmup_depth);
}

/*
 * Program the WBSCL scaler: mode/output depth, and — unless in bypass —
 * destination size, rounding offsets, clamps, outside-pixel strategy and
 * the horizontal/vertical filter coefficients (crop size takes
 * precedence over source size when cropping is enabled).
 */
void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
{
	struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);

	DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);

	/* Program scaling mode */
	REG_UPDATE_2(WBSCL_MODE, WBSCL_MODE, params->out_format,
			WBSCL_OUT_BIT_DEPTH, params->output_depth);

	if (params->out_format != dwb_scaler_mode_bypass444) {
		/* Program output size */
		REG_UPDATE(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, params->dest_width);
		REG_UPDATE(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, params->dest_height);

		/* Program round offsets */
		REG_UPDATE(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, 0x40);
		REG_UPDATE(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, 0x200);

		/* Program clamp values */
		REG_UPDATE(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, 0x3fe);
		REG_UPDATE(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, 0x1);
		REG_UPDATE(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, 0x3fe);
		REG_UPDATE(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, 0x1);

		/* Program outside pixel strategy to use edge pixels */
		REG_UPDATE(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY,
				DWB_OUTSIDE_PIX_STRATEGY_EDGE);

		if (params->cnv_params.crop_en) {
			/* horizontal scale */
			dwb_program_horz_scalar(dwbc20, params->cnv_params.crop_width,
							params->dest_width,
							params->scaler_taps);

			/* vertical scale */
			dwb_program_vert_scalar(dwbc20, params->cnv_params.crop_height,
							params->dest_height,
							params->scaler_taps,
							params->subsample_position);
		} else {
			/* horizontal scale */
			dwb_program_horz_scalar(dwbc20, params->cnv_params.src_width,
							params->dest_width,
							params->scaler_taps);

			/* vertical scale */
			dwb_program_vert_scalar(dwbc20, params->cnv_params.src_height,
							params->dest_height,
							params->scaler_taps,
							params->subsample_position);
		}
	}
}

/* Virtual-function table exposed through dwbc->funcs */
static const struct dwbc_funcs dcn20_dwbc_funcs = {
	.get_caps		= dwb2_get_caps,
	.enable			= dwb2_enable,
	.disable		= dwb2_disable,
	.update			= dwb2_update,
	.is_enabled		= dwb2_is_enabled,
	.set_stereo		= dwb2_set_stereo,
	.set_new_content	= dwb2_set_new_content,
	.set_warmup		= dwb2_set_warmup,
	.dwb_set_scaler		= dwb2_set_scaler,
};

/* Wire up a DCN2.0 DWB instance with its register/shift/mask tables. */
void dcn20_dwbc_construct(struct dcn20_dwbc *dwbc20,
		struct dc_context *ctx,
		const struct dcn20_dwbc_registers *dwbc_regs,
		const struct dcn20_dwbc_shift *dwbc_shift,
		const struct dcn20_dwbc_mask *dwbc_mask,
		int inst)
{
	dwbc20->base.ctx = ctx;

	dwbc20->base.inst = inst;
	dwbc20->base.funcs = &dcn20_dwbc_funcs;

	dwbc20->dwbc_regs = dwbc_regs;
	dwbc20->dwbc_shift = dwbc_shift;
	dwbc20->dwbc_mask = dwbc_mask;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20_hwseq.h"

#include "dcn20_init.h"

/*
 * DCN2.0 public hardware-sequencer function table.  Entries mix DCN2.0
 * implementations with DCN1.0/DCE11.0 functions that are reused
 * unchanged on this generation.
 */
static const struct hw_sequencer_funcs dcn20_funcs = {
	.program_gamut_remap = dcn10_program_gamut_remap,
	.init_hw = dcn10_init_hw,
	.power_down_on_boot = dcn10_power_down_on_boot,
	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
	.apply_ctx_for_surface = NULL,
	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
	.update_plane_addr = dcn20_update_plane_addr,
	.update_dchub = dcn10_update_dchub,
	.update_pending_status = dcn10_update_pending_status,
	.program_output_csc = dcn20_program_output_csc,
	.enable_accelerated_mode = dce110_enable_accelerated_mode,
	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
	.enable_vblanks_synchronization = dcn10_enable_vblanks_synchronization,
	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
	.update_info_frame = dce110_update_info_frame,
	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
	.enable_stream = dcn20_enable_stream,
	.disable_stream = dce110_disable_stream,
	.unblank_stream = dcn20_unblank_stream,
	.blank_stream = dce110_blank_stream,
	.enable_audio_stream = dce110_enable_audio_stream,
	.disable_audio_stream = dce110_disable_audio_stream,
	.disable_plane = dcn20_disable_plane,
	.pipe_control_lock = dcn20_pipe_control_lock,
	.interdependent_update_lock = dcn10_lock_all_pipes,
	.cursor_lock = dcn10_cursor_lock,
	.prepare_bandwidth = dcn20_prepare_bandwidth,
	.optimize_bandwidth = dcn20_optimize_bandwidth,
	.update_bandwidth = dcn20_update_bandwidth,
	.set_drr = dcn10_set_drr,
	.get_position = dcn10_get_position,
	.set_static_screen_control = dcn10_set_static_screen_control,
	.setup_stereo = dcn10_setup_stereo,
	.set_avmute = dce110_set_avmute,
	.log_hw_state = dcn10_log_hw_state,
	.get_hw_state = dcn10_get_hw_state,
	.clear_status_bits = dcn10_clear_status_bits,
	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
	.edp_backlight_control = dce110_edp_backlight_control,
	.edp_power_control = dce110_edp_power_control,
	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
	.set_cursor_position = dcn10_set_cursor_position,
	.set_cursor_attribute = dcn10_set_cursor_attribute,
	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
	.set_clock = dcn10_set_clock,
	.get_clock = dcn10_get_clock,
	.program_triplebuffer = dcn20_program_triple_buffer,
	.enable_writeback = dcn20_enable_writeback,
	.disable_writeback = dcn20_disable_writeback,
	.dmdata_status_done = dcn20_dmdata_status_done,
	.program_dmdata_engine = dcn20_program_dmdata_engine,
	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
	.init_sys_ctx = dcn20_init_sys_ctx,
	.init_vm_ctx = dcn20_init_vm_ctx,
	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
	.calc_vupdate_position = dcn10_calc_vupdate_position,
	.set_backlight_level = dce110_set_backlight_level,
	.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
	.set_pipe = dce110_set_pipe,
#ifndef TRIM_FSFT
	.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
	.enable_lvds_link_output = dce110_enable_lvds_link_output,
	.enable_tmds_link_output = dce110_enable_tmds_link_output,
	.enable_dp_link_output = dce110_enable_dp_link_output,
	.disable_link_output = dce110_disable_link_output,
	.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
	.get_dcc_en_bits = dcn10_get_dcc_en_bits,
	.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};

/*
 * DCN2.0 private (driver-internal) sequencer helpers, reached via
 * dc->hwseq->funcs rather than the public dc->hwss table.
 */
static const struct hwseq_private_funcs dcn20_private_funcs = {
	.init_pipes = dcn10_init_pipes,
	.update_plane_addr = dcn20_update_plane_addr,
	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
	.update_mpcc = dcn20_update_mpcc,
	.set_input_transfer_func = dcn20_set_input_transfer_func,
	.set_output_transfer_func = dcn20_set_output_transfer_func,
	.power_down = dce110_power_down,
	.enable_display_power_gating = dcn10_dummy_display_power_gating,
	.blank_pixel_data = dcn20_blank_pixel_data,
	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
	.enable_stream_timing = dcn20_enable_stream_timing,
	.edp_backlight_control = dce110_edp_backlight_control,
	.disable_stream_gating = dcn20_disable_stream_gating,
	.enable_stream_gating = dcn20_enable_stream_gating,
	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
	.did_underflow_occur = dcn10_did_underflow_occur,
	.init_blank = dcn20_init_blank,
	.disable_vga = dcn20_disable_vga,
	.bios_golden_init = dcn10_bios_golden_init,
	.plane_atomic_disable = dcn20_plane_atomic_disable,
	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
	.enable_power_gating_plane = dcn20_enable_power_gating_plane,
	.dpp_pg_control = dcn20_dpp_pg_control,
	.hubp_pg_control = dcn20_hubp_pg_control,
	.update_odm = dcn20_update_odm,
	.dsc_pg_control = dcn20_dsc_pg_control,
	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
	.wait_for_blank_complete = dcn20_wait_for_blank_complete,
	.dccg_init = dcn20_dccg_init,
	.set_blend_lut = dcn20_set_blend_lut,
	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
};

/* Install the DCN2.0 sequencer tables into the given dc instance. */
void dcn20_hw_sequencer_construct(struct dc *dc)
{
	dc->hwss = dcn20_funcs;
	dc->hwseq->funcs = dcn20_private_funcs;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dcn20_hubbub.h"
#include "reg_helper.h"
#include "clk_mgr.h"

/*
 * Register access shorthands bound to the local hubbub1 instance.
 * (A byte-identical duplicate of this macro block used to follow it;
 * the redundant copy has been removed — identical redefinition had no
 * effect.)
 */
#define REG(reg)\
	hubbub1->regs->reg

#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name

#ifdef NUM_VMID
#undef NUM_VMID
#endif
#define NUM_VMID 16

/*
 * Report whether DCC is supported for the given swizzle mode and
 * element size, and if so, whether independent 64B segments are laid
 * out contiguously in the horizontal/vertical access direction.
 * Returns false for unsupported swizzle/bpe combinations.
 */
bool hubbub2_dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;
	bool render_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_64KB_R_X:
		render_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (standard_swizzle) {
		if (bytes_per_element == 1) {
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__na;
			return true;
		}
		if (bytes_per_element == 2) {
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 4) {
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 8) {
			*segment_order_horz = segment_order__na;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
	}
	if (render_swizzle) {
		if (bytes_per_element == 2) {
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 4) {
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 8) {
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__non_contiguous;
			return true;
		}
	}
	if (display_swizzle && bytes_per_element == 8) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

/*
 * Map a surface pixel format to its element size in bytes.
 * Returns false for formats that do not support DCC.
 */
bool hubbub2_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
	case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
	case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

/*
 * 256B block dimensions for a given element size.  Outputs are left
 * untouched for element sizes outside {1, 2, 4, 8} — callers
 * pre-initialize them to 0.
 */
static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML.  might want to refactor DML to leverage from DML */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

/*
 * Decide, per access direction, whether the detile buffer can hold two
 * full-256B swaths (use full 256B requests) or must fall back to half
 * 128B requests.
 */
static void hubbub2_det_request_size(
		unsigned int detile_buf_size,
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub2_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */
}

/*
 * Compute the DCC compression capability (max uncompressed/compressed
 * block sizes, independent-64B-block requirement) for a surface.
 * Returns false when DCC is disabled, or the format/swizzle is
 * unsupported, or debug policy forbids half-request DCC.
 */
bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dc *dc = hubbub->ctx->dc;
	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub->funcs->dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
			input->surface_size.height,  input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* reg128 is true for either horz and vert
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	/* Exception for 64KB_R_X */
	if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
		dcc_control = dcc_control__128_128_xxx;

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	default:
		ASSERT(false);
		break;
	}
	output->capable = true;
	output->const_color_support = true;

	return true;
}

/* Translate a page-table depth (1..4 levels) into the HW enum. */
static enum dcn_hubbub_page_table_depth page_table_depth_to_hw(unsigned int page_table_depth)
{
	enum dcn_hubbub_page_table_depth depth = 0;

	switch (page_table_depth) {
	case 1:
		depth = DCN_PAGE_TABLE_DEPTH_1_LEVEL;
		break;
	case 2:
		depth = DCN_PAGE_TABLE_DEPTH_2_LEVEL;
		break;
	case 3:
		depth = DCN_PAGE_TABLE_DEPTH_3_LEVEL;
		break;
	case 4:
		depth = DCN_PAGE_TABLE_DEPTH_4_LEVEL;
		break;
	default:
		ASSERT(false);
		break;
	}

	return depth;
}

/*
 * Translate a page-table block size in bytes (4K/32K/64K) into the HW
 * enum; unknown sizes assert and pass the raw value through.
 */
static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigned int page_table_block_size)
{
	enum dcn_hubbub_page_table_block_size block_size = 0;

	switch (page_table_block_size) {
	case 4096:
		block_size = DCN_PAGE_TABLE_BLOCK_SIZE_4KB;
		break;
	case 65536:
		block_size = DCN_PAGE_TABLE_BLOCK_SIZE_64KB;
		break;
	case 32768:
		block_size = DCN_PAGE_TABLE_BLOCK_SIZE_32KB;
		break;
	default:
		ASSERT(false);
		block_size = page_table_block_size;
		break;
	}

	return block_size;
}

void hubbub2_init_vm_ctx(struct hubbub *hubbub,
		struct dcn_hubbub_virt_addr_config *va_config,
		int vmid)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config virt_config;

	virt_config.page_table_start_addr = va_config->page_table_start_addr >>
12; virt_config.page_table_end_addr = va_config->page_table_end_addr >> 12; virt_config.depth = page_table_depth_to_hw(va_config->page_table_depth); virt_config.block_size = page_table_block_size_to_hw(va_config->page_table_block_size); virt_config.page_table_base_addr = va_config->page_table_base_addr; dcn20_vmid_setup(&hubbub1->vmid[vmid], &virt_config); } int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, AGP_BASE, pa_config->system_aperture.agp_base >> 24); REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, (pa_config->page_table_default_page_addr >> 44) & 0xF); REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, (pa_config->page_table_default_page_addr >> 12) & 0xFFFFFFFF); if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; phys_config.depth = 0; phys_config.block_size = 0; // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); } return NUM_VMID; } void hubbub2_update_dchub(struct hubbub *hubbub, struct dchub_init_data *dh_data) { struct dcn20_hubbub *hubbub1 = 
TO_DCN20_HUBBUB(hubbub); if (REG(DCN_VM_FB_LOCATION_TOP) == 0) return; switch (dh_data->fb_mode) { case FRAME_BUFFER_MODE_ZFB_ONLY: /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/ REG_UPDATE(DCN_VM_FB_LOCATION_TOP, FB_TOP, 0); REG_UPDATE(DCN_VM_FB_LOCATION_BASE, FB_BASE, 0xFFFFFF); /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ REG_UPDATE(DCN_VM_AGP_BASE, AGP_BASE, dh_data->zfb_phys_addr_base >> 24); /*This field defines the bottom range of the AGP aperture and represents the 24*/ /*MSBs, bits [47:24] of the 48 address bits*/ REG_UPDATE(DCN_VM_AGP_BOT, AGP_BOT, dh_data->zfb_mc_base_addr >> 24); /*This field defines the top range of the AGP aperture and represents the 24*/ /*MSBs, bits [47:24] of the 48 address bits*/ REG_UPDATE(DCN_VM_AGP_TOP, AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 24); break; case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL: /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ REG_UPDATE(DCN_VM_AGP_BASE, AGP_BASE, dh_data->zfb_phys_addr_base >> 24); /*This field defines the bottom range of the AGP aperture and represents the 24*/ /*MSBs, bits [47:24] of the 48 address bits*/ REG_UPDATE(DCN_VM_AGP_BOT, AGP_BOT, dh_data->zfb_mc_base_addr >> 24); /*This field defines the top range of the AGP aperture and represents the 24*/ /*MSBs, bits [47:24] of the 48 address bits*/ REG_UPDATE(DCN_VM_AGP_TOP, AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 24); break; case FRAME_BUFFER_MODE_LOCAL_ONLY: /*Should not touch FB LOCATION (should be done by VBIOS)*/ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ REG_UPDATE(DCN_VM_AGP_BASE, AGP_BASE, 0); /*This field defines the bottom range of the AGP aperture and represents the 24*/ /*MSBs, bits [47:24] of the 48 address bits*/ REG_UPDATE(DCN_VM_AGP_BOT, AGP_BOT, 0xFFFFFF); /*This field defines the 
top range of the AGP aperture and represents the 24*/ /*MSBs, bits [47:24] of the 48 address bits*/ REG_UPDATE(DCN_VM_AGP_TOP, AGP_TOP, 0); break; default: break; } dh_data->dchub_initialzied = true; dh_data->dchub_info_valid = false; } void hubbub2_wm_read_state(struct hubbub *hubbub, struct dcn_hubbub_wm *wm) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); struct dcn_hubbub_wm_set *s; memset(wm, 0, sizeof(struct dcn_hubbub_wm)); s = &wm->sets[0]; s->wm_set = 0; s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A); if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) { s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); } s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); s = &wm->sets[1]; s->wm_set = 1; s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B); if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B); if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) { s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B); s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B); } s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B); s = &wm->sets[2]; s->wm_set = 2; s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C); if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C); if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) { s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C); s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C); } s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C); s = &wm->sets[3]; s->wm_set = 3; s->data_urgent = 
REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D); if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D); if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) { s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D); s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D); } s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); } void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, unsigned int dccg_ref_freq_inKhz, unsigned int *dchub_ref_freq_inKhz) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t ref_div = 0; uint32_t ref_en = 0; REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); if (ref_en) { if (ref_div == 2) *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz / 2; else *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz; // DC hub reference frequency must be around 50Mhz, otherwise there may be // overflow/underflow issues when doing HUBBUB programming if (*dchub_ref_freq_inKhz < 40000 || *dchub_ref_freq_inKhz > 60000) ASSERT_CRITICAL(false); return; } else { *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz; // HUBBUB global timer must be enabled. ASSERT_CRITICAL(false); return; } } static bool hubbub2_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); bool wm_pending = false; /* * Need to clamp to max of the register values (i.e. 
no wrap) * for dcn1, all wm registers are 21-bit wide */ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; /* * There's a special case when going from p-state support to p-state unsupported * here we are going to LOWER watermarks to go to dummy p-state only, but this has * to be done prepare_bandwidth, not optimize */ if (hubbub1->base.ctx->dc->clk_mgr->clks.prev_p_state_change_support == true && hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false) safe_to_lower = true; hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); return wm_pending; } void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_state) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); if (REG(DCN_VM_FAULT_ADDR_MSB)) hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_MSB); if (REG(DCN_VM_FAULT_ADDR_LSB)) hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_LSB); if (REG(DCN_VM_FAULT_CNTL)) REG_GET(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, &hubbub_state->vm_error_mode); if (REG(DCN_VM_FAULT_STATUS)) { REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, &hubbub_state->vm_error_status); REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid); REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe); } if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) { REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); } if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL)) hubbub_state->watermark_change_cntl = 
REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL); if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL)) hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL); } static const struct hubbub_funcs hubbub2_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub2_init_dchub_sys_ctx, .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub2_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub2_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub2_program_watermarks, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .hubbub_read_state = hubbub2_read_state, }; void hubbub2_construct(struct dcn20_hubbub *hubbub, struct dc_context *ctx, const struct dcn_hubbub_registers *hubbub_regs, const struct dcn_hubbub_shift *hubbub_shift, const struct dcn_hubbub_mask *hubbub_mask) { hubbub->base.ctx = ctx; hubbub->base.funcs = &hubbub2_funcs; hubbub->regs = hubbub_regs; hubbub->shifts = hubbub_shift; hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ }
linux-master
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" #include "dcn20_dpp.h" #include "basics/conversion.h" #include "dcn10/dcn10_cm_common.h" #define REG(reg)\ dpp->tf_regs->reg #define IND_REG(index) \ (index) #define CTX \ dpp->base.ctx #undef FN #define FN(reg_name, field_name) \ dpp->tf_shift->field_name, dpp->tf_mask->field_name static void dpp2_enable_cm_block( struct dpp *dpp_base) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); unsigned int cm_bypass_mode = 0; //Temp, put CM in bypass mode if (dpp_base->ctx->dc->debug.cm_in_bypass) cm_bypass_mode = 1; REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); } static bool dpp2_degamma_ram_inuse( struct dpp *dpp_base, bool *ram_a_inuse) { bool ret = false; uint32_t status_reg = 0; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_GET(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, &status_reg); if (status_reg == 3) { *ram_a_inuse = true; ret = true; } else if (status_reg == 4) { *ram_a_inuse = false; ret = true; } return ret; } static void dpp2_program_degamma_lut( struct dpp *dpp_base, const struct pwl_result_data *rgb, uint32_t num, bool is_ram_a) { uint32_t i; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, 7); REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); for (i = 0 ; i < num; i++) { REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); } } void dpp2_set_degamma_pwl( struct dpp *dpp_base, const struct pwl_params *params) { bool is_ram_a = true; dpp1_power_on_degamma_lut(dpp_base, true); dpp2_enable_cm_block(dpp_base); dpp2_degamma_ram_inuse(dpp_base, &is_ram_a); if (is_ram_a == true) dpp1_program_degamma_lutb_settings(dpp_base, params); else dpp1_program_degamma_luta_settings(dpp_base, params); dpp2_program_degamma_lut(dpp_base, params->rgb_resulted, params->hw_points_num, !is_ram_a); dpp1_degamma_ram_select(dpp_base, !is_ram_a); } void dpp2_set_degamma( struct dpp *dpp_base, enum ipp_degamma_mode mode) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); dpp2_enable_cm_block(dpp_base); switch (mode) { case IPP_DEGAMMA_MODE_BYPASS: /* Setting de gamma bypass for now */ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); break; case IPP_DEGAMMA_MODE_HW_sRGB: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); break; case IPP_DEGAMMA_MODE_HW_xvYCC: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); break; case IPP_DEGAMMA_MODE_USER_PWL: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); break; default: BREAK_TO_DEBUGGER(); break; } } static void program_gamut_remap( struct dcn20_dpp *dpp, const uint16_t *regval, enum dcn20_gamut_remap_select select) { uint32_t cur_select = 0; struct color_matrices_reg gam_regs; if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) { REG_SET(CM_GAMUT_REMAP_CONTROL, 0, CM_GAMUT_REMAP_MODE, 0); return; } /* determine which gamut_remap coefficients (A or B) we are using * currently. 
select the alternate set to double buffer * the update so gamut_remap is updated on frame boundary */ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, CM_TEST_DEBUG_DATA_STATUS_IDX, CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); /* value stored in dbg reg will be 1 greater than mode we want */ if (cur_select != DCN2_GAMUT_REMAP_COEF_A) select = DCN2_GAMUT_REMAP_COEF_A; else select = DCN2_GAMUT_REMAP_COEF_B; gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; if (select == DCN2_GAMUT_REMAP_COEF_A) { gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); } else { gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); } cm_helper_program_color_matrices( dpp->base.ctx, regval, &gam_regs); REG_SET( CM_GAMUT_REMAP_CONTROL, 0, CM_GAMUT_REMAP_MODE, select); } void dpp2_cm_set_gamut_remap( struct dpp *dpp_base, const struct dpp_grph_csc_adjustment *adjust) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); int i = 0; if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) /* Bypass if type is bypass or hw */ program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); else { struct fixed31_32 arr_matrix[12]; uint16_t arr_reg_val[12]; for (i = 0; i < 12; i++) arr_matrix[i] = adjust->temperature_matrix[i]; convert_float_matrix( arr_reg_val, arr_matrix, 12); program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); } } void dpp2_program_input_csc( struct dpp *dpp_base, enum dc_color_space color_space, enum dcn20_input_csc_select input_select, const struct out_csc_color_matrix *tbl_entry) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); int i; int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); const uint16_t *regval = NULL; uint32_t cur_select = 
0; enum dcn20_input_csc_select select; struct color_matrices_reg icsc_regs; if (input_select == DCN2_ICSC_SELECT_BYPASS) { REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); return; } if (tbl_entry == NULL) { for (i = 0; i < arr_size; i++) if (dpp_input_csc_matrix[i].color_space == color_space) { regval = dpp_input_csc_matrix[i].regval; break; } if (regval == NULL) { BREAK_TO_DEBUGGER(); return; } } else { regval = tbl_entry->regval; } /* determine which CSC coefficients (A or B) we are using * currently. select the alternate set to double buffer * the CSC update so CSC is updated on frame boundary */ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, CM_TEST_DEBUG_DATA_STATUS_IDX, CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); if (cur_select != DCN2_ICSC_SELECT_ICSC_A) select = DCN2_ICSC_SELECT_ICSC_A; else select = DCN2_ICSC_SELECT_ICSC_B; icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; if (select == DCN2_ICSC_SELECT_ICSC_A) { icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); } else { icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); } cm_helper_program_color_matrices( dpp->base.ctx, regval, &icsc_regs); REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, select); } static void dpp20_power_on_blnd_lut( struct dpp *dpp_base, bool power_on) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_SET(CM_MEM_PWR_CTRL, 0, BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0:1); } static void dpp20_configure_blnd_lut( struct dpp *dpp_base, bool is_ram_a) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, 7); REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); } static void dpp20_program_blnd_pwl( struct dpp *dpp_base, const struct pwl_result_data *rgb, uint32_t num) { uint32_t i; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); for (i = 0 ; i < num; i++) { REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].delta_red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].delta_green_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].delta_blue_reg); } } static void dcn20_dpp_cm_get_reg_field( struct dcn20_dpp *dpp, struct xfer_func_reg *reg) { reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; 
reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; } /*program blnd lut RAM A*/ static void dpp20_program_blnd_luta_settings( struct dpp *dpp_base, const struct pwl_params *params) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); struct xfer_func_reg gam_regs; dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_B); gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_G); gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_R); gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G); gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); } /*program blnd lut RAM B*/ static void dpp20_program_blnd_lutb_settings( struct dpp *dpp_base, const struct pwl_params *params) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); 
	/* (continuation of dpp20_program_blnd_lutb_settings: bind the blend-gamma
	 * RAM B register set and program it via the common PWL helper) */
	struct xfer_func_reg gam_regs;

	dcn20_dpp_cm_get_reg_field(dpp, &gam_regs);

	/* Point the generic xfer_func_reg bundle at the RAM B registers. */
	gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

/*
 * Read back which blend-gamma LUT bank is currently active.
 * CM_BLNDGAM_CONFIG_STATUS encoding (per the switch below):
 * 0 = bypass, 1 = RAM A, 2 = RAM B; any other value is treated as bypass.
 */
static enum dc_lut_mode dpp20_get_blndgam_current(struct dpp *dpp_base)
{
	enum dc_lut_mode mode;
	uint32_t state_mode;
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK,
			CM_BLNDGAM_CONFIG_STATUS, &state_mode);

	switch (state_mode) {
	case 0:
		mode = LUT_BYPASS;
		break;
	case 1:
		mode = LUT_RAM_A;
		break;
	case 2:
		mode = LUT_RAM_B;
		break;
	default:
		mode = LUT_BYPASS;
		break;
	}
	return mode;
}

/*
 * Program the blend (BLNDGAM) LUT with a new PWL curve.
 *
 * A NULL @params puts the LUT in bypass (LUT_MODE = 0) and returns false.
 * Otherwise the currently inactive RAM bank is selected (A/B double
 * buffering, so the active bank is never written), powered on, configured
 * and loaded, then switched to at the end.
 */
bool dpp20_program_blnd_lut(
	struct dpp *dpp_base, const struct pwl_params *params)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	if (params == NULL) {
		/* bypass the blend LUT entirely */
		REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, 0);
		return false;
	}
	/* always write to the bank that is not currently in use */
	current_mode = dpp20_get_blndgam_current(dpp_base);
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		next_mode = LUT_RAM_B;
	else
		next_mode = LUT_RAM_A;
	dpp20_power_on_blnd_lut(dpp_base, true);
	dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A);
	if (next_mode ==
LUT_RAM_A) dpp20_program_blnd_luta_settings(dpp_base, params); else dpp20_program_blnd_lutb_settings(dpp_base, params); dpp20_program_blnd_pwl( dpp_base, params->rgb_resulted, params->hw_points_num); REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); return true; } static void dpp20_program_shaper_lut( struct dpp *dpp_base, const struct pwl_result_data *rgb, uint32_t num) { uint32_t i, red, green, blue; uint32_t red_delta, green_delta, blue_delta; uint32_t red_value, green_value, blue_value; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); for (i = 0 ; i < num; i++) { red = rgb[i].red_reg; green = rgb[i].green_reg; blue = rgb[i].blue_reg; red_delta = rgb[i].delta_red_reg; green_delta = rgb[i].delta_green_reg; blue_delta = rgb[i].delta_blue_reg; red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); } } static enum dc_lut_mode dpp20_get_shaper_current(struct dpp *dpp_base) { enum dc_lut_mode mode; uint32_t state_mode; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, &state_mode); switch (state_mode) { case 0: mode = LUT_BYPASS; break; case 1: mode = LUT_RAM_A; break; case 2: mode = LUT_RAM_B; break; default: mode = LUT_BYPASS; break; } return mode; } static void dpp20_configure_shaper_lut( struct dpp *dpp_base, bool is_ram_a) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, 7); REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); } /*program shaper RAM A*/ static void dpp20_program_shaper_luta_settings( struct dpp *dpp_base, const struct pwl_params *params) { const struct gamma_curve *curve; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); curve = params->arr_curve_points; REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, 
curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, 
CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, 
CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); } /*program shaper RAM B*/ static void dpp20_program_shaper_lutb_settings( struct dpp *dpp_base, const struct pwl_params *params) { const struct gamma_curve *curve; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); curve = params->arr_curve_points; REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, 
curve[0].offset, CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); curve += 2; 
REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, 
curve[1].offset, CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); } bool dpp20_program_shaper( struct dpp *dpp_base, const struct pwl_params *params) { enum dc_lut_mode current_mode; enum dc_lut_mode next_mode; struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); if (params == NULL) { REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); return false; } current_mode = dpp20_get_shaper_current(dpp_base); if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) next_mode = LUT_RAM_B; else next_mode = LUT_RAM_A; dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dpp20_program_shaper_luta_settings(dpp_base, params); else dpp20_program_shaper_lutb_settings(dpp_base, params); dpp20_program_shaper_lut( dpp_base, params->rgb_resulted, params->hw_points_num); REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 
		1:2);
	return true;
}

/*
 * Query the current 3DLUT hardware configuration.
 * Returns the active bank (CM_3DLUT_CONFIG_STATUS: 0 = bypass, 1 = RAM A,
 * 2 = RAM B, otherwise bypass) and reports, via the out parameters, whether
 * the color channels are 12-bit (30-bit mode disabled) and whether the LUT
 * size is 17x17x17 (CM_3DLUT_SIZE == 0).
 */
static enum dc_lut_mode get3dlut_config(
			struct dpp *dpp_base,
			bool *is_17x17x17,
			bool *is_12bits_color_channel)
{
	uint32_t i_mode, i_enable_10bits, lut_size;
	enum dc_lut_mode mode;
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL,
			CM_3DLUT_CONFIG_STATUS, &i_mode,
			CM_3DLUT_30BIT_EN, &i_enable_10bits);

	switch (i_mode) {
	case 0:
		mode = LUT_BYPASS;
		break;
	case 1:
		mode = LUT_RAM_A;
		break;
	case 2:
		mode = LUT_RAM_B;
		break;
	default:
		mode = LUT_BYPASS;
		break;
	}

	/* 30-bit (10 bits/channel) enabled means NOT 12-bit channels */
	if (i_enable_10bits > 0)
		*is_12bits_color_channel = false;
	else
		*is_12bits_color_channel = true;

	REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size);

	if (lut_size == 0)
		*is_17x17x17 = true;
	else
		*is_17x17x17 = false;

	return mode;
}

/*
 * select ramA or ramB, or bypass
 * select color channel size 10 or 12 bits
 * select 3dlut size 17x17x17 or 9x9x9
 *
 * NOTE(review): @is_color_channel_12bits is accepted but not used here —
 * channel width is programmed in dpp20_select_3dlut_ram(); confirm the
 * parameter is intentionally unused.
 */
static void dpp20_set_3dlut_mode(
		struct dpp *dpp_base,
		enum dc_lut_mode mode,
		bool is_color_channel_12bits,
		bool is_lut_size17x17x17)
{
	uint32_t lut_mode;
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	if (mode == LUT_BYPASS)
		lut_mode = 0;
	else if (mode == LUT_RAM_A)
		lut_mode = 1;
	else
		lut_mode = 2;

	REG_UPDATE_2(CM_3DLUT_MODE,
			CM_3DLUT_MODE, lut_mode,
			CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 0 : 1);
}

/* Select which RAM bank subsequent writes target and the channel width
 * (30-bit mode is enabled when channels are NOT 12-bit). */
static void dpp20_select_3dlut_ram(
		struct dpp *dpp_base,
		enum dc_lut_mode mode,
		bool is_color_channel_12bits)
{
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL,
			CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1,
			CM_3DLUT_30BIT_EN,
			is_color_channel_12bits == true ?
			0:1);
}

/*
 * Load the selected 3DLUT RAM with 12-bit color channels.
 * Entries are written in pairs (two entries per CM_3DLUT_DATA write),
 * one color component at a time; values are shifted left by 4 into the
 * 12-bit register field.  Assumes @entries is even — TODO confirm.
 */
static void dpp20_set3dlut_ram12(
		struct dpp *dpp_base,
		const struct dc_rgb *lut,
		uint32_t entries)
{
	uint32_t i, red, green, blue, red1, green1, blue1;
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	for (i = 0 ; i < entries; i += 2) {
		red = lut[i].red<<4;
		green = lut[i].green<<4;
		blue = lut[i].blue<<4;
		red1 = lut[i+1].red<<4;
		green1 = lut[i+1].green<<4;
		blue1 = lut[i+1].blue<<4;

		REG_SET_2(CM_3DLUT_DATA, 0,
				CM_3DLUT_DATA0, red,
				CM_3DLUT_DATA1, red1);
		REG_SET_2(CM_3DLUT_DATA, 0,
				CM_3DLUT_DATA0, green,
				CM_3DLUT_DATA1, green1);
		REG_SET_2(CM_3DLUT_DATA, 0,
				CM_3DLUT_DATA0, blue,
				CM_3DLUT_DATA1, blue1);
	}
}

/*
 * load selected lut with 10 bits color channels
 * (all three components packed into one 30-bit register write)
 */
static void dpp20_set3dlut_ram10(
		struct dpp *dpp_base,
		const struct dc_rgb *lut,
		uint32_t entries)
{
	uint32_t i, red, green, blue, value;
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	for (i = 0; i < entries; i++) {
		red = lut[i].red;
		green = lut[i].green;
		blue = lut[i].blue;
		value = (red<<20) | (green<<10) | blue;

		REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value);
	}
}

/* Choose which of the four 3DLUT sub-RAMs the data port writes to and
 * reset the write index to the start of the LUT. */
static void dpp20_select_3dlut_ram_mask(
		struct dpp *dpp_base,
		uint32_t ram_selection_mask)
{
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK,
			ram_selection_mask);
	REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0);
}

/*
 * Program the full 3DLUT from tetrahedral parameters.
 *
 * A NULL @params bypasses the 3DLUT and returns false.  Otherwise the
 * inactive RAM bank is chosen (A/B double buffering), the four sub-LUTs
 * (lut0..lut3) are loaded one at a time via write-enable masks 0x1/0x2/
 * 0x4/0x8, in either 12-bit or 10-bit channel format, and finally the
 * bank is made active with the selected size (17x17x17 or 9x9x9).
 */
bool dpp20_program_3dlut(
		struct dpp *dpp_base,
		struct tetrahedral_params *params)
{
	enum dc_lut_mode mode;
	bool is_17x17x17;
	bool is_12bits_color_channel;
	struct dc_rgb *lut0;
	struct dc_rgb *lut1;
	struct dc_rgb *lut2;
	struct dc_rgb *lut3;
	int lut_size0;
	int lut_size;

	if (params == NULL) {
		dpp20_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
		return false;
	}
	/* flip to the bank that is not currently active */
	mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel);

	if (mode == LUT_BYPASS || mode == LUT_RAM_B)
		mode = LUT_RAM_A;
	else
		mode = LUT_RAM_B;

	is_17x17x17 = !params->use_tetrahedral_9;
	is_12bits_color_channel = params->use_12bits;

	if (is_17x17x17) {
		lut0 = params->tetrahedral_17.lut0;
		lut1 = params->tetrahedral_17.lut1;
		lut2 = params->tetrahedral_17.lut2;
		lut3 = params->tetrahedral_17.lut3;
		lut_size0 = sizeof(params->tetrahedral_17.lut0)/
					sizeof(params->tetrahedral_17.lut0[0]);
		lut_size  = sizeof(params->tetrahedral_17.lut1)/
					sizeof(params->tetrahedral_17.lut1[0]);
	} else {
		lut0 = params->tetrahedral_9.lut0;
		lut1 = params->tetrahedral_9.lut1;
		lut2 = params->tetrahedral_9.lut2;
		lut3 = params->tetrahedral_9.lut3;
		lut_size0 = sizeof(params->tetrahedral_9.lut0)/
				sizeof(params->tetrahedral_9.lut0[0]);
		lut_size  = sizeof(params->tetrahedral_9.lut1)/
				sizeof(params->tetrahedral_9.lut1[0]);
	}

	dpp20_select_3dlut_ram(dpp_base, mode,
				is_12bits_color_channel);
	/* load each of the four sub-LUTs in turn */
	dpp20_select_3dlut_ram_mask(dpp_base, 0x1);
	if (is_12bits_color_channel)
		dpp20_set3dlut_ram12(dpp_base, lut0, lut_size0);
	else
		dpp20_set3dlut_ram10(dpp_base, lut0, lut_size0);

	dpp20_select_3dlut_ram_mask(dpp_base, 0x2);
	if (is_12bits_color_channel)
		dpp20_set3dlut_ram12(dpp_base, lut1, lut_size);
	else
		dpp20_set3dlut_ram10(dpp_base, lut1, lut_size);

	dpp20_select_3dlut_ram_mask(dpp_base, 0x4);
	if (is_12bits_color_channel)
		dpp20_set3dlut_ram12(dpp_base, lut2, lut_size);
	else
		dpp20_set3dlut_ram10(dpp_base, lut2, lut_size);

	dpp20_select_3dlut_ram_mask(dpp_base, 0x8);
	if (is_12bits_color_channel)
		dpp20_set3dlut_ram12(dpp_base, lut3, lut_size);
	else
		dpp20_set3dlut_ram10(dpp_base, lut3, lut_size);

	dpp20_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel,
					is_17x17x17);
	return true;
}

/* Program the HDR multiplier coefficient applied in the CM block. */
void dpp2_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}
/*
 * (non-source dataset residue — provenance columns, kept for reference)
 * repo: linux-master
 * path: drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
 */
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "resource.h" #include "mcif_wb.h" #include "dcn20_mmhubbub.h" #define REG(reg)\ mcif_wb20->mcif_wb_regs->reg #define CTX \ mcif_wb20->base.ctx #undef FN #define FN(reg_name, field_name) \ mcif_wb20->mcif_wb_shift->field_name, mcif_wb20->mcif_wb_mask->field_name #define MCIF_ADDR(addr) (((unsigned long long)addr & 0xffffffffff) + 0xFE) >> 8 #define MCIF_ADDR_HIGH(addr) (unsigned long long)addr >> 40 /* wbif programming guide: * 1. 
set up wbif parameter: * unsigned long long luma_address[4]; //4 frame buffer * unsigned long long chroma_address[4]; * unsigned int luma_pitch; * unsigned int chroma_pitch; * unsigned int warmup_pitch=0x10; //256B align, the page size is 4KB when it is 0x10 * unsigned int slice_lines; //slice size * unsigned int time_per_pixel; // time per pixel, in ns * unsigned int arbitration_slice; // 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes * unsigned int max_scaled_time; // used for QOS generation * unsigned int swlock=0x0; * unsigned int cli_watermark[4]; //4 group urgent watermark * unsigned int pstate_watermark[4]; //4 group pstate watermark * unsigned int sw_int_en; // Software interrupt enable, frame end and overflow * unsigned int sw_slice_int_en; // slice end interrupt enable * unsigned int sw_overrun_int_en; // overrun error interrupt enable * unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow * unsigned int vce_slice_int_en; // VCE slice end interrupt enable, frame end and overflow * * 2. configure wbif register * a. call mmhubbub_config_wbif() * * 3. Enable wbif * call set_wbif_bufmgr_enable(); * * 4. 
wbif_dump_status(), option, for debug purpose * the bufmgr status can show the progress of write back, can be used for debug purpose */ static void mmhubbub2_config_mcif_buf(struct mcif_wb *mcif_wb, struct mcif_buf_params *params, unsigned int dest_height) { struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); /* sw lock buffer0~buffer3, default is 0 */ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, params->swlock); /* buffer address for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0])); REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0])); /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, 0); /* buffer address for Chroma in planar mode (unused in packing mode) */ REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0])); REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0])); /* right eye offset for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, 0); /* buffer address for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1])); REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1])); /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, 0); /* buffer address for Chroma in planar mode (unused in packing mode) */ REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1])); REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1])); /* right eye offset for packing mode 
or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, 0); /* buffer address for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2])); REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2])); /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, 0); /* buffer address for Chroma in planar mode (unused in packing mode) */ REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2])); REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2])); /* right eye offset for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, 0); /* buffer address for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3])); REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3])); /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, 0); /* buffer address for Chroma in planar mode (unused in packing mode) */ REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3])); REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3])); /* right eye offset for packing mode or Luma in planar mode */ REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, 0); /* setup luma & chroma size * should be enough to contain a whole frame Luma data, * the programmed value is frame buffer size [27:8], 256-byte aligned */ REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * 
dest_height); REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height); /* enable address fence */ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1); /* setup pitch, the programmed value is [15:8], 256B align */ REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8, MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8); /* Set pitch for MC cache warm up mode */ /* Pitch is 256 bytes aligned. The default pitch is 4K */ /* default is 0x10 */ REG_UPDATE(MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, params->warmup_pitch); } static void mmhubbub2_config_mcif_arb(struct mcif_wb *mcif_wb, struct mcif_arb_params *params) { struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); /* Programmed by the video driver based on the CRTC timing (for DWB) */ REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel); /* Programming dwb watermark */ /* Watermark to generate urgent in MCIF_WB_CLI, value is determined by MCIF_WB_CLI_WATERMARK_MASK. */ /* Program in ns. A formula will be provided in the pseudo code to calculate the value. 
*/ REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x0); /* urgent_watermarkA */ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]); REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x1); /* urgent_watermarkB */ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]); REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x2); /* urgent_watermarkC */ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]); REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x3); /* urgent_watermarkD */ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]); /* Programming nb pstate watermark */ /* nbp_state_change_watermarkA */ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0); REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]); /* nbp_state_change_watermarkB */ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1); REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]); /* nbp_state_change_watermarkC */ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2); REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]); /* nbp_state_change_watermarkD */ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3); REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]); /* max_scaled_time */ REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, params->max_scaled_time); /* slice_lines */ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1); /* Set arbitration unit for Luma/Chroma */ /* arb_unit=2 should be chosen for more efficiency */ /* Arbitration size, 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes */ 
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL,
		MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
}

/* Program the MCIF_WB interrupt enables from @params: the software buffer
 * manager interrupts (int/slice/overrun) and the VCE buffer manager
 * interrupts.  The VCE slice interrupt field does not exist on every
 * register set, so it is gated on its field mask being non-zero.
 */
void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb, struct mcif_irq_params *params)
{
	struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);

	/* Set interrupt mask */
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, params->sw_int_en);
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, params->sw_slice_int_en);
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, params->sw_overrun_int_en);
	REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, params->vce_int_en);
	/* Field is absent on some register sets; skip when its mask is 0 */
	if (mcif_wb20->mcif_wb_mask->MCIF_WB_BUFMGR_VCE_SLICE_INT_EN)
		REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
}

/* Enable the MCIF writeback buffer manager (MCIF_WB_BUFMGR_ENABLE = 1). */
void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb)
{
	struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);

	/* Enable Mcifwb */
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 1);
}

/* Disable the MCIF writeback buffer manager (MCIF_WB_BUFMGR_ENABLE = 0). */
void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb)
{
	struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);

	/* disable buffer manager */
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 0);
}

/* set which group of pstate watermark to use and set wbif watermark change request */
/*
static void mmhubbub2_wbif_watermark_change_req(struct mcif_wb *mcif_wb, unsigned int wm_set)
{
	struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
	uint32_t change_req;

	REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, &change_req);
	change_req = (change_req == 0) ?
1 : 0;
	REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, wm_set);
	REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, change_req);
}
*/

/* Set watermark change interrupt disable bit */
/*
static void mmhubbub2_set_wbif_watermark_change_int_disable(struct mcif_wb *mcif_wb, unsigned int ack_int_dis)
{
	struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);

	REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, ack_int_dis);
}
*/

/* Read watermark change interrupt status */
/*
unsigned int mmhubbub2_get_wbif_watermark_change_int_status(struct mcif_wb *mcif_wb)
{
	struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
	uint32_t irq_status;

	REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, &irq_status);
	return irq_status;
}
*/

/* Copy the current writeback frame from the luma/chroma source buffers into
 * the caller-provided destination buffers and fill @dump_info with the frame
 * geometry (format, dimensions, pitches, total size).  The buffer manager is
 * software-locked (SW_LOCK = 0xf) for the duration of the copies and
 * unlocked afterwards.
 *
 * NOTE(review): the chroma memcpy moves chroma_pitch * dest_height / 2
 * bytes, i.e. it assumes 4:2:0-style half-height chroma -- confirm against
 * the writeback output formats actually used.  Also, dump_info->size counts
 * a full-height chroma plane, which differs from the amount copied.
 */
void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
		struct mcif_buf_params *mcif_params,
		enum dwb_scaler_mode out_format,
		unsigned int dest_width,
		unsigned int dest_height,
		struct mcif_wb_frame_dump_info *dump_info,
		unsigned char *luma_buffer,
		unsigned char *chroma_buffer,
		unsigned char *dest_luma_buffer,
		unsigned char *dest_chroma_buffer)
{
	struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);

	/* Lock the buffers so the frame is not recycled mid-copy */
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf);

	memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height);
	memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2);

	/* Release the software lock */
	REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0);

	dump_info->format = out_format;
	dump_info->width = dest_width;
	dump_info->height = dest_height;
	dump_info->luma_pitch = mcif_params->luma_pitch;
	dump_info->chroma_pitch = mcif_params->chroma_pitch;
	dump_info->size = dest_height * (mcif_params->luma_pitch + mcif_params->chroma_pitch);
}

/* Function table installed on the base mcif_wb object at construction. */
static const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
	.enable_mcif = mmhubbub2_enable_mcif,
	.disable_mcif = mmhubbub2_disable_mcif,
	.config_mcif_buf = mmhubbub2_config_mcif_buf,
	.config_mcif_arb = mmhubbub2_config_mcif_arb,
	.config_mcif_irq =
mmhubbub2_config_mcif_irq,
	.dump_frame = mcifwb2_dump_frame,
};

/* Initialize a dcn20_mmhubbub instance: wire up the DCN2.0 function table
 * and store the register address/shift/mask tables for this hardware
 * instance (the mask table is read e.g. in mmhubbub2_config_mcif_irq to
 * detect optional fields).
 *
 * @mcif_wb20:     instance being constructed
 * @ctx:           DC context this instance belongs to
 * @mcif_wb_regs:  register address table for this instance
 * @mcif_wb_shift: per-field bit shifts
 * @mcif_wb_mask:  per-field bit masks
 * @inst:          hardware instance index
 */
void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20,
		struct dc_context *ctx,
		const struct dcn20_mmhubbub_registers *mcif_wb_regs,
		const struct dcn20_mmhubbub_shift *mcif_wb_shift,
		const struct dcn20_mmhubbub_mask *mcif_wb_mask,
		int inst)
{
	mcif_wb20->base.ctx = ctx;
	mcif_wb20->base.inst = inst;
	mcif_wb20->base.funcs = &dcn20_mmhubbub_funcs;
	mcif_wb20->mcif_wb_regs = mcif_wb_regs;
	mcif_wb20->mcif_wb_shift = mcif_wb_shift;
	mcif_wb20->mcif_wb_mask = mcif_wb_mask;
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c