Dataset columns: python_code (string, 0 to 1.8M chars), repo_name (7 distinct values), file_path (string, 5 to 99 chars).
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "../dmub_srv.h" #include "dmub_reg.h" #include "dmub_dcn32.h" #include "dcn/dcn_3_2_0_offset.h" #include "dcn/dcn_3_2_0_sh_mask.h" #define DCN_BASE__INST0_SEG2 0x000034C0 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg #define CTX dmub #define REGS dmub->regs_dcn32 #define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name) const struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs = { #define DMUB_SR(reg) REG_OFFSET_EXP(reg), { DMUB_DCN32_REGS() DMCUB_INTERNAL_REGS() }, #undef DMUB_SR #define DMUB_SF(reg, field) FD_MASK(reg, field), { DMUB_DCN32_FIELDS() }, #undef DMUB_SF #define DMUB_SF(reg, field) FD_SHIFT(reg, field), { DMUB_DCN32_FIELDS() }, #undef DMUB_SF }; static void dmub_dcn32_get_fb_base_offset(struct dmub_srv *dmub, uint64_t *fb_base, uint64_t *fb_offset) { uint32_t tmp; if (dmub->fb_base || dmub->fb_offset) { *fb_base = dmub->fb_base; *fb_offset = dmub->fb_offset; return; } REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); *fb_base = (uint64_t)tmp << 24; REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp); *fb_offset = (uint64_t)tmp << 24; } static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in, uint64_t fb_base, uint64_t fb_offset, union dmub_addr *addr_out) { addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; } void dmub_dcn32_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; const uint32_t timeout = 30; uint32_t in_reset, scratch, i; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); if (in_reset == 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); /** * Timeout covers both the ACK and the wait * for remaining work to finish. * * This is mostly bound by the PHY disable sequence. * Each register check will be greater than 1us, so * don't bother using udelay. */ for (i = 0; i < timeout; ++i) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; } for (i = 0; i < timeout; ++i) { scratch = dmub->hw_funcs.get_gpint_response(dmub); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; } /* Force reset in case we timed out, DMCUB is likely hung. 
*/ } REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX0_RPTR, 0); REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); /* Clear the GPINT command manually so we don't reset again. */ cmd.all = 0; dmub->hw_funcs.set_gpint(dmub, cmd); } void dmub_dcn32_reset_release(struct dmub_srv *dmub) { REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF); REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0); } void dmub_dcn32_backdoor_load(struct dmub_srv *dmub, const struct dmub_window *cw0, const struct dmub_window *cw1) { union dmub_addr offset; uint64_t fb_base, fb_offset; dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset); REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, DMCUB_REGION3_CW0_ENABLE, 1); dmub_dcn32_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, DMCUB_REGION3_CW1_ENABLE, 1); REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, 0x20); } void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub, const struct dmub_window *cw0, const struct dmub_window *cw1) { union dmub_addr offset; REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); offset = cw0->offset; REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, DMCUB_REGION3_CW0_ENABLE, 1); offset = cw1->offset; REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, DMCUB_REGION3_CW1_ENABLE, 1); REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, 0x20); } void dmub_dcn32_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, const struct dmub_window *cw6) { union dmub_addr offset; offset = cw3->offset; REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, DMCUB_REGION3_CW3_ENABLE, 1); offset = cw4->offset; REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base); 
REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0, DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top, DMCUB_REGION3_CW4_ENABLE, 1); offset = cw5->offset; REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part); REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0, DMCUB_REGION5_TOP_ADDRESS, cw5->region.top - cw5->region.base - 1, DMCUB_REGION5_ENABLE, 1); offset = cw6->offset; REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, DMCUB_REGION3_CW6_ENABLE, 1); } void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1) { REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base); REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_WPTR); } uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); } void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset) { REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset); } void dmub_dcn32_setup_out_mailbox(struct dmub_srv *dmub, const struct dmub_region *outbox1) { REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base); REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base); } uint32_t dmub_dcn32_get_outbox1_wptr(struct dmub_srv *dmub) { /** * outbox1 wptr register is accessed without locks (dal & dc) * and to be called only by dmub_srv_stat_get_notification() */ return REG_READ(DMCUB_OUTBOX1_WPTR); } void dmub_dcn32_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset) { /** * outbox1 rptr register is accessed without locks (dal & dc) * and to be called only by dmub_srv_stat_get_notification() */ REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset); } bool dmub_dcn32_is_hw_init(struct dmub_srv *dmub) { union dmub_fw_boot_status status; uint32_t is_hw_init; status.all = REG_READ(DMCUB_SCRATCH0); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init); return is_hw_init != 0 && status.bits.dal_fw; } bool dmub_dcn32_is_supported(struct dmub_srv *dmub) { uint32_t supported = 0; REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported); return supported; } void dmub_dcn32_set_gpint(struct dmub_srv *dmub, union dmub_gpint_data_register reg) { REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all); } bool dmub_dcn32_is_gpint_acked(struct dmub_srv *dmub, union dmub_gpint_data_register reg) { union dmub_gpint_data_register test; reg.bits.status = 0; test.all = REG_READ(DMCUB_GPINT_DATAIN1); return test.all == reg.all; } uint32_t dmub_dcn32_get_gpint_response(struct dmub_srv *dmub) { return REG_READ(DMCUB_SCRATCH7); } uint32_t dmub_dcn32_get_gpint_dataout(struct dmub_srv *dmub) { uint32_t dataout = REG_READ(DMCUB_GPINT_DATAOUT); REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 0); REG_WRITE(DMCUB_GPINT_DATAOUT, 0); REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 1); REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 0); REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 1); return dataout; } union dmub_fw_boot_status dmub_dcn32_get_fw_boot_status(struct 
dmub_srv *dmub) { union dmub_fw_boot_status status; status.all = REG_READ(DMCUB_SCRATCH0); return status; } void dmub_dcn32_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { union dmub_fw_boot_options boot_options = {0}; boot_options.bits.z10_disable = params->disable_z10; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } void dmub_dcn32_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip) { union dmub_fw_boot_options boot_options; boot_options.all = REG_READ(DMCUB_SCRATCH14); boot_options.bits.skip_phy_init_panel_sequence = skip; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } void dmub_dcn32_setup_outbox0(struct dmub_srv *dmub, const struct dmub_region *outbox0) { REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base); REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base); } uint32_t dmub_dcn32_get_outbox0_wptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_OUTBOX0_WPTR); } void dmub_dcn32_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset) { REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset); } uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub) { return REG_READ(DMCUB_TIMER_CURRENT); } void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) { uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; if (!dmub || !diag_data) return; memset(diag_data, 0, sizeof(*diag_data)); diag_data->dmcub_version = dmub->fw_version; diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11); diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16); diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); diag_data->is_dmcub_enabled = is_dmub_enabled; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); diag_data->is_dmcub_soft_reset = is_soft_reset; REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); diag_data->is_dmcub_secure_reset = is_sec_reset; REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); diag_data->is_traceport_en = is_traceport_enabled; REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); diag_data->is_cw0_enabled = is_cw0_enabled; REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, 
DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { /* DMCUB_REGION3_TMR_AXI_SPACE values: * 0b011 (0x3) - FB physical address * 0b100 (0x4) - GPU virtual address * * Default value is 0x3 (FB Physical address for TMR). When programming * DMUB to be in system memory, change to 0x4. The system memory allocated * is accessible by both GPU and CPU, so we use GPU virtual address. */ REG_WRITE(DMCUB_REGION3_TMR_AXI_SPACE, 0x4); } void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data) { REG_WRITE(DMCUB_INBOX0_WPTR, data.inbox0_cmd_common.all); } void dmub_dcn32_clear_inbox0_ack_register(struct dmub_srv *dmub) { REG_WRITE(DMCUB_SCRATCH17, 0); } uint32_t dmub_dcn32_read_inbox0_ack_register(struct dmub_srv *dmub) { return REG_READ(DMCUB_SCRATCH17); }
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
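The dmub_srv_dcn32_regs initializer above relies on an X-macro idiom: the DMUB_DCN32_REGS()/DMUB_DCN32_FIELDS() lists are expanded several times under different DMUB_SR/DMUB_SF definitions, so one register/field list fills the offset, mask and shift tables in parallel. Below is a minimal, self-contained sketch of that idiom; EX_FIELDS, CNTL__ENABLE_MASK and the other names/values are hypothetical stand-ins, not the kernel's generated headers.

#include <stdint.h>
#include <stdio.h>

/* One list of (register, field) pairs; expanded multiple times below. */
#define EX_FIELDS()            \
	EX_SF(CNTL, ENABLE)    \
	EX_SF(CNTL, SOFT_RESET)

/* Stand-ins for the generated <reg>__<field>_MASK / __SHIFT constants. */
#define CNTL__ENABLE_MASK        0x00000001
#define CNTL__ENABLE__SHIFT      0
#define CNTL__SOFT_RESET_MASK    0x00000002
#define CNTL__SOFT_RESET__SHIFT  1

struct ex_mask  { uint32_t CNTL__ENABLE, CNTL__SOFT_RESET; };
struct ex_shift { uint8_t  CNTL__ENABLE, CNTL__SOFT_RESET; };
struct ex_regs  { struct ex_mask mask; struct ex_shift shift; };

static const struct ex_regs ex_regs = {
/* First expansion of the list fills the mask table... */
#define EX_SF(reg, field) reg##__##field##_MASK,
	{ EX_FIELDS() },
#undef EX_SF
/* ...second expansion fills the shift table. */
#define EX_SF(reg, field) reg##__##field##__SHIFT,
	{ EX_FIELDS() },
#undef EX_SF
};

int main(void)
{
	printf("ENABLE: mask 0x%08x shift %u\n",
	       (unsigned)ex_regs.mask.CNTL__ENABLE,
	       (unsigned)ex_regs.shift.CNTL__ENABLE);
	return 0;
}

At run time the REG_GET/REG_UPDATE helpers look up the matching mask and shift entries through tables built exactly this way, so the register list only has to be maintained in one place.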
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "../dmub_srv.h" #include "dmub_reg.h" #include "dmub_dcn302.h" #include "dimgrey_cavefish_ip_offset.h" #include "dcn/dcn_3_0_0_offset.h" #include "dcn/dcn_3_0_0_sh_mask.h" #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg #define CTX dmub #define REGS dmub->regs /* Registers. */ const struct dmub_srv_common_regs dmub_srv_dcn302_regs = { #define DMUB_SR(reg) REG_OFFSET(reg), { DMUB_COMMON_REGS() DMCUB_INTERNAL_REGS() }, #undef DMUB_SR #define DMUB_SF(reg, field) FD_MASK(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF #define DMUB_SF(reg, field) FD_SHIFT(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF }; /* Shared functions. */
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "../dmub_srv.h" #include "dmub_dcn20.h" #include "dmub_dcn21.h" #include "dmub_cmd.h" #include "dmub_dcn30.h" #include "dmub_dcn301.h" #include "dmub_dcn302.h" #include "dmub_dcn303.h" #include "dmub_dcn31.h" #include "dmub_dcn314.h" #include "dmub_dcn315.h" #include "dmub_dcn316.h" #include "dmub_dcn32.h" #include "os_types.h" /* * Note: the DMUB service is standalone. No additional headers should be * added below or above this line unless they reside within the DMUB * folder. */ /* Alignment for framebuffer memory. */ #define DMUB_FB_ALIGNMENT (1024 * 1024) /* Stack size. */ #define DMUB_STACK_SIZE (128 * 1024) /* Context size. */ #define DMUB_CONTEXT_SIZE (512 * 1024) /* Mailbox size : Ring buffers are required for both inbox and outbox */ #define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE)) /* Default state size if meta is absent. */ #define DMUB_FW_STATE_SIZE (64 * 1024) /* Default tracebuffer size if meta is absent. */ #define DMUB_TRACE_BUFFER_SIZE (64 * 1024) /* Default scratch mem size. */ #define DMUB_SCRATCH_MEM_SIZE (256) /* Number of windows in use. */ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) /* Base addresses. */ #define DMUB_CW0_BASE (0x60000000) #define DMUB_CW1_BASE (0x61000000) #define DMUB_CW3_BASE (0x63000000) #define DMUB_CW4_BASE (0x64000000) #define DMUB_CW5_BASE (0x65000000) #define DMUB_CW6_BASE (0x66000000) #define DMUB_REGION5_BASE (0xA0000000) static inline uint32_t dmub_align(uint32_t val, uint32_t factor) { return (val + factor - 1) / factor * factor; } void dmub_flush_buffer_mem(const struct dmub_fb *fb) { const uint8_t *base = (const uint8_t *)fb->cpu_addr; uint8_t buf[64]; uint32_t pos, end; /** * Read 64-byte chunks since we don't want to store a * large temporary buffer for this purpose. */ end = fb->size / sizeof(buf) * sizeof(buf); for (pos = 0; pos < end; pos += sizeof(buf)) dmub_memcpy(buf, base + pos, sizeof(buf)); /* Read anything leftover into the buffer. 
*/ if (end < fb->size) dmub_memcpy(buf, base + pos, fb->size - end); } static const struct dmub_fw_meta_info * dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset) { const union dmub_fw_meta *meta; if (!blob || !blob_size) return NULL; if (blob_size < sizeof(union dmub_fw_meta) + meta_offset) return NULL; meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset - sizeof(union dmub_fw_meta)); if (meta->info.magic_value != DMUB_FW_META_MAGIC) return NULL; return &meta->info; } static const struct dmub_fw_meta_info * dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) { const struct dmub_fw_meta_info *info = NULL; if (params->fw_bss_data && params->bss_data_size) { /* Legacy metadata region. */ info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data, params->bss_data_size, DMUB_FW_META_OFFSET); } else if (params->fw_inst_const && params->inst_const_size) { /* Combined metadata region - can be aligned to 16-bytes. */ uint32_t i; for (i = 0; i < 16; ++i) { info = dmub_get_fw_meta_info_from_blob( params->fw_inst_const, params->inst_const_size, i); if (info) break; } } return info; } static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) { struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; switch (asic) { case DMUB_ASIC_DCN20: case DMUB_ASIC_DCN21: case DMUB_ASIC_DCN30: case DMUB_ASIC_DCN301: case DMUB_ASIC_DCN302: case DMUB_ASIC_DCN303: dmub->regs = &dmub_srv_dcn20_regs; funcs->reset = dmub_dcn20_reset; funcs->reset_release = dmub_dcn20_reset_release; funcs->backdoor_load = dmub_dcn20_backdoor_load; funcs->setup_windows = dmub_dcn20_setup_windows; funcs->setup_mailbox = dmub_dcn20_setup_mailbox; funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; funcs->is_supported = dmub_dcn20_is_supported; funcs->is_hw_init = dmub_dcn20_is_hw_init; funcs->set_gpint = dmub_dcn20_set_gpint; funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked; funcs->get_gpint_response = dmub_dcn20_get_gpint_response; funcs->get_fw_status = dmub_dcn20_get_fw_boot_status; funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options; funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence; funcs->get_current_time = dmub_dcn20_get_current_time; // Out mailbox register access functions for RN and above funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox; funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr; funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr; //outbox0 call stacks funcs->setup_outbox0 = dmub_dcn20_setup_outbox0; funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr; funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr; funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data; if (asic == DMUB_ASIC_DCN21) dmub->regs = &dmub_srv_dcn21_regs; if (asic == DMUB_ASIC_DCN30) { dmub->regs = &dmub_srv_dcn30_regs; funcs->backdoor_load = dmub_dcn30_backdoor_load; funcs->setup_windows = dmub_dcn30_setup_windows; } if (asic == DMUB_ASIC_DCN301) { dmub->regs = &dmub_srv_dcn301_regs; funcs->backdoor_load = dmub_dcn30_backdoor_load; funcs->setup_windows = dmub_dcn30_setup_windows; } if (asic == DMUB_ASIC_DCN302) { dmub->regs = &dmub_srv_dcn302_regs; funcs->backdoor_load = dmub_dcn30_backdoor_load; funcs->setup_windows = dmub_dcn30_setup_windows; } if (asic == DMUB_ASIC_DCN303) { dmub->regs = &dmub_srv_dcn303_regs; funcs->backdoor_load = dmub_dcn30_backdoor_load; funcs->setup_windows = 
dmub_dcn30_setup_windows; } break; case DMUB_ASIC_DCN31: case DMUB_ASIC_DCN31B: case DMUB_ASIC_DCN314: case DMUB_ASIC_DCN315: case DMUB_ASIC_DCN316: if (asic == DMUB_ASIC_DCN314) { dmub->regs_dcn31 = &dmub_srv_dcn314_regs; funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported; } else if (asic == DMUB_ASIC_DCN315) { dmub->regs_dcn31 = &dmub_srv_dcn315_regs; } else if (asic == DMUB_ASIC_DCN316) { dmub->regs_dcn31 = &dmub_srv_dcn316_regs; } else { dmub->regs_dcn31 = &dmub_srv_dcn31_regs; funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported; } funcs->reset = dmub_dcn31_reset; funcs->reset_release = dmub_dcn31_reset_release; funcs->backdoor_load = dmub_dcn31_backdoor_load; funcs->setup_windows = dmub_dcn31_setup_windows; funcs->setup_mailbox = dmub_dcn31_setup_mailbox; funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr; funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox; funcs->get_outbox1_wptr = dmub_dcn31_get_outbox1_wptr; funcs->set_outbox1_rptr = dmub_dcn31_set_outbox1_rptr; funcs->is_supported = dmub_dcn31_is_supported; funcs->is_hw_init = dmub_dcn31_is_hw_init; funcs->set_gpint = dmub_dcn31_set_gpint; funcs->is_gpint_acked = dmub_dcn31_is_gpint_acked; funcs->get_gpint_response = dmub_dcn31_get_gpint_response; funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout; funcs->get_fw_status = dmub_dcn31_get_fw_boot_status; funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option; funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options; funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence; //outbox0 call stacks funcs->setup_outbox0 = dmub_dcn31_setup_outbox0; funcs->get_outbox0_wptr = dmub_dcn31_get_outbox0_wptr; funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr; funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data; funcs->should_detect = dmub_dcn31_should_detect; funcs->get_current_time = dmub_dcn31_get_current_time; break; case DMUB_ASIC_DCN32: case DMUB_ASIC_DCN321: dmub->regs_dcn32 = &dmub_srv_dcn32_regs; funcs->configure_dmub_in_system_memory = dmub_dcn32_configure_dmub_in_system_memory; funcs->send_inbox0_cmd = dmub_dcn32_send_inbox0_cmd; funcs->clear_inbox0_ack_register = dmub_dcn32_clear_inbox0_ack_register; funcs->read_inbox0_ack_register = dmub_dcn32_read_inbox0_ack_register; funcs->reset = dmub_dcn32_reset; funcs->reset_release = dmub_dcn32_reset_release; funcs->backdoor_load = dmub_dcn32_backdoor_load; funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode; funcs->setup_windows = dmub_dcn32_setup_windows; funcs->setup_mailbox = dmub_dcn32_setup_mailbox; funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr; funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox; funcs->get_outbox1_wptr = dmub_dcn32_get_outbox1_wptr; funcs->set_outbox1_rptr = dmub_dcn32_set_outbox1_rptr; funcs->is_supported = dmub_dcn32_is_supported; funcs->is_hw_init = dmub_dcn32_is_hw_init; funcs->set_gpint = dmub_dcn32_set_gpint; funcs->is_gpint_acked = dmub_dcn32_is_gpint_acked; funcs->get_gpint_response = dmub_dcn32_get_gpint_response; funcs->get_gpint_dataout = dmub_dcn32_get_gpint_dataout; funcs->get_fw_status = dmub_dcn32_get_fw_boot_status; funcs->enable_dmub_boot_options = dmub_dcn32_enable_dmub_boot_options; funcs->skip_dmub_panel_power_sequence = dmub_dcn32_skip_dmub_panel_power_sequence; /* outbox0 call stacks */ 
funcs->setup_outbox0 = dmub_dcn32_setup_outbox0; funcs->get_outbox0_wptr = dmub_dcn32_get_outbox0_wptr; funcs->set_outbox0_rptr = dmub_dcn32_set_outbox0_rptr; funcs->get_current_time = dmub_dcn32_get_current_time; funcs->get_diagnostic_data = dmub_dcn32_get_diagnostic_data; break; default: return false; } return true; } enum dmub_status dmub_srv_create(struct dmub_srv *dmub, const struct dmub_srv_create_params *params) { enum dmub_status status = DMUB_STATUS_OK; dmub_memset(dmub, 0, sizeof(*dmub)); dmub->funcs = params->funcs; dmub->user_ctx = params->user_ctx; dmub->asic = params->asic; dmub->fw_version = params->fw_version; dmub->is_virtual = params->is_virtual; /* Setup asic dependent hardware funcs. */ if (!dmub_srv_hw_setup(dmub, params->asic)) { status = DMUB_STATUS_INVALID; goto cleanup; } /* Override (some) hardware funcs based on user params. */ if (params->hw_funcs) { if (params->hw_funcs->emul_get_inbox1_rptr) dmub->hw_funcs.emul_get_inbox1_rptr = params->hw_funcs->emul_get_inbox1_rptr; if (params->hw_funcs->emul_set_inbox1_wptr) dmub->hw_funcs.emul_set_inbox1_wptr = params->hw_funcs->emul_set_inbox1_wptr; if (params->hw_funcs->is_supported) dmub->hw_funcs.is_supported = params->hw_funcs->is_supported; } /* Sanity checks for required hw func pointers. */ if (!dmub->hw_funcs.get_inbox1_rptr || !dmub->hw_funcs.set_inbox1_wptr) { status = DMUB_STATUS_INVALID; goto cleanup; } cleanup: if (status == DMUB_STATUS_OK) dmub->sw_init = true; else dmub_srv_destroy(dmub); return status; } void dmub_srv_destroy(struct dmub_srv *dmub) { dmub_memset(dmub, 0, sizeof(*dmub)); } enum dmub_status dmub_srv_calc_region_info(struct dmub_srv *dmub, const struct dmub_srv_region_params *params, struct dmub_srv_region_info *out) { struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST]; struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK]; struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA]; struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS]; struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM]; const struct dmub_fw_meta_info *fw_info; uint32_t fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE; if (!dmub->sw_init) return DMUB_STATUS_INVALID; memset(out, 0, sizeof(*out)); out->num_regions = DMUB_NUM_WINDOWS; inst->base = 0x0; inst->top = inst->base + params->inst_const_size; data->base = dmub_align(inst->top, 256); data->top = data->base + params->bss_data_size; /* * All cache windows below should be aligned to the size * of the DMCUB cache line, 64 bytes. */ stack->base = dmub_align(data->top, 256); stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; bios->base = dmub_align(stack->top, 256); bios->top = bios->base + params->vbios_size; mail->base = dmub_align(bios->top, 256); mail->top = mail->base + DMUB_MAILBOX_SIZE; fw_info = dmub_get_fw_meta_info(params); if (fw_info) { fw_state_size = fw_info->fw_region_size; trace_buffer_size = fw_info->trace_buffer_size; /** * If DM didn't fill in a version, then fill it in based on * the firmware meta now that we have it. * * TODO: Make it easier for driver to extract this out to * pass during creation. 
*/ if (dmub->fw_version == 0) dmub->fw_version = fw_info->fw_version; } trace_buff->base = dmub_align(mail->top, 256); trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); fw_state->base = dmub_align(trace_buff->top, 256); fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); scratch_mem->base = dmub_align(fw_state->top, 256); scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64); out->fb_size = dmub_align(scratch_mem->top, 4096); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, const struct dmub_srv_fb_params *params, struct dmub_srv_fb_info *out) { uint8_t *cpu_base; uint64_t gpu_base; uint32_t i; if (!dmub->sw_init) return DMUB_STATUS_INVALID; memset(out, 0, sizeof(*out)); if (params->region_info->num_regions != DMUB_NUM_WINDOWS) return DMUB_STATUS_INVALID; cpu_base = (uint8_t *)params->cpu_addr; gpu_base = params->gpu_addr; for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { const struct dmub_region *reg = &params->region_info->regions[i]; out->fb[i].cpu_addr = cpu_base + reg->base; out->fb[i].gpu_addr = gpu_base + reg->base; out->fb[i].size = reg->top - reg->base; } out->num_fb = DMUB_NUM_WINDOWS; return DMUB_STATUS_OK; } enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, bool *is_supported) { *is_supported = false; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (dmub->hw_funcs.is_supported) *is_supported = dmub->hw_funcs.is_supported(dmub); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init) { *is_hw_init = false; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (!dmub->hw_init) return DMUB_STATUS_OK; if (dmub->hw_funcs.is_hw_init) *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST]; struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK]; struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA]; struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS]; struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; struct dmub_rb_init_params rb_params, outbox0_rb_params; struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; struct dmub_region inbox1, outbox1, outbox0; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb || !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) { ASSERT(0); return DMUB_STATUS_INVALID; } dmub->fb_base = params->fb_base; dmub->fb_offset = params->fb_offset; dmub->psp_version = params->psp_version; if (dmub->hw_funcs.reset) dmub->hw_funcs.reset(dmub); /* reset the cache of the last wptr as well now that hw is reset */ dmub->inbox1_last_wptr = 0; cw0.offset.quad_part = inst_fb->gpu_addr; cw0.region.base = DMUB_CW0_BASE; cw0.region.top = cw0.region.base + inst_fb->size - 1; cw1.offset.quad_part = stack_fb->gpu_addr; cw1.region.base = DMUB_CW1_BASE; cw1.region.top = cw1.region.base + stack_fb->size - 1; if (params->fw_in_system_memory && dmub->hw_funcs.configure_dmub_in_system_memory) dmub->hw_funcs.configure_dmub_in_system_memory(dmub); if (params->load_inst_const && dmub->hw_funcs.backdoor_load) { /** * Read back all the instruction memory so we don't hang the * DMCUB when 
backdoor loading if the write from x86 hasn't been * flushed yet. This only occurs in backdoor loading. */ dmub_flush_buffer_mem(inst_fb); if (params->fw_in_system_memory && dmub->hw_funcs.backdoor_load_zfb_mode) dmub->hw_funcs.backdoor_load_zfb_mode(dmub, &cw0, &cw1); else dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1); } cw2.offset.quad_part = data_fb->gpu_addr; cw2.region.base = DMUB_CW0_BASE + inst_fb->size; cw2.region.top = cw2.region.base + data_fb->size; cw3.offset.quad_part = bios_fb->gpu_addr; cw3.region.base = DMUB_CW3_BASE; cw3.region.top = cw3.region.base + bios_fb->size; cw4.offset.quad_part = mail_fb->gpu_addr; cw4.region.base = DMUB_CW4_BASE; cw4.region.top = cw4.region.base + mail_fb->size; /** * Doubled the mailbox region to accomodate inbox and outbox. * Note: Currently, currently total mailbox size is 16KB. It is split * equally into 8KB between inbox and outbox. If this config is * changed, then uncached base address configuration of outbox1 * has to be updated in funcs->setup_out_mailbox. */ inbox1.base = cw4.region.base; inbox1.top = cw4.region.base + DMUB_RB_SIZE; outbox1.base = inbox1.top; outbox1.top = cw4.region.top; cw5.offset.quad_part = tracebuff_fb->gpu_addr; cw5.region.base = DMUB_CW5_BASE; cw5.region.top = cw5.region.base + tracebuff_fb->size; outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET; outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET; cw6.offset.quad_part = fw_state_fb->gpu_addr; cw6.region.base = DMUB_CW6_BASE; cw6.region.top = cw6.region.base + fw_state_fb->size; dmub->fw_state = fw_state_fb->cpu_addr; dmub->scratch_mem_fb = *scratch_mem_fb; if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6); if (dmub->hw_funcs.setup_outbox0) dmub->hw_funcs.setup_outbox0(dmub, &outbox0); if (dmub->hw_funcs.setup_mailbox) dmub->hw_funcs.setup_mailbox(dmub, &inbox1); if (dmub->hw_funcs.setup_out_mailbox) dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1); dmub_memset(&rb_params, 0, sizeof(rb_params)); rb_params.ctx = dmub; rb_params.base_address = mail_fb->cpu_addr; rb_params.capacity = DMUB_RB_SIZE; dmub_rb_init(&dmub->inbox1_rb, &rb_params); // Initialize outbox1 ring buffer rb_params.ctx = dmub; rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE); rb_params.capacity = DMUB_RB_SIZE; dmub_rb_init(&dmub->outbox1_rb, &rb_params); dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params)); outbox0_rb_params.ctx = dmub; outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET); outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64); dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params); /* Report to DMUB what features are supported by current driver */ if (dmub->hw_funcs.enable_dmub_boot_options) dmub->hw_funcs.enable_dmub_boot_options(dmub, params); if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, params->skip_panel_power_sequence); if (dmub->hw_funcs.reset_release && !dmub->is_virtual) dmub->hw_funcs.reset_release(dmub); dmub->hw_init = true; return DMUB_STATUS_OK; } enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) { if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); dmub->inbox1_last_wptr = 
dmub->inbox1_rb.wrpt; } return DMUB_STATUS_OK; } enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) { if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (dmub->hw_funcs.reset) dmub->hw_funcs.reset(dmub); /* mailboxes have been reset in hw, so reset the sw state as well */ dmub->inbox1_last_wptr = 0; dmub->inbox1_rb.wrpt = 0; dmub->inbox1_rb.rptr = 0; dmub->outbox0_rb.wrpt = 0; dmub->outbox0_rb.rptr = 0; dmub->outbox1_rb.wrpt = 0; dmub->outbox1_rb.rptr = 0; dmub->hw_init = false; return DMUB_STATUS_OK; } enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, const union dmub_rb_cmd *cmd) { if (!dmub->hw_init) return DMUB_STATUS_INVALID; if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) return DMUB_STATUS_OK; return DMUB_STATUS_QUEUE_FULL; } enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) { struct dmub_rb flush_rb; if (!dmub->hw_init) return DMUB_STATUS_INVALID; /** * Read back all the queued commands to ensure that they've * been flushed to framebuffer memory. Otherwise DMCUB might * read back stale, fully invalid or partially invalid data. */ flush_rb = dmub->inbox1_rb; flush_rb.rptr = dmub->inbox1_last_wptr; dmub_rb_flush_pending(&flush_rb); dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; return DMUB_STATUS_OK; } enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, uint32_t timeout_us) { uint32_t i; if (!dmub->hw_init) return DMUB_STATUS_INVALID; for (i = 0; i <= timeout_us; i += 100) { union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub); if (status.bits.dal_fw && status.bits.mailbox_rdy) return DMUB_STATUS_OK; udelay(100); } return DMUB_STATUS_TIMEOUT; } enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, uint32_t timeout_us) { uint32_t i, rptr; if (!dmub->hw_init) return DMUB_STATUS_INVALID; for (i = 0; i <= timeout_us; ++i) { rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); if (rptr > dmub->inbox1_rb.capacity) return DMUB_STATUS_HW_FAILURE; dmub->inbox1_rb.rptr = rptr; if (dmub_rb_empty(&dmub->inbox1_rb)) return DMUB_STATUS_OK; udelay(1); } return DMUB_STATUS_TIMEOUT; } enum dmub_status dmub_srv_send_gpint_command(struct dmub_srv *dmub, enum dmub_gpint_command command_code, uint16_t param, uint32_t timeout_us) { union dmub_gpint_data_register reg; uint32_t i; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (!dmub->hw_funcs.set_gpint) return DMUB_STATUS_INVALID; if (!dmub->hw_funcs.is_gpint_acked) return DMUB_STATUS_INVALID; reg.bits.status = 1; reg.bits.command_code = command_code; reg.bits.param = param; dmub->hw_funcs.set_gpint(dmub, reg); for (i = 0; i < timeout_us; ++i) { udelay(1); if (dmub->hw_funcs.is_gpint_acked(dmub, reg)) return DMUB_STATUS_OK; } return DMUB_STATUS_TIMEOUT; } enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub, uint32_t *response) { *response = 0; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (!dmub->hw_funcs.get_gpint_response) return DMUB_STATUS_INVALID; *response = dmub->hw_funcs.get_gpint_response(dmub); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_get_gpint_dataout(struct dmub_srv *dmub, uint32_t *dataout) { *dataout = 0; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (!dmub->hw_funcs.get_gpint_dataout) return DMUB_STATUS_INVALID; *dataout = dmub->hw_funcs.get_gpint_dataout(dmub); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, union dmub_fw_boot_status *status) { status->all = 0; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if 
(dmub->hw_funcs.get_fw_status) *status = dmub->hw_funcs.get_fw_status(dmub); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, union dmub_fw_boot_options *option) { option->all = 0; if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (dmub->hw_funcs.get_fw_boot_option) *option = dmub->hw_funcs.get_fw_boot_option(dmub); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, bool skip) { if (!dmub->sw_init) return DMUB_STATUS_INVALID; if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) { enum dmub_status status = DMUB_STATUS_OK; // Queue command status = dmub_srv_cmd_queue(dmub, cmd); if (status != DMUB_STATUS_OK) return status; // Execute command status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) return status; // Wait for DMUB to process command status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) return status; // Copy data back from ring buffer into command dmub_rb_get_return_data(&dmub->inbox1_rb, cmd); return status; } static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb, void *entry) { const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t); uint64_t *dst = (uint64_t *)entry; uint8_t i; uint8_t loop_count; if (rb->rptr == rb->wrpt) return false; loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t); // copying data for (i = 0; i < loop_count; i++) *dst++ = *src++; rb->rptr += sizeof(struct dmcub_trace_buf_entry); rb->rptr %= rb->capacity; return true; } bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry) { dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub); return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry); } bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) { if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data) return false; dmub->hw_funcs.get_diagnostic_data(dmub, diag_data); return true; } bool dmub_srv_should_detect(struct dmub_srv *dmub) { if (!dmub->hw_init || !dmub->hw_funcs.should_detect) return false; return dmub->hw_funcs.should_detect(dmub); } enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub) { if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register) return DMUB_STATUS_INVALID; dmub->hw_funcs.clear_inbox0_ack_register(dmub); return DMUB_STATUS_OK; } enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us) { uint32_t i = 0; uint32_t ack = 0; if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register) return DMUB_STATUS_INVALID; for (i = 0; i <= timeout_us; i++) { ack = dmub->hw_funcs.read_inbox0_ack_register(dmub); if (ack) return DMUB_STATUS_OK; } return DMUB_STATUS_TIMEOUT; } enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data) { if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd) return DMUB_STATUS_INVALID; dmub->hw_funcs.send_inbox0_cmd(dmub, data); return DMUB_STATUS_OK; }
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
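As a usage sketch of the service above (example_submit_cmd is a hypothetical caller, not kernel code, and error handling is trimmed): a DM layer that has already gone through dmub_srv_create() and dmub_srv_hw_init() submits work by queueing into inbox1, kicking the write pointer, and waiting for the firmware to drain the ring.

/* Sketch: submit one command to an already-initialized DMUB service.
 * Mirrors the queue/execute/wait sequence of dmub_srv_cmd_with_reply_data()
 * above, without reading reply data back. Assumes this file sits next to
 * the dmub sources so the relative include resolves. */
#include "../dmub_srv.h"

static enum dmub_status example_submit_cmd(struct dmub_srv *dmub,
					   const union dmub_rb_cmd *cmd)
{
	enum dmub_status status;

	status = dmub_srv_cmd_queue(dmub, cmd);	/* push into the inbox1 ring */
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_cmd_execute(dmub);	/* flush and bump the write pointer */
	if (status != DMUB_STATUS_OK)
		return status;

	/* Wait up to 100 ms for the firmware to consume the ring. */
	return dmub_srv_wait_for_idle(dmub, 100000);
}

dmub_srv_cmd_with_reply_data() above wraps the same three steps and additionally copies the firmware's reply back out of the ring buffer.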
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dmub_reg.h" #include "../dmub_srv.h" struct dmub_reg_value_masks { uint32_t value; uint32_t mask; }; static inline void set_reg_field_value_masks(struct dmub_reg_value_masks *field_value_mask, uint32_t value, uint32_t mask, uint8_t shift) { field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift)); field_value_mask->mask = field_value_mask->mask | mask; } static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, va_list ap) { uint32_t shift, mask, field_value; int i = 1; /* gather all bits value/mask getting updated in this register */ set_reg_field_value_masks(field_value_mask, field_value1, mask1, shift1); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t); set_reg_field_value_masks(field_value_mask, field_value, mask, shift); i++; } } static inline uint32_t get_reg_field_value_ex(uint32_t reg_value, uint32_t mask, uint8_t shift) { return (mask & reg_value) >> shift; } void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) { struct dmub_reg_value_masks field_value_mask = { 0 }; uint32_t reg_val; va_list ap; va_start(ap, field_value1); set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, field_value1, ap); va_end(ap); reg_val = srv->funcs.reg_read(srv->user_ctx, addr); reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; srv->funcs.reg_write(srv->user_ctx, addr, reg_val); } void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) { struct dmub_reg_value_masks field_value_mask = { 0 }; va_list ap; va_start(ap, field_value1); set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, field_value1, ap); va_end(ap); reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; srv->funcs.reg_write(srv->user_ctx, addr, reg_val); } void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift, uint32_t mask, uint32_t *field_value) { uint32_t reg_val = srv->funcs.reg_read(srv->user_ctx, addr); *field_value = get_reg_field_value_ex(reg_val, mask, shift); }
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
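To make the variadic helpers above concrete, here is a hedged sketch of a direct call to dmub_reg_update(); in the real driver this sits behind the REG_UPDATE/REG_UPDATE_2 macros, and the register address and field layout below are invented purely for illustration.

#include "dmub_reg.h"
#include "../dmub_srv.h"

/* Hypothetical register and field definitions, for illustration only. */
#define EX_CNTL                     0x100
#define EX_CNTL__ENABLE__SHIFT      0
#define EX_CNTL__ENABLE_MASK        0x00000001
#define EX_CNTL__SOFT_RESET__SHIFT  1
#define EX_CNTL__SOFT_RESET_MASK    0x00000002

static void example_enable_and_release_reset(struct dmub_srv *srv)
{
	/* Two field updates folded into a single read-modify-write:
	 * ENABLE = 1 and SOFT_RESET = 0. n = 2 tells the helper how many
	 * (shift, mask, value) triplets it has been given. */
	dmub_reg_update(srv, EX_CNTL, 2,
			EX_CNTL__ENABLE__SHIFT, EX_CNTL__ENABLE_MASK, 1,
			EX_CNTL__SOFT_RESET__SHIFT, EX_CNTL__SOFT_RESET_MASK, 0);
}

Internally the helper gathers all the masks and shifted values, reads the register once through srv->funcs.reg_read, clears only the bits being updated, ORs in the new values, and writes the result back once.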
// SPDX-License-Identifier: MIT /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * * Authors: AMD */ #include "../dmub_srv.h" #include "dmub_reg.h" #include "dmub_dcn303.h" #include "sienna_cichlid_ip_offset.h" #include "dcn/dcn_3_0_3_offset.h" #include "dcn/dcn_3_0_3_sh_mask.h" #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg #define CTX dmub #define REGS dmub->regs /* Registers. */ const struct dmub_srv_common_regs dmub_srv_dcn303_regs = { #define DMUB_SR(reg) REG_OFFSET(reg), { DMUB_COMMON_REGS() DMCUB_INTERNAL_REGS() }, #undef DMUB_SR #define DMUB_SF(reg, field) FD_MASK(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF #define DMUB_SF(reg, field) FD_SHIFT(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF }; /* Shared functions. */
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dmub/dmub_srv_stat.h" #include "dmub/inc/dmub_cmd.h" /** * DOC: DMUB_SRV STAT Interface * * These interfaces are called without acquiring DAL and DC locks. * Hence, there is limitations on whese interfaces can access. Only * variables exclusively defined for these interfaces can be modified. */ /** * dmub_srv_stat_get_notification - Retrieves a dmub outbox notification, set up dmub notification * structure with message information. Also a pending bit if queue * is having more notifications * @dmub: dmub srv structure * @notify: dmub notification structure to be filled up * * Returns: dmub_status */ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, struct dmub_notification *notify) { /** * This function is called without dal and dc locks, so * we shall not modify any dmub variables, only dmub->outbox1_rb * is exempted as it is exclusively accessed by this function */ union dmub_rb_out_cmd cmd = {0}; if (!dmub->hw_init) { notify->type = DMUB_NOTIFICATION_NO_DATA; notify->pending_notification = false; return DMUB_STATUS_INVALID; } /* Get write pointer which is updated by dmub */ dmub->outbox1_rb.wrpt = dmub->hw_funcs.get_outbox1_wptr(dmub); if (!dmub_rb_out_front(&dmub->outbox1_rb, &cmd)) { notify->type = DMUB_NOTIFICATION_NO_DATA; notify->pending_notification = false; return DMUB_STATUS_OK; } switch (cmd.cmd_common.header.type) { case DMUB_OUT_CMD__DP_AUX_REPLY: notify->type = DMUB_NOTIFICATION_AUX_REPLY; notify->link_index = cmd.dp_aux_reply.control.instance; notify->result = cmd.dp_aux_reply.control.result; dmub_memcpy((void *)&notify->aux_reply, (void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data)); break; case DMUB_OUT_CMD__DP_HPD_NOTIFY: if (cmd.dp_hpd_notify.hpd_data.hpd_type == DP_HPD) { notify->type = DMUB_NOTIFICATION_HPD; notify->hpd_status = cmd.dp_hpd_notify.hpd_data.hpd_status; } else { notify->type = DMUB_NOTIFICATION_HPD_IRQ; } notify->link_index = cmd.dp_hpd_notify.hpd_data.instance; notify->result = AUX_RET_SUCCESS; break; case DMUB_OUT_CMD__SET_CONFIG_REPLY: notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY; notify->link_index = cmd.set_config_reply.set_config_reply_control.instance; notify->sc_status = cmd.set_config_reply.set_config_reply_control.status; break; case DMUB_OUT_CMD__DPIA_NOTIFICATION: notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION; notify->link_index = cmd.dpia_notification.payload.header.instance; 
if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) { notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw = cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw; notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw = cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw; if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed) notify->result = DPIA_BW_REQ_FAILED; else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) notify->result = DPIA_BW_REQ_SUCCESS; else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed) notify->result = DPIA_EST_BW_CHANGED; else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) notify->result = DPIA_BW_ALLOC_CAPS_CHANGED; } break; default: notify->type = DMUB_NOTIFICATION_NO_DATA; break; } /* Pop outbox1 ringbuffer and update read pointer */ dmub_rb_pop_front(&dmub->outbox1_rb); dmub->hw_funcs.set_outbox1_rptr(dmub, dmub->outbox1_rb.rptr); /** * Notify dc whether dmub has a pending outbox message, * this is to avoid one more call to dmub_srv_stat_get_notification */ if (dmub_rb_empty(&dmub->outbox1_rb)) notify->pending_notification = false; else notify->pending_notification = true; return DMUB_STATUS_OK; }
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
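A hedged sketch of how a caller might drain outbox1 through this lock-free interface; example_drain_dmub_notifications is hypothetical, and the real DM code dispatches each notification type to its own handler.

#include "dmub/dmub_srv_stat.h"

static void example_drain_dmub_notifications(struct dmub_srv *dmub)
{
	struct dmub_notification notify;

	do {
		if (dmub_srv_stat_get_notification(dmub, &notify) != DMUB_STATUS_OK)
			break;

		if (notify.type == DMUB_NOTIFICATION_NO_DATA)
			break;

		/* Dispatch on notify.type here (AUX reply, HPD, HPD IRQ, ...). */
	} while (notify.pending_notification);
}

The pending_notification flag set by dmub_srv_stat_get_notification() lets the caller keep draining without an extra empty-poll round trip.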
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "../dmub_srv.h" #include "dmub_reg.h" #include "dmub_dcn314.h" #include "dcn/dcn_3_1_4_offset.h" #include "dcn/dcn_3_1_4_sh_mask.h" #define DCN_BASE__INST0_SEG0 0x00000012 #define DCN_BASE__INST0_SEG1 0x000000C0 #define DCN_BASE__INST0_SEG2 0x000034C0 #define DCN_BASE__INST0_SEG3 0x00009000 #define DCN_BASE__INST0_SEG4 0x02403C00 #define DCN_BASE__INST0_SEG5 0 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg #define CTX dmub #define REGS dmub->regs_dcn31 #define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name) /* Registers. */ const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = { #define DMUB_SR(reg) REG_OFFSET_EXP(reg), { DMUB_DCN31_REGS() DMCUB_INTERNAL_REGS() }, #undef DMUB_SR #define DMUB_SF(reg, field) FD_MASK(reg, field), { DMUB_DCN31_FIELDS() }, #undef DMUB_SF #define DMUB_SF(reg, field) FD_SHIFT(reg, field), { DMUB_DCN31_FIELDS() }, #undef DMUB_SF }; bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub) { return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16); }
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
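The BASE()/REG_OFFSET_EXP() token pasting used in this file (and in dmub_dcn32.c above) resolves each register to "instance segment base plus in-segment offset". Below is a minimal standalone sketch of that expansion; the names and numbers (EX_BASE__INST0_SEG*, regEX_CNTL) are invented, and it assumes BASE(seg) is the usual one-level wrapper around BASE_INNER(seg) defined elsewhere in DC.

#include <stdio.h>

/* Hypothetical per-instance segment bases. */
#define EX_BASE__INST0_SEG0 0x00000012
#define EX_BASE__INST0_SEG2 0x000034C0

#define BASE_INNER(seg) EX_BASE__INST0_SEG##seg
#define BASE(seg)       BASE_INNER(seg) /* extra level so the index expands before pasting */

/* Pretend generated-header entries for one register. */
#define regEX_CNTL          0x0001
#define regEX_CNTL_BASE_IDX 2

#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)

int main(void)
{
	/* Expands to EX_BASE__INST0_SEG2 + regEX_CNTL = 0x34C0 + 0x1 = 0x34C1. */
	printf("EX_CNTL at 0x%x\n", (unsigned)REG_OFFSET_EXP(EX_CNTL));
	return 0;
}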
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "../dmub_srv.h" #include "dmub_reg.h" #include "dmub_dcn20.h" #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg #define CTX dmub #define REGS dmub->regs /* Registers. */ const struct dmub_srv_common_regs dmub_srv_dcn20_regs = { #define DMUB_SR(reg) REG_OFFSET(reg), { DMUB_COMMON_REGS() DMCUB_INTERNAL_REGS() }, #undef DMUB_SR #define DMUB_SF(reg, field) FD_MASK(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF #define DMUB_SF(reg, field) FD_SHIFT(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF }; /* Shared functions. */ static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub, uint64_t *fb_base, uint64_t *fb_offset) { uint32_t tmp; if (dmub->fb_base || dmub->fb_offset) { *fb_base = dmub->fb_base; *fb_offset = dmub->fb_offset; return; } REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); *fb_base = (uint64_t)tmp << 24; REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp); *fb_offset = (uint64_t)tmp << 24; } static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, uint64_t fb_base, uint64_t fb_offset, union dmub_addr *addr_out) { addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; } bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub) { /* Cached inbox is not supported in this fw version range */ return !(dmub->fw_version >= DMUB_FW_VERSION(1, 0, 0) && dmub->fw_version <= DMUB_FW_VERSION(1, 10, 0)); } void dmub_dcn20_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; const uint32_t timeout = 30; uint32_t in_reset, scratch, i; REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &in_reset); if (in_reset == 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); /** * Timeout covers both the ACK and the wait * for remaining work to finish. * * This is mostly bound by the PHY disable sequence. * Each register check will be greater than 1us, so * don't bother using udelay. */ for (i = 0; i < timeout; ++i) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; } for (i = 0; i < timeout; ++i) { scratch = dmub->hw_funcs.get_gpint_response(dmub); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; } /* Clear the GPINT command manually so we don't reset again. 
*/ cmd.all = 0; dmub->hw_funcs.set_gpint(dmub, cmd); /* Force reset in case we timed out, DMCUB is likely hung. */ } REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); } void dmub_dcn20_reset_release(struct dmub_srv *dmub) { REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF); REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0); } void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, const struct dmub_window *cw0, const struct dmub_window *cw1) { union dmub_addr offset; uint64_t fb_base, fb_offset; dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, DMCUB_MEM_WRITE_SPACE, 0x3); dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, DMCUB_REGION3_CW0_ENABLE, 1); dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, DMCUB_REGION3_CW1_ENABLE, 1); REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, 0x20); } void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, const struct dmub_window *cw6) { union dmub_addr offset; uint64_t fb_base, fb_offset; dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); if (cw2->region.base != cw2->region.top) { dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, DMCUB_REGION3_CW2_ENABLE, 1); } else { REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0); REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0); REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0); REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0); } dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, DMCUB_REGION3_CW3_ENABLE, 1); /* TODO: Move this to CW4. */ dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); /* New firmware can support CW4. 
*/ if (dmub_dcn20_use_cached_inbox(dmub)) { REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base); REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0, DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top, DMCUB_REGION3_CW4_ENABLE, 1); } else { REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, 1); } dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part); REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0, DMCUB_REGION5_TOP_ADDRESS, cw5->region.top - cw5->region.base - 1, DMCUB_REGION5_ENABLE, 1); dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, DMCUB_REGION3_CW6_ENABLE, 1); } void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1) { /* New firmware can support CW4 for the inbox. */ if (dmub_dcn20_use_cached_inbox(dmub)) REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base); else REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_WPTR); } uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); } void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset) { REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset); } void dmub_dcn20_setup_out_mailbox(struct dmub_srv *dmub, const struct dmub_region *outbox1) { /* New firmware can support CW4 for the outbox. 
*/ if (dmub_dcn20_use_cached_inbox(dmub)) REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base); else REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, 0x80002000); REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base); } uint32_t dmub_dcn20_get_outbox1_wptr(struct dmub_srv *dmub) { /** * outbox1 wptr register is accessed without locks (dal & dc) * and to be called only by dmub_srv_stat_get_notification() */ return REG_READ(DMCUB_OUTBOX1_WPTR); } void dmub_dcn20_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset) { /** * outbox1 rptr register is accessed without locks (dal & dc) * and to be called only by dmub_srv_stat_get_notification() */ REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset); } void dmub_dcn20_setup_outbox0(struct dmub_srv *dmub, const struct dmub_region *outbox0) { REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base); REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base); } uint32_t dmub_dcn20_get_outbox0_wptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_OUTBOX0_WPTR); } void dmub_dcn20_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset) { REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset); } bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub) { uint32_t is_hw_init; REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init); return is_hw_init != 0; } bool dmub_dcn20_is_supported(struct dmub_srv *dmub) { uint32_t supported = 0; REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported); return supported; } void dmub_dcn20_set_gpint(struct dmub_srv *dmub, union dmub_gpint_data_register reg) { REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all); } bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub, union dmub_gpint_data_register reg) { union dmub_gpint_data_register test; reg.bits.status = 0; test.all = REG_READ(DMCUB_GPINT_DATAIN1); return test.all == reg.all; } uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub) { return REG_READ(DMCUB_SCRATCH7); } union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub) { union dmub_fw_boot_status status; status.all = REG_READ(DMCUB_SCRATCH0); return status; } void dmub_dcn20_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { union dmub_fw_boot_options boot_options = {0}; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } void dmub_dcn20_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip) { union dmub_fw_boot_options boot_options; boot_options.all = REG_READ(DMCUB_SCRATCH14); boot_options.bits.skip_phy_init_panel_sequence = skip; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub) { return REG_READ(DMCUB_TIMER_CURRENT); } void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) { uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; if (!dmub || !diag_data) return; memset(diag_data, 0, sizeof(*diag_data)); diag_data->dmcub_version = dmub->fw_version; diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); diag_data->scratch[11] = 
REG_READ(DMCUB_SCRATCH11); diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); diag_data->is_dmcub_enabled = is_dmub_enabled; REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset); diag_data->is_dmcub_soft_reset = is_soft_reset; REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); diag_data->is_dmcub_secure_reset = is_sec_reset; REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); diag_data->is_traceport_en = is_traceport_enabled; REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); diag_data->is_cw0_enabled = is_cw0_enabled; REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; }
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
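dmub_dcn20.c rebases CPU-visible framebuffer addresses into the DMCUB's view: the FB location register fields hold the upper bits (hence the << 24 shift), and dmub_dcn20_translate_addr() computes out = in - fb_base + fb_offset. Below is a minimal self-contained sketch of that arithmetic; the demo_* names and sample register values are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same rebasing as dmub_dcn20_translate_addr(): out = in - fb_base + fb_offset. */
static uint64_t demo_translate_addr(uint64_t addr_in, uint64_t fb_base,
				    uint64_t fb_offset)
{
	return addr_in - fb_base + fb_offset;
}

int main(void)
{
	/* Illustrative register field values; the driver shifts them up by 24
	 * because the hardware fields express the location in 16 MiB units. */
	uint32_t fb_base_field   = 0x00F4;
	uint32_t fb_offset_field = 0x0000;

	uint64_t fb_base   = (uint64_t)fb_base_field << 24;
	uint64_t fb_offset = (uint64_t)fb_offset_field << 24;
	uint64_t cw0_addr  = fb_base + 0x100000; /* 1 MiB into the carveout */

	printf("firmware-visible address: 0x%llx\n",
	       (unsigned long long)demo_translate_addr(cw0_addr, fb_base, fb_offset));
	return 0;
}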
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "../dmub_srv.h" #include "dmub_reg.h" #include "dmub_dcn20.h" #include "dmub_dcn30.h" #include "sienna_cichlid_ip_offset.h" #include "dcn/dcn_3_0_0_offset.h" #include "dcn/dcn_3_0_0_sh_mask.h" #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg #define CTX dmub #define REGS dmub->regs /* Registers. */ const struct dmub_srv_common_regs dmub_srv_dcn30_regs = { #define DMUB_SR(reg) REG_OFFSET(reg), { DMUB_COMMON_REGS() DMCUB_INTERNAL_REGS() }, #undef DMUB_SR #define DMUB_SF(reg, field) FD_MASK(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF #define DMUB_SF(reg, field) FD_SHIFT(reg, field), { DMUB_COMMON_FIELDS() }, #undef DMUB_SF }; /* Shared functions. */ static void dmub_dcn30_get_fb_base_offset(struct dmub_srv *dmub, uint64_t *fb_base, uint64_t *fb_offset) { uint32_t tmp; if (dmub->fb_base || dmub->fb_offset) { *fb_base = dmub->fb_base; *fb_offset = dmub->fb_offset; return; } REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); *fb_base = (uint64_t)tmp << 24; REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp); *fb_offset = (uint64_t)tmp << 24; } static inline void dmub_dcn30_translate_addr(const union dmub_addr *addr_in, uint64_t fb_base, uint64_t fb_offset, union dmub_addr *addr_out) { addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; } void dmub_dcn30_backdoor_load(struct dmub_srv *dmub, const struct dmub_window *cw0, const struct dmub_window *cw1) { union dmub_addr offset; uint64_t fb_base, fb_offset; dmub_dcn30_get_fb_base_offset(dmub, &fb_base, &fb_offset); REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); /* MEM_CTNL read/write space doesn't exist. 
*/ dmub_dcn30_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, DMCUB_REGION3_CW0_ENABLE, 1); dmub_dcn30_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, DMCUB_REGION3_CW1_ENABLE, 1); REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, 0x20); } void dmub_dcn30_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, const struct dmub_window *cw6) { union dmub_addr offset; /* sienna_cichlid has hardwired virtual addressing for CW2-CW7 */ offset = cw2->offset; if (cw2->region.base != cw2->region.top) { REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, DMCUB_REGION3_CW2_ENABLE, 1); } else { REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0); REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0); REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0); REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0); } offset = cw3->offset; REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, DMCUB_REGION3_CW3_ENABLE, 1); offset = cw4->offset; /* New firmware can support CW4. */ if (dmub_dcn20_use_cached_inbox(dmub)) { REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base); REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0, DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top, DMCUB_REGION3_CW4_ENABLE, 1); } else { REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, 1); } offset = cw5->offset; REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part); REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0, DMCUB_REGION5_TOP_ADDRESS, cw5->region.top - cw5->region.base - 1, DMCUB_REGION5_ENABLE, 1); offset = cw6->offset; REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, DMCUB_REGION3_CW6_ENABLE, 1); }
linux-master
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
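Two conventions recur in the window programming above: an empty region (base == top) disables the CW2 window, and the legacy REGION4/REGION5 registers take a size-style top of top - base - 1 rather than an absolute address. Here is a small sketch of both rules; struct demo_region and the sample values are stand-ins, not the driver's struct dmub_region.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's region descriptor. */
struct demo_region {
	uint32_t base;
	uint32_t top;
};

/* An empty region disables the window, as in the CW2 branch above. */
static bool demo_window_enabled(const struct demo_region *r)
{
	return r->base != r->top;
}

/* Size-relative top value used by the legacy REGION4/REGION5 path. */
static uint32_t demo_legacy_top(const struct demo_region *r)
{
	return r->top - r->base - 1;
}

int main(void)
{
	struct demo_region cw2 = { .base = 0x1000, .top = 0x1000 }; /* empty */
	struct demo_region cw4 = { .base = 0x2000, .top = 0x6000 };

	printf("cw2 enabled: %d\n", demo_window_enabled(&cw2));
	printf("cw4 enabled: %d, legacy top: 0x%x\n",
	       demo_window_enabled(&cw4), demo_legacy_top(&cw4));
	return 0;
}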
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_mst_types.h" #include "amdgpu_dm_hdcp.h" #include "dc.h" #include "dm_helpers.h" #include "ddc_service_types.h" #include "dpcd_defs.h" #include "dmub_cmd.h" #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" #endif #include "dc/dcn20/dcn20_resource.h" #define PEAK_FACTOR_X1000 1006 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { ssize_t result = 0; struct aux_payload payload; enum aux_return_code_type operation_result; struct amdgpu_device *adev; struct ddc_service *ddc; if (WARN_ON(msg->size > 16)) return -E2BIG; payload.address = msg->address; payload.data = msg->buffer; payload.length = msg->size; payload.reply = &msg->reply; payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0; payload.write = (msg->request & DP_AUX_I2C_READ) == 0; payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; payload.write_status_update = (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); /* * w/a on certain intel platform where hpd is unexpected to pull low during * 1st sideband message transaction by return AUX_RET_ERROR_HPD_DISCON * aux transaction is succuess in such case, therefore bypass the error */ ddc = TO_DM_AUX(aux)->ddc_service; adev = ddc->ctx->driver_context; if (adev->dm.aux_hpd_discon_quirk) { if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && operation_result == AUX_RET_ERROR_HPD_DISCON) { result = 0; operation_result = AUX_RET_SUCCESS; } } if (payload.write && result >= 0) result = msg->size; if (result < 0) switch (operation_result) { case AUX_RET_SUCCESS: break; case AUX_RET_ERROR_HPD_DISCON: case AUX_RET_ERROR_UNKNOWN: case AUX_RET_ERROR_INVALID_OPERATION: case AUX_RET_ERROR_PROTOCOL_ERROR: result = -EIO; break; case AUX_RET_ERROR_INVALID_REPLY: case AUX_RET_ERROR_ENGINE_ACQUIRE: result = -EBUSY; break; case AUX_RET_ERROR_TIMEOUT: result = -ETIMEDOUT; break; } return result; } static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_sink) { 
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); dc_sink_release(aconnector->dc_sink); } kfree(aconnector->edid); drm_connector_cleanup(connector); drm_dp_mst_put_port_malloc(aconnector->mst_output_port); kfree(aconnector); } static int amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); int r; r = drm_dp_mst_connector_late_register(connector, amdgpu_dm_connector->mst_output_port); if (r < 0) return r; #if defined(CONFIG_DEBUG_FS) connector_debugfs_init(amdgpu_dm_connector); #endif return 0; } static void amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct drm_dp_mst_port *port = aconnector->mst_output_port; struct amdgpu_dm_connector *root = aconnector->mst_root; struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_sink = aconnector->dc_sink; drm_dp_mst_connector_early_unregister(connector, port); /* * Release dc_sink for connector which its attached port is * no longer in the mst topology */ drm_modeset_lock(&root->mst_mgr.base.lock, NULL); if (dc_sink) { if (dc_link->sink_count) dc_link_remove_remote_sink(dc_link, dc_sink); DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", dc_sink, dc_link->sink_count); dc_sink_release(dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; } aconnector->mst_status = MST_STATUS_DEFAULT; drm_modeset_unlock(&root->mst_mgr.base.lock); } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = dm_dp_mst_connector_destroy, .reset = amdgpu_dm_connector_funcs_reset, .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_set_property = amdgpu_dm_connector_atomic_set_property, .atomic_get_property = amdgpu_dm_connector_atomic_get_property, .late_register = amdgpu_dm_mst_connector_late_register, .early_unregister = amdgpu_dm_mst_connector_early_unregister, }; bool needs_dsc_aux_workaround(struct dc_link *link) { if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) return true; return false; } static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port) { u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) { if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) { DRM_INFO("Synaptics Cascaded MST hub\n"); return true; } } return false; } static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; struct drm_dp_mst_port *port = aconnector->mst_output_port; u8 dsc_caps[16] = { 0 }; u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2 u8 *dsc_branch_dec_caps = NULL; aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); /* * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs * because it only check the dsc/fec caps of the "port variable" and not the dock * * This case will return NULL: DSC capabe MST dock connected to a non fec/dsc 
capable display * * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux * */ if (!aconnector->dsc_aux && !port->parent->port_parent && needs_dsc_aux_workaround(aconnector->dc_link)) aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux; /* synaptics cascaded MST hub case */ if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port)) aconnector->dsc_aux = port->mgr->aux; if (!aconnector->dsc_aux) return false; if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0) return false; if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3) dsc_branch_dec_caps = dsc_branch_dec_caps_raw; if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, dsc_caps, dsc_branch_dec_caps, &dc_sink->dsc_caps.dsc_dec_caps)) return false; return true; } static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector) { union dp_downstream_port_present ds_port_present; if (!aconnector->dsc_aux) return false; if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) { DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n"); return false; } aconnector->mst_downstream_port_present = ds_port_present; DRM_INFO("Downstream port present %d, type %d\n", ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE); return true; } static int dm_dp_mst_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); int ret = 0; if (!aconnector) return drm_add_edid_modes(connector, NULL); if (!aconnector->edid) { struct edid *edid; edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); if (!edid) { amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, false); drm_connector_update_edid_property( &aconnector->base, NULL); DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name); if (!aconnector->dc_sink) { struct dc_sink *dc_sink; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; dc_sink = dc_link_add_remote_sink( aconnector->dc_link, NULL, 0, &init_params); if (!dc_sink) { DRM_ERROR("Unable to add a remote sink\n"); return 0; } DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", dc_sink, aconnector->dc_link->sink_count); dc_sink->priv = aconnector; aconnector->dc_sink = dc_sink; } return ret; } aconnector->edid = edid; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, true); } if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) { dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; } if (!aconnector->dc_sink) { struct dc_sink *dc_sink; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; dc_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)aconnector->edid, (aconnector->edid->extensions + 1) * EDID_LENGTH, &init_params); if (!dc_sink) { DRM_ERROR("Unable to add a remote sink\n"); return 0; } DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", dc_sink, aconnector->dc_link->sink_count); dc_sink->priv = aconnector; /* dc_link_add_remote_sink returns a new reference */ aconnector->dc_sink = dc_sink; /* when display is unplugged from mst hub, connctor will be * destroyed within dm_dp_mst_connector_destroy. 
connector * hdcp perperties, like type, undesired, desired, enabled, * will be lost. So, save hdcp properties into hdcp_work within * amdgpu_dm_atomic_commit_tail. if the same display is * plugged back with same display index, its hdcp properties * will be retrieved from hdcp_work within dm_dp_mst_get_modes */ if (aconnector->dc_sink && connector->state) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); if (adev->dm.hdcp_workqueue) { struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; connector->state->hdcp_content_type = hdcp_w->hdcp_content_type[connector->index]; connector->state->content_protection = hdcp_w->content_protection[connector->index]; } } if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( connector, aconnector->edid); if (!validate_dsc_caps_on_connector(aconnector)) memset(&aconnector->dc_sink->dsc_caps, 0, sizeof(aconnector->dc_sink->dsc_caps)); if (!retrieve_downstream_port_device(aconnector)) memset(&aconnector->mst_downstream_port_present, 0, sizeof(aconnector->mst_downstream_port_present)); } } drm_connector_update_edid_property( &aconnector->base, aconnector->edid); ret = drm_add_edid_modes(connector, aconnector->edid); return ret; } static struct drm_encoder * dm_mst_atomic_best_encoder(struct drm_connector *connector, struct drm_atomic_state *state) { struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, connector); struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); return &adev->dm.mst_encoders[acrtc->crtc_id].base; } static int dm_dp_mst_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct amdgpu_dm_connector *master = aconnector->mst_root; struct drm_dp_mst_port *port = aconnector->mst_output_port; int connection_status; if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, aconnector->mst_output_port); if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { uint8_t dpcd_rev; int ret; ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev); if (ret == 1) { port->dpcd_rev = dpcd_rev; /* Could be DP1.2 DP Rx case*/ if (!dpcd_rev) { ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev); if (ret == 1) port->dpcd_rev = dpcd_rev; } if (!dpcd_rev) DRM_DEBUG_KMS("Can't decide DPCD revision number!"); } /* * Could be legacy sink, logical port etc on DP1.2. * Will get Nack under these cases when issue remote * DPCD read. 
*/ if (ret != 1) DRM_DEBUG_KMS("Can't access DPCD"); } else if (port->pdt == DP_PEER_DEVICE_NONE) { port->dpcd_rev = 0; } /* * Release dc_sink for connector which unplug event is notified by CSN msg */ if (connection_status == connector_status_disconnected && aconnector->dc_sink) { if (aconnector->dc_link->sink_count) dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", aconnector->dc_link, aconnector->dc_link->sink_count); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, false); } return connection_status; } static int dm_dp_mst_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr; struct drm_dp_mst_port *mst_port = aconnector->mst_output_port; return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port); } static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, .atomic_best_encoder = dm_mst_atomic_best_encoder, .detect_ctx = dm_dp_mst_detect, .atomic_check = dm_dp_mst_atomic_check, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { .destroy = amdgpu_dm_encoder_destroy, }; void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); int i; for (i = 0; i < adev->dm.display_indexes_num; i++) { struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; struct drm_encoder *encoder = &amdgpu_encoder->base; encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); drm_encoder_init( dev, &amdgpu_encoder->base, &amdgpu_dm_encoder_funcs, DRM_MODE_ENCODER_DPMST, NULL); drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); } } static struct drm_connector * dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop) { struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); struct drm_device *dev = master->base.dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; int i; aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) return NULL; connector = &aconnector->base; aconnector->mst_output_port = port; aconnector->mst_root = master; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true); if (drm_connector_init( dev, connector, &dm_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort)) { kfree(aconnector); return NULL; } drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs); amdgpu_dm_connector_init_helper( &adev->dm, aconnector, DRM_MODE_CONNECTOR_DisplayPort, master->dc_link, master->connector_id); for (i = 0; i < adev->dm.display_indexes_num; i++) { drm_connector_attach_encoder(&aconnector->base, &adev->dm.mst_encoders[i].base); } connector->max_bpc_property = master->base.max_bpc_property; if (connector->max_bpc_property) drm_connector_attach_max_bpc_property(connector, 8, 16); connector->vrr_capable_property = master->base.vrr_capable_property; if 
(connector->vrr_capable_property) drm_connector_attach_vrr_capable_property(connector); drm_object_attach_property( &connector->base, dev->mode_config.path_property, 0); drm_object_attach_property( &connector->base, dev->mode_config.tile_property, 0); drm_connector_set_path_property(connector, pathprop); /* * Initialize connector state before adding the connectror to drm and * framebuffer lists */ amdgpu_dm_connector_funcs_reset(connector); drm_dp_mst_get_port_malloc(port); return connector; } void dm_handle_mst_sideband_msg_ready_event( struct drm_dp_mst_topology_mgr *mgr, enum mst_msg_ready_type msg_rdy_type) { uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; uint8_t dret; bool new_irq_handled = false; int dpcd_addr; uint8_t dpcd_bytes_to_read; const uint8_t max_process_count = 30; uint8_t process_count = 0; u8 retry; struct amdgpu_dm_connector *aconnector = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; /* DPCD 0x200 - 0x201 for downstream IRQ */ dpcd_addr = DP_SINK_COUNT; } else { dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; /* DPCD 0x2002 - 0x2005 for downstream IRQ */ dpcd_addr = DP_SINK_COUNT_ESI; } mutex_lock(&aconnector->handle_mst_msg_ready); while (process_count < max_process_count) { u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; process_count++; dret = drm_dp_dpcd_read( &aconnector->dm_dp_aux.aux, dpcd_addr, esi, dpcd_bytes_to_read); if (dret != dpcd_bytes_to_read) { DRM_DEBUG_KMS("DPCD read and acked number is not as expected!"); break; } DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); switch (msg_rdy_type) { case DOWN_REP_MSG_RDY_EVENT: /* Only handle DOWN_REP_MSG_RDY case*/ esi[1] &= DP_DOWN_REP_MSG_RDY; break; case UP_REQ_MSG_RDY_EVENT: /* Only handle UP_REQ_MSG_RDY case*/ esi[1] &= DP_UP_REQ_MSG_RDY; break; default: /* Handle both cases*/ esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); break; } if (!esi[1]) break; /* handle MST irq */ if (aconnector->mst_mgr.mst_state) drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, esi, ack, &new_irq_handled); if (new_irq_handled) { /* ACK at DPCD to notify down stream */ for (retry = 0; retry < 3; retry++) { ssize_t wret; wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, dpcd_addr + 1, ack[1]); if (wret == 1) break; } if (retry == 3) { DRM_ERROR("Failed to ack MST event.\n"); break; } drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); new_irq_handled = false; } else { break; } } mutex_unlock(&aconnector->handle_mst_msg_ready); if (process_count == max_process_count) DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); } static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr) { dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT); } static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready, }; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, int link_index) { struct dc_link_settings max_link_enc_cap = {0}; aconnector->dm_dp_aux.aux.name = kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", link_index); aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; aconnector->dm_dp_aux.aux.drm_dev = dm->ddev; aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; 
drm_dp_aux_init(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, &aconnector->base); if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) return; dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap); aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev), &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id); drm_connector_attach_dp_subconnector_property(&aconnector->base); } int dm_mst_get_pbn_divider(struct dc_link *link) { if (!link) return 0; return dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) / (8 * 1000 * 54); } struct dsc_mst_fairness_params { struct dc_crtc_timing *timing; struct dc_sink *sink; struct dc_dsc_bw_range bw_range; bool compression_possible; struct drm_dp_mst_port *port; enum dsc_clock_force_state clock_force_enable; uint32_t num_slices_h; uint32_t num_slices_v; uint32_t bpp_overwrite; struct amdgpu_dm_connector *aconnector; }; static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) { u8 link_coding_cap; uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); if (link_coding_cap == DP_128b_132b_ENCODING) fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; return fec_overhead_multiplier_x1000; } static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) { u64 peak_kbps = kbps; peak_kbps *= 1006; peak_kbps *= fec_overhead_multiplier_x1000; peak_kbps = div_u64(peak_kbps, 1000 * 1000); return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); } static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, int count, int k) { struct drm_connector *drm_connector; int i; struct dc_dsc_config_options dsc_options = {0}; for (i = 0; i < count; i++) { drm_connector = &params[i].aconnector->base; dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options); dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); if (vars[i + k].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], &params[i].sink->dsc_caps.dsc_dec_caps, &dsc_options, 0, params[i].timing, dc_link_get_highest_encoding_format(params[i].aconnector->dc_link), &params[i].timing->dsc_cfg)) { params[i].timing->flags.DSC = 1; if (params[i].bpp_overwrite) params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; else params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; if (params[i].num_slices_h) params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; if (params[i].num_slices_v) params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v; } else { params[i].timing->flags.DSC = 0; } params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn; } for (i = 0; i < count; i++) { if (params[i].sink) { if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && params[i].sink->sink_signal != SIGNAL_TYPE_NONE) DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i, params[i].sink->edid_caps.display_name); } DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n", params[i].timing->flags.DSC, params[i].timing->dsc_cfg.bits_per_pixel, vars[i + k].pbn); } } static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) { struct dc_dsc_config dsc_config; u64 kbps; struct drm_connector *drm_connector = 
&param.aconnector->base; struct dc_dsc_config_options dsc_options = {0}; dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], &param.sink->dsc_caps.dsc_dec_caps, &dsc_options, (int) kbps, param.timing, dc_link_get_highest_encoding_format(param.aconnector->dc_link), &dsc_config); return dsc_config.bits_per_pixel; } static int increase_dsc_bpp(struct drm_atomic_state *state, struct drm_dp_mst_topology_state *mst_state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, int count, int k) { int i; bool bpp_increased[MAX_PIPES]; int initial_slack[MAX_PIPES]; int min_initial_slack; int next_index; int remaining_to_increase = 0; int link_timeslots_used; int fair_pbn_alloc; int ret = 0; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { initial_slack[i] = 0; bpp_increased[i] = true; } } while (remaining_to_increase) { next_index = -1; min_initial_slack = -1; for (i = 0; i < count; i++) { if (!bpp_increased[i]) { if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { min_initial_slack = initial_slack[i]; next_index = i; } } } if (next_index == -1) break; link_timeslots_used = 0; for (i = 0; i < count; i++) link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); if (ret < 0) return ret; ret = drm_dp_mst_atomic_check(state); if (ret == 0) { vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); } else { vars[next_index].pbn -= fair_pbn_alloc; ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); if (ret < 0) return ret; } } else { vars[next_index].pbn += initial_slack[next_index]; ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); if (ret < 0) return ret; ret = drm_dp_mst_atomic_check(state); if (ret == 0) { vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; } else { vars[next_index].pbn -= initial_slack[next_index]; ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); if (ret < 0) return ret; } } bpp_increased[next_index] = true; remaining_to_increase--; } return 0; } static int try_disable_dsc(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, int count, int k) { int i; bool tried[MAX_PIPES]; int kbps_increase[MAX_PIPES]; int max_kbps_increase; int next_index; int remaining_to_try = 0; int ret; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 && 
params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; tried[i] = false; remaining_to_try += 1; } else { kbps_increase[i] = 0; tried[i] = true; } } while (remaining_to_try) { next_index = -1; max_kbps_increase = -1; for (i = 0; i < count; i++) { if (!tried[i]) { if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { max_kbps_increase = kbps_increase[i]; next_index = i; } } } if (next_index == -1) break; vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); if (ret < 0) return ret; ret = drm_dp_mst_atomic_check(state); if (ret == 0) { vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); if (ret < 0) return ret; } tried[next_index] = true; remaining_to_try--; } return 0; } static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, struct dsc_mst_fairness_vars *vars, struct drm_dp_mst_topology_mgr *mgr, int *link_vars_start_index) { struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); int count = 0; int i, k, ret; bool debugfs_overwrite = false; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); memset(params, 0, sizeof(params)); if (IS_ERR(mst_state)) return PTR_ERR(mst_state); /* Set up params */ for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; stream = dc_state->streams[i]; if (stream->link != dc_link) continue; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector) continue; if (!aconnector->mst_output_port) continue; stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; params[count].sink = stream->sink; params[count].aconnector = aconnector; params[count].port = aconnector->mst_output_port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) debugfs_overwrite = true; params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, &stream->timing, dc_link_get_highest_encoding_format(dc_link), &params[count].bw_range)) params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(dc_link)); count++; } if (count == 0) { ASSERT(0); return 0; } /* k is start index of vars for current phy link used by mst hub */ k = *link_vars_start_index; 
/* set vars start index for next mst hub phy link */ *link_vars_start_index += count; /* Try no compression */ for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, vars[i + k].pbn); if (ret < 0) return ret; } ret = drm_dp_mst_atomic_check(state); if (ret == 0 && !debugfs_overwrite) { set_dsc_configs_from_fairness_vars(params, vars, count, k); return 0; } else if (ret != -ENOSPC) { return ret; } /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, vars[i + k].pbn); if (ret < 0) return ret; } else { vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, vars[i + k].pbn); if (ret < 0) return ret; } } ret = drm_dp_mst_atomic_check(state); if (ret != 0) return ret; /* Optimize degree of compression */ ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); if (ret < 0) return ret; ret = try_disable_dsc(state, dc_link, params, vars, count, k); if (ret < 0) return ret; set_dsc_configs_from_fairness_vars(params, vars, count, k); return 0; } static bool is_dsc_need_re_compute( struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link) { int i, j; bool is_dsc_need_re_compute = false; struct amdgpu_dm_connector *stream_on_link[MAX_PIPES]; int new_stream_on_link_num = 0; struct amdgpu_dm_connector *aconnector; struct dc_stream_state *stream; const struct dc *dc = dc_link->dc; /* only check phy used by dsc mst branch */ if (dc_link->type != dc_connection_mst_branch) return false; if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) return false; for (i = 0; i < MAX_PIPES; i++) stream_on_link[i] = NULL; /* check if there is mode change in new request */ for (i = 0; i < dc_state->stream_count; i++) { struct drm_crtc_state *new_crtc_state; struct drm_connector_state *new_conn_state; stream = dc_state->streams[i]; if (!stream) continue; /* check if stream using the same link for mst */ if (stream->link != dc_link) continue; aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; if (!aconnector) continue; stream_on_link[new_stream_on_link_num] = aconnector; new_stream_on_link_num++; new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); if (!new_conn_state) continue; if (IS_ERR(new_conn_state)) continue; if (!new_conn_state->crtc) continue; new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); if (!new_crtc_state) continue; if (IS_ERR(new_crtc_state)) continue; if (new_crtc_state->enable && new_crtc_state->active) { if (new_crtc_state->mode_changed || new_crtc_state->active_changed || new_crtc_state->connectors_changed) return true; } } /* check current_state if there stream on link but it is 
not in * new request state */ for (i = 0; i < dc->current_state->stream_count; i++) { stream = dc->current_state->streams[i]; /* only check stream on the mst hub */ if (stream->link != dc_link) continue; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector) continue; for (j = 0; j < new_stream_on_link_num; j++) { if (stream_on_link[j]) { if (aconnector == stream_on_link[j]) break; } } if (j == new_stream_on_link_num) { /* not in new state */ is_dsc_need_re_compute = true; break; } } return is_dsc_need_re_compute; } int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; struct resource_pool *res_pool; int link_vars_start_index = 0; int ret = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; for (i = 0; i < dc_state->stream_count; i++) { stream = dc_state->streams[i]; res_pool = stream->ctx->dc->res_pool; if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) continue; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) continue; if (computed_streams[i]) continue; if (res_pool->funcs->remove_stream_from_ctx && res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) return -EINVAL; if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; mst_mgr = aconnector->mst_output_port->mgr; ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, &link_vars_start_index); if (ret != 0) return ret; for (j = 0; j < dc_state->stream_count; j++) { if (dc_state->streams[j]->link == stream->link) computed_streams[j] = true; } } for (i = 0; i < dc_state->stream_count; i++) { stream = dc_state->streams[i]; if (stream->timing.flags.DSC == 1) if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) return -EINVAL; } return ret; } static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; int link_vars_start_index = 0; int ret = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; for (i = 0; i < dc_state->stream_count; i++) { stream = dc_state->streams[i]; if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) continue; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) continue; if (computed_streams[i]) continue; if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; mst_mgr = aconnector->mst_output_port->mgr; ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, &link_vars_start_index); if (ret != 0) return ret; for (j = 0; j < dc_state->stream_count; j++) { if (dc_state->streams[j]->link == stream->link) computed_streams[j] = true; } } return ret; } static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, struct dc_stream_state *stream) { int i; struct drm_crtc *crtc; struct 
drm_crtc_state *new_state, *old_state; for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) { struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state); if (dm_state->stream == stream) return i; } return -1; } static bool is_link_to_dschub(struct dc_link *dc_link) { union dpcd_dsc_basic_capabilities *dsc_caps = &dc_link->dpcd_caps.dsc_caps.dsc_basic_caps; /* only check phy used by dsc mst branch */ if (dc_link->type != dc_connection_mst_branch) return false; if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT || dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) return false; return true; } static bool is_dsc_precompute_needed(struct drm_atomic_state *state) { int i; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; bool ret = false; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state); if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) { ret = false; break; } if (dm_crtc_state->stream && dm_crtc_state->stream->link) if (is_link_to_dschub(dm_crtc_state->stream->link)) ret = true; } return ret; } int pre_validate_dsc(struct drm_atomic_state *state, struct dm_atomic_state **dm_state_ptr, struct dsc_mst_fairness_vars *vars) { int i; struct dm_atomic_state *dm_state; struct dc_state *local_dc_state = NULL; int ret = 0; if (!is_dsc_precompute_needed(state)) { DRM_INFO_ONCE("DSC precompute is not needed.\n"); return 0; } ret = dm_atomic_get_state(state, dm_state_ptr); if (ret != 0) { DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); return ret; } dm_state = *dm_state_ptr; /* * create local vailable for dc_state. copy content of streams of dm_state->context * to local variable. make sure stream pointer of local variable not the same as stream * from dm_state->context. 
*/ local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL); if (!local_dc_state) return -ENOMEM; for (i = 0; i < local_dc_state->stream_count; i++) { struct dc_stream_state *stream = dm_state->context->streams[i]; int ind = find_crtc_index_in_state_by_stream(state, stream); if (ind >= 0) { struct amdgpu_dm_connector *aconnector; struct drm_connector_state *drm_new_conn_state; struct dm_connector_state *dm_new_conn_state; struct dm_crtc_state *dm_old_crtc_state; aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, state->crtcs[ind].ptr); drm_new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state); local_dc_state->streams[i] = create_validate_stream_for_sink(aconnector, &state->crtcs[ind].new_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); if (local_dc_state->streams[i] == NULL) { ret = -EINVAL; break; } } } if (ret != 0) goto clean_exit; ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); if (ret != 0) { DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); ret = -EINVAL; goto clean_exit; } /* * compare local_streams -> timing with dm_state->context, * if the same set crtc_state->mode-change = 0; */ for (i = 0; i < local_dc_state->stream_count; i++) { struct dc_stream_state *stream = dm_state->context->streams[i]; if (local_dc_state->streams[i] && dc_is_timing_changed(stream, local_dc_state->streams[i])) { DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); } else { int ind = find_crtc_index_in_state_by_stream(state, stream); if (ind >= 0) state->crtcs[ind].new_state->mode_changed = 0; } } clean_exit: for (i = 0; i < local_dc_state->stream_count; i++) { struct dc_stream_state *stream = dm_state->context->streams[i]; if (local_dc_state->streams[i] != stream) dc_stream_release(local_dc_state->streams[i]); } kfree(local_dc_state); return ret; } static unsigned int kbps_from_pbn(unsigned int pbn) { unsigned int kbps = pbn; kbps *= (1000000 / PEAK_FACTOR_X1000); kbps *= 8; kbps *= 54; kbps /= 64; return kbps; } static bool is_dsc_common_config_possible(struct dc_stream_state *stream, struct dc_dsc_bw_range *bw_range) { struct dc_dsc_policy dsc_policy = {0}; dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; } enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream) { int bpp, pbn, branch_max_throughput_mps = 0; struct dc_link_settings cur_link_settings; unsigned int end_to_end_bw_in_kbps = 0; unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; unsigned int max_compressed_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; struct drm_dp_mst_topology_mgr *mst_mgr; /* * check if the mode could be supported if DSC pass-through is supported * AND check if there enough bandwidth available to support the mode * with DSC enabled. 
*/ if (is_dsc_common_config_possible(stream, &bw_range) && aconnector->mst_output_port->passthrough_aux) { mst_mgr = aconnector->mst_output_port->mgr; mutex_lock(&mst_mgr->lock); cur_link_settings = stream->link->verified_link_cap; upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); /* pick the bottleneck */ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps); mutex_unlock(&mst_mgr->lock); /* * use the bandwidth at maximum dsc compression as the minimum * bandwidth required for the mode */ max_compressed_bw_in_kbps = bw_range.min_kbps; if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } else { /* check if mode could be supported within full_pbn */ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); if (pbn > aconnector->mst_output_port->full_pbn) return DC_FAIL_BANDWIDTH_VALIDATE; } /* check whether the mst dsc output bandwidth exceeds the branch overall throughput cap (branch_overall_throughput_0/1_mps) */ switch (stream->timing.pixel_encoding) { case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: branch_max_throughput_mps = aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; break; case PIXEL_ENCODING_YCBCR422: case PIXEL_ENCODING_YCBCR420: branch_max_throughput_mps = aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; break; default: break; } if (branch_max_throughput_mps != 0 && ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) return DC_FAIL_BANDWIDTH_VALIDATE; return DC_OK; }
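The pass-through branch of dm_dp_mst_is_port_support_mode() above reduces to a unit conversion and a bottleneck comparison: full_pbn is turned into kbps, the end-to-end budget is the smaller of the upstream and downstream link bandwidths, and the mode fits only if the minimum DSC-compressed bandwidth stays within that budget. The standalone C sketch below redoes that arithmetic with made-up figures; it is not driver code, and PEAK_FACTOR_X1000 = 1006 as well as every bandwidth number in main() are illustrative assumptions.

#include <stdio.h>

#define PEAK_FACTOR_X1000 1006	/* assumed ~0.6% DP capacity margin */

/* Mirrors the kbps_from_pbn() helper above: one PBN unit is 64/54 MBps. */
static unsigned int example_kbps_from_pbn(unsigned int pbn)
{
	unsigned int kbps = pbn;

	kbps *= (1000000 / PEAK_FACTOR_X1000);
	kbps *= 8;	/* bytes to bits */
	kbps *= 54;
	kbps /= 64;

	return kbps;
}

int main(void)
{
	unsigned int upper_link_bw_in_kbps = 32400000;	/* assumed HBR3 x4 link */
	unsigned int down_link_bw_in_kbps = example_kbps_from_pbn(2000);	/* assumed full_pbn */
	unsigned int end_to_end_bw_in_kbps = down_link_bw_in_kbps < upper_link_bw_in_kbps ?
					     down_link_bw_in_kbps : upper_link_bw_in_kbps;
	unsigned int max_compressed_bw_in_kbps = 8000000;	/* stand-in for bw_range.min_kbps */

	printf("end-to-end budget: %u kbps\n", end_to_end_bw_in_kbps);
	printf("mode %s the DSC pass-through check\n",
	       end_to_end_bw_in_kbps < max_compressed_bw_in_kbps ? "fails" : "passes");
	return 0;
}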
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "amdgpu_dm_hdcp.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "dm_helpers.h" #include <drm/display/drm_hdcp_helper.h> #include "hdcp_psp.h" /* * If the SRM version being loaded is less than or equal to the * currently loaded SRM, psp will return 0xFFFF as the version */ #define PSP_SRM_VERSION_MAX 0xFFFF static bool lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) { struct dc_link *link = handle; struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } static bool lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size) { struct dc_link *link = handle; struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } static bool lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size) { struct dc_link *link = handle; return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size); } static bool lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) { struct dc_link *link = handle; return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); } static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) { struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized."); return NULL; } hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) return NULL; *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; } static int psp_set_srm(struct psp_context *psp, u8 *srm, uint32_t srm_size, uint32_t *srm_version) { struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { DRM_WARN("Failed to get hdcp srm. 
HDCP TA is not initialized."); return -EINVAL; } hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size); hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX) return -EINVAL; *srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version; return 0; } static void process_output(struct hdcp_workqueue *hdcp_work) { struct mod_hdcp_output output = hdcp_work->output; if (output.callback_stop) cancel_delayed_work(&hdcp_work->callback_dwork); if (output.callback_needed) schedule_delayed_work(&hdcp_work->callback_dwork, msecs_to_jiffies(output.callback_delay)); if (output.watchdog_timer_stop) cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); if (output.watchdog_timer_needed) schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, msecs_to_jiffies(output.watchdog_timer_delay)); schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0)); } static void link_lock(struct hdcp_workqueue *work, bool lock) { int i = 0; for (i = 0; i < work->max_link; i++) { if (lock) mutex_lock(&work[i].mutex); else mutex_unlock(&work[i].mutex); } } void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, u8 content_type, bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct mod_hdcp_link_adjustment link_adjust; struct mod_hdcp_display_adjustment display_adjust; unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); hdcp_w->aconnector[conn_index] = aconnector; memset(&link_adjust, 0, sizeof(link_adjust)); memset(&display_adjust, 0, sizeof(display_adjust)); if (enable_encryption) { /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp * (s3 resume case) */ if (hdcp_work->srm_size > 0) psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size, &hdcp_work->srm_version); display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; link_adjust.auth_delay = 2; if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { link_adjust.hdcp1.disable = 1; link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; } schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); } else { display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; cancel_delayed_work(&hdcp_w->property_validate_dwork); } mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output); process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct drm_connector_state *conn_state = aconnector->base.state; unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); hdcp_w->aconnector[conn_index] = aconnector; /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the 
Content Protection (CP) property changed back to * DESIRED if at the time ENABLED. CP property change should occur * before the element removed from linked list. */ if (conn_state && conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n", aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms); } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; unsigned int conn_index; mutex_lock(&hdcp_w->mutex); mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); cancel_delayed_work(&hdcp_w->property_validate_dwork); for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; } process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; schedule_work(&hdcp_w->cpirq_work); } static void event_callback(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, callback_dwork); mutex_lock(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->callback_dwork); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, &hdcp_work->output); process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); } static void event_property_update(struct work_struct *work) { struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); struct amdgpu_dm_connector *aconnector = NULL; struct drm_device *dev; long ret; unsigned int conn_index; struct drm_connector *connector; struct drm_connector_state *conn_state; for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { aconnector = hdcp_work->aconnector[conn_index]; if (!aconnector) continue; connector = &aconnector->base; /* check if display connected */ if (connector->status != connector_status_connected) continue; conn_state = aconnector->base.state; if (!conn_state) continue; dev = connector->dev; if (!dev) continue; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); mutex_lock(&hdcp_work->mutex); if (conn_state->commit) { ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, 10 * HZ); if (ret == 0) { DRM_ERROR("HDCP state unknown! 
Setting it to DESIRED\n"); hdcp_work->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; } } if (hdcp_work->encryption_status[conn_index] != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && hdcp_work->encryption_status[conn_index] <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); drm_hdcp_update_content_protection(connector, DRM_MODE_CONTENT_PROTECTION_ENABLED); } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && hdcp_work->encryption_status[conn_index] == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { drm_hdcp_update_content_protection(connector, DRM_MODE_CONTENT_PROTECTION_ENABLED); } } else { DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); drm_hdcp_update_content_protection(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); } mutex_unlock(&hdcp_work->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); } } static void event_property_validate(struct work_struct *work) { struct hdcp_workqueue *hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); struct mod_hdcp_display_query query; struct amdgpu_dm_connector *aconnector; unsigned int conn_index; mutex_lock(&hdcp_work->mutex); for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { aconnector = hdcp_work->aconnector[conn_index]; if (!aconnector) continue; /* check if display connected */ if (aconnector->base.status != connector_status_connected) continue; if (!aconnector->base.state) continue; query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", aconnector->base.index, aconnector->base.state->content_protection, query.encryption_status, hdcp_work->encryption_status[conn_index]); if (query.encryption_status != hdcp_work->encryption_status[conn_index]) { DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", hdcp_work->encryption_status[conn_index], query.encryption_status); hdcp_work->encryption_status[conn_index] = query.encryption_status; DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n"); schedule_work(&hdcp_work->property_update_work); } } mutex_unlock(&hdcp_work->mutex); } static void event_watchdog_timer(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, watchdog_timer_dwork); mutex_lock(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, &hdcp_work->output); process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); } static void event_cpirq(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); mutex_lock(&hdcp_work->mutex); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); } void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) { int i = 0; for (i = 0; i < hdcp_work->max_link; i++) { cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); } sysfs_remove_bin_file(kobj, &hdcp_work[0].attr); kfree(hdcp_work->srm); kfree(hdcp_work->srm_temp); kfree(hdcp_work); } static 
bool enable_assr(void *handle, struct dc_link *link) { struct hdcp_workqueue *hdcp_work = handle; struct mod_hdcp hdcp = hdcp_work->hdcp; struct psp_context *psp = hdcp.config.psp.handle; struct ta_dtm_shared_memory *dtm_cmd; bool res = true; if (!psp->dtm_context.context.initialized) { DRM_INFO("Failed to enable ASSR, DTM TA is not initialized."); return false; } dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf; mutex_lock(&psp->dtm_context.mutex); memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst; dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { DRM_INFO("Failed to enable ASSR"); res = false; } mutex_unlock(&psp->dtm_context.mutex); return res; } static void update_config(void *handle, struct cp_psp_stream_config *config) { struct hdcp_workqueue *hdcp_work = handle; struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; int link_index = aconnector->dc_link->link_index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct dc_sink *sink = NULL; bool link_is_hdcp14 = false; if (config->dpms_off) { hdcp_remove_display(hdcp_work, link_index, aconnector); return; } memset(display, 0, sizeof(*display)); memset(link, 0, sizeof(*link)); display->index = aconnector->base.index; display->state = MOD_HDCP_DISPLAY_ACTIVE; if (aconnector->dc_sink) sink = aconnector->dc_sink; else if (aconnector->dc_em_sink) sink = aconnector->dc_em_sink; if (sink) link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; display->dig_fe = config->dig_fe; link->dig_be = config->dig_be; link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; display->stream_enc_idx = config->stream_enc_idx; link->link_enc_idx = config->link_enc_idx; link->dio_output_id = config->dio_output_idx; link->phy_idx = config->phy_idx; if (sink) link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal); link->hdcp_supported_informational = link_is_hdcp14; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.assr_enabled = config->assr_enabled; link->dp.mst_enabled = config->mst_enabled; link->dp.dp2_enabled = config->dp2_enabled; link->dp.usb4_enabled = config->usb4_enabled; display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; link->adjust.auth_delay = 2; link->adjust.hdcp1.disable = 0; hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, (!!aconnector->base.state) ? 
aconnector->base.state->hdcp_content_type : -1); mutex_lock(&hdcp_w->mutex); mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } /** * DOC: Add sysfs interface for set/get srm * * NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel * will automatically call once or twice depending on the size * * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is * * The kernel can only send PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096), * srm_data_write can be called multiple times. * * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on * the last call we will send the full SRM. PSP will fail on every call before the last. * * This means we don't know if the SRM is good until the last call. And because of this * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs * * Example 1: * Good SRM size = 5096 * first call to write 4096 -> PSP fails * Second call to write 1000 -> PSP Pass -> SRM is set * * Example 2: * Bad SRM size = 4096 * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this * is the last call) * * Solution?: * 1: Parse the SRM? -> It is signed so we don't know the EOF * 2: We can have another sysfs that passes the size before calling set. -> simpler solution * below * * Easy Solution: * Always call get after Set to verify if set was successful. * +----------------------+ * | Why it works: | * +----------------------+ * PSP will only update its srm if its older than the one we are trying to load. * Always do set first than get. * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer * version and save it * * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the * same(newer) version back and save it * * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is * incorrect/corrupted and we should correct our SRM by getting it from PSP */ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; u32 srm_version = 0; work = container_of(bin_attr, struct hdcp_workqueue, attr); link_lock(work, true); memcpy(work->srm_temp + pos, buffer, count); if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) { DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version); memcpy(work->srm, work->srm_temp, pos + count); work->srm_size = pos + count; work->srm_version = srm_version; } link_lock(work, false); return count; } static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; u8 *srm = NULL; u32 srm_version; u32 srm_size; size_t ret = count; work = container_of(bin_attr, struct hdcp_workqueue, attr); link_lock(work, true); srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size); if (!srm) { ret = -EINVAL; goto ret; } if (pos >= srm_size) ret = 0; if (srm_size - pos < count) { memcpy(buffer, srm + pos, srm_size - pos); ret = srm_size - pos; goto ret; } memcpy(buffer, srm + pos, count); ret: link_lock(work, false); return ret; } /* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory. 
* * For example, * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent * across boot/reboots/suspend/resume/shutdown * * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP * we need to make the SRM persistent. * * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. * -The kernel cannot write to the file systems. * -So we need usermode to do this for us, which is why an interface for usermode is needed * * * * Usermode can read/write to/from PSP using the sysfs interface * For example: * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm */ static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */ .write = srm_data_write, .read = srm_data_read, }; struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc) { int max_caps = dc->caps.max_links; struct hdcp_workqueue *hdcp_work; int i = 0; hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL); if (ZERO_OR_NULL_PTR(hdcp_work)) return NULL; hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); if (!hdcp_work->srm) goto fail_alloc_context; hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL); if (!hdcp_work->srm_temp) goto fail_alloc_context; hdcp_work->max_link = max_caps; for (i = 0; i < max_caps; i++) { mutex_init(&hdcp_work[i].mutex); INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); INIT_WORK(&hdcp_work[i].property_update_work, event_property_update); INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback); INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); hdcp_work[i].hdcp.config.psp.handle = &adev->psp; if (dc->ctx->dce_version == DCN_VERSION_3_1 || dc->ctx->dce_version == DCN_VERSION_3_14 || dc->ctx->dce_version == DCN_VERSION_3_15 || dc->ctx->dce_version == DCN_VERSION_3_16) hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; memset(hdcp_work[i].aconnector, 0, sizeof(struct amdgpu_dm_connector *) * AMDGPU_DM_MAX_DISPLAY_INDEX); memset(hdcp_work[i].encryption_status, 0, sizeof(enum mod_hdcp_encryption_status) * AMDGPU_DM_MAX_DISPLAY_INDEX); } cp_psp->funcs.update_stream_config = update_config; cp_psp->funcs.enable_assr = enable_assr; cp_psp->handle = hdcp_work; /* File created at /sys/class/drm/card0/device/hdcp_srm*/ hdcp_work[0].attr = data_attr; sysfs_bin_attr_init(&hdcp_work[0].attr); if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr)) DRM_WARN("Failed to create device file hdcp_srm"); return hdcp_work; fail_alloc_context: kfree(hdcp_work->srm); kfree(hdcp_work->srm_temp); kfree(hdcp_work); return NULL; }
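The hdcp_srm DOC comment above tells userspace to SET the SRM first and then GET it back to learn whether PSP accepted it, because the sysfs write path cannot report partial-write failures early. The userspace sketch below (not part of the driver) follows that flow; the sysfs path is the one named in the comment, while the 4096-byte chunk size and the srm.bin input file name are assumptions for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define SRM_SYSFS_PATH "/sys/class/drm/card0/device/hdcp_srm"
#define CHUNK_SIZE 4096	/* assumed PAGE_SIZE */

int main(void)
{
	unsigned char buf[CHUNK_SIZE];
	ssize_t n, got;
	int srm_fd, node_fd;

	srm_fd = open("srm.bin", O_RDONLY);	/* assumed local copy of the SRM */
	if (srm_fd < 0) {
		perror("srm.bin");
		return EXIT_FAILURE;
	}

	/* 1. SET: sysfs hands the kernel at most one page per write(), so the
	 * blob is streamed in page-sized chunks, as the DOC comment describes. */
	node_fd = open(SRM_SYSFS_PATH, O_WRONLY);
	if (node_fd < 0) {
		perror(SRM_SYSFS_PATH);
		close(srm_fd);
		return EXIT_FAILURE;
	}
	while ((n = read(srm_fd, buf, sizeof(buf))) > 0) {
		if (write(node_fd, buf, (size_t)n) != n) {
			perror("write hdcp_srm");
			break;
		}
	}
	close(node_fd);
	close(srm_fd);

	/* 2. GET: read back what PSP now holds; per the comment above this is
	 * the only reliable way to confirm that the SET was accepted. */
	node_fd = open(SRM_SYSFS_PATH, O_RDONLY);
	if (node_fd < 0) {
		perror(SRM_SYSFS_PATH);
		return EXIT_FAILURE;
	}
	got = read(node_fd, buf, sizeof(buf));
	close(node_fd);

	printf("first read-back chunk from PSP: %zd bytes\n", got);
	return EXIT_SUCCESS;
}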
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* The caprices of the preprocessor require that this be declared right here */ #define CREATE_TRACE_POINTS #include "dm_services_types.h" #include "dc.h" #include "link_enc_cfg.h" #include "dc/inc/core_types.h" #include "dal_asic_id.h" #include "dmub/dmub_srv.h" #include "dc/inc/hw/dmcu.h" #include "dc/inc/hw/abm.h" #include "dc/dc_dmub_srv.h" #include "dc/dc_edid_parser.h" #include "dc/dc_stat.h" #include "amdgpu_dm_trace.h" #include "dpcd_defs.h" #include "link/protocols/link_dpcd.h" #include "link_service_types.h" #include "link/protocols/link_dp_capability.h" #include "link/protocols/link_ddc.h" #include "vid.h" #include "amdgpu.h" #include "amdgpu_display.h" #include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" #include "amdgpu_dm_plane.h" #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_hdcp.h" #include <drm/display/drm_hdcp_helper.h> #include "amdgpu_pm.h" #include "amdgpu_atombios.h" #include "amd_shared.h" #include "amdgpu_dm_irq.h" #include "dm_helpers.h" #include "amdgpu_dm_mst_types.h" #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" #endif #include "amdgpu_dm_psr.h" #include "amdgpu_dm_replay.h" #include "ivsrcid/ivsrcid_vislands30.h" #include <linux/backlight.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/component.h> #include <linux/dmi.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> #include <acpi/video.h> #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #include "dcn/dcn_1_0_offset.h" #include "dcn/dcn_1_0_sh_mask.h" #include "soc15_hw_ip.h" #include "soc15_common.h" #include "vega10_ip_offset.h" #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" #include "modules/inc/mod_freesync.h" #include "modules/power/power_helpers.h" #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); #define 
FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); /* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 /* Number of bytes in PSP footer for firmware. */ #define PSP_FOOTER_BYTES 0x100 /** * DOC: overview * * The AMDgpu display manager, **amdgpu_dm** (or even simpler, * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM * requests into DC requests, and DC responses into DRM responses. * * The root control structure is &struct amdgpu_display_manager. */ /* basic init/fini API */ static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) { switch (link->dpcd_caps.dongle_type) { case DISPLAY_DONGLE_NONE: return DRM_MODE_SUBCONNECTOR_Native; case DISPLAY_DONGLE_DP_VGA_CONVERTER: return DRM_MODE_SUBCONNECTOR_VGA; case DISPLAY_DONGLE_DP_DVI_CONVERTER: case DISPLAY_DONGLE_DP_DVI_DONGLE: return DRM_MODE_SUBCONNECTOR_DVID; case DISPLAY_DONGLE_DP_HDMI_CONVERTER: case DISPLAY_DONGLE_DP_HDMI_DONGLE: return DRM_MODE_SUBCONNECTOR_HDMIA; case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: default: return DRM_MODE_SUBCONNECTOR_Unknown; } } static void update_subconnector_property(struct amdgpu_dm_connector *aconnector) { struct dc_link *link = aconnector->dc_link; struct drm_connector *connector = &aconnector->base; enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown; if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) return; if (aconnector->dc_sink) subconnector = get_subconnector_type(link); drm_object_property_set_value(&connector->base, connector->dev->mode_config.dp_subconnector_property, subconnector); } /* * initializes drm_device display related structures, based on the information * provided by DAL. 
The drm strcutures are: drm_crtc, drm_connector, * drm_encoder, drm_mode_config * * Returns 0 on success */ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); /* removes and deallocates the drm structures, created by the above function */ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *amdgpu_dm_connector, u32 link_index, struct amdgpu_encoder *amdgpu_encoder); static int amdgpu_dm_encoder_init(struct drm_device *dev, struct amdgpu_encoder *aencoder, uint32_t link_index); static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); static void handle_hpd_rx_irq(void *param); static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state); /* * dm_vblank_get_counter * * @brief * Get counter for number of vertical blanks * * @param * struct amdgpu_device *adev - [in] desired amdgpu device * int disp_idx - [in] which CRTC to get the counter from * * @return * Counter for vertical blanks */ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) { struct amdgpu_crtc *acrtc = NULL; if (crtc >= adev->mode_info.num_crtc) return 0; acrtc = adev->mode_info.crtcs[crtc]; if (!acrtc->dm_irq_params.stream) { DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc); return 0; } return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); } static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position) { u32 v_blank_start, v_blank_end, h_position, v_position; struct amdgpu_crtc *acrtc = NULL; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; acrtc = adev->mode_info.crtcs[crtc]; if (!acrtc->dm_irq_params.stream) { DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc); return 0; } /* * TODO rework base driver to use values directly. 
* for now parse it back into reg-format */ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, &v_blank_start, &v_blank_end, &h_position, &v_position); *position = v_position | (h_position << 16); *vbl = v_blank_start | (v_blank_end << 16); return 0; } static bool dm_is_idle(void *handle) { /* XXX todo */ return true; } static int dm_wait_for_idle(void *handle) { /* XXX todo */ return 0; } static bool dm_check_soft_reset(void *handle) { return false; } static int dm_soft_reset(void *handle) { /* XXX todo */ return 0; } static struct amdgpu_crtc * get_crtc_by_otg_inst(struct amdgpu_device *adev, int otg_inst) { struct drm_device *dev = adev_to_drm(adev); struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; if (WARN_ON(otg_inst == -1)) return adev->mode_info.crtcs[0]; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); if (amdgpu_crtc->otg_inst == otg_inst) return amdgpu_crtc; } return NULL; } static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) return true; else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) return true; else return false; } static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update, int planes_count) { int i, j; for (i = 0, j = planes_count - 1; i < j; i++, j--) swap(array_of_surface_update[i], array_of_surface_update[j]); } /** * update_planes_and_stream_adapter() - Send planes to be updated in DC * * DC has a generic way to update planes and stream via * dc_update_planes_and_stream function; however, DM might need some * adjustments and preparation before calling it. This function is a wrapper * for the dc_update_planes_and_stream that does any required configuration * before passing control to DC. * * @dc: Display Core control structure * @update_type: specify whether it is FULL/MEDIUM/FAST update * @planes_count: planes count to update * @stream: stream state * @stream_update: stream update * @array_of_surface_update: dc surface update pointer * */ static inline bool update_planes_and_stream_adapter(struct dc *dc, int update_type, int planes_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, struct dc_surface_update *array_of_surface_update) { reverse_planes_order(array_of_surface_update, planes_count); /* * Previous frame finished and HW is ready for optimization. */ if (update_type == UPDATE_TYPE_FAST) dc_post_update_surfaces_to_stream(dc); return dc_update_planes_and_stream(dc, array_of_surface_update, planes_count, stream, stream_update); } /** * dm_pflip_high_irq() - Handle pageflip interrupt * @interrupt_params: ignored * * Handles the pageflip interrupt by notifying all interested parties * that the pageflip has been completed. 
*/ static void dm_pflip_high_irq(void *interrupt_params) { struct amdgpu_crtc *amdgpu_crtc; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; unsigned long flags; struct drm_pending_vblank_event *e; u32 vpos, hpos, v_blank_start, v_blank_end; bool vrr_active; amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); /* IRQ could occur when in initial stage */ /* TODO work and BO cleanup */ if (amdgpu_crtc == NULL) { DC_LOG_PFLIP("CRTC is null, returning.\n"); return; } spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED, amdgpu_crtc->crtc_id, amdgpu_crtc); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return; } /* page flip completed. */ e = amdgpu_crtc->event; amdgpu_crtc->event = NULL; WARN_ON(!e); vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc); /* Fixed refresh rate, or VRR scanout position outside front-porch? */ if (!vrr_active || !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start, &v_blank_end, &hpos, &vpos) || (vpos < v_blank_start)) { /* Update to correct count and vblank timestamp if racing with * vblank irq. This also updates to the correct vblank timestamp * even in VRR mode, as scanout is past the front-porch atm. */ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); /* Wake up userspace by sending the pageflip event with proper * count and timestamp of vblank of flip completion. */ if (e) { drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); /* Event sent, so done with vblank for this flip */ drm_crtc_vblank_put(&amdgpu_crtc->base); } } else if (e) { /* VRR active and inside front-porch: vblank count and * timestamp for pageflip event will only be up to date after * drm_crtc_handle_vblank() has been executed from late vblank * irq handler after start of back-porch (vline 0). We queue the * pageflip event for send-out by drm_crtc_handle_vblank() with * updated timestamp and count, once it runs after us. * * We need to open-code this instead of using the helper * drm_crtc_arm_vblank_event(), as that helper would * call drm_crtc_accurate_vblank_count(), which we must * not call in VRR mode while we are in front-porch! */ /* sequence will be replaced by real count during send-out. */ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); e->pipe = amdgpu_crtc->crtc_id; list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list); e = NULL; } /* Keep track of vblank of this flip for flip throttling. We use the * cooked hw counter, as that one incremented at start of this vblank * of pageflip completion, so last_flip_vblank is the forbidden count * for queueing new pageflips if vsync + VRR is enabled. 
*/ amdgpu_crtc->dm_irq_params.last_flip_vblank = amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int) !e); } static void dm_vupdate_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; struct drm_device *drm_dev; struct drm_vblank_crtc *vblank; ktime_t frame_duration_ns, previous_timestamp; unsigned long flags; int vrr_active; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); if (acrtc) { vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); drm_dev = acrtc->base.dev; vblank = &drm_dev->vblank[acrtc->base.index]; previous_timestamp = atomic64_read(&irq_params->previous_timestamp); frame_duration_ns = vblank->time - previous_timestamp; if (frame_duration_ns > 0) { trace_amdgpu_refresh_rate_track(acrtc->base.index, frame_duration_ns, ktime_divns(NSEC_PER_SEC, frame_duration_ns)); atomic64_set(&irq_params->previous_timestamp, vblank->time); } DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, vrr_active); /* Core vblank handling is done here after end of front-porch in * vrr mode, as vblank timestamping will give valid results * while now done after front-porch. This will also deliver * page-flip completion events that have been queued to us * if a pageflip happened inside front-porch. */ if (vrr_active) { amdgpu_dm_crtc_handle_vblank(acrtc); /* BTR processing for pre-DCE12 ASICs */ if (acrtc->dm_irq_params.stream && adev->family < AMDGPU_FAMILY_AI) { spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); mod_freesync_handle_v_update( adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); dc_stream_adjust_vmin_vmax( adev->dm.dc, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params.adjust); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } } } } /** * dm_crtc_high_irq() - Handles CRTC interrupt * @interrupt_params: used for determining the CRTC instance * * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK * event handler. */ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; unsigned long flags; int vrr_active; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); if (!acrtc) return; vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, vrr_active, acrtc->dm_irq_params.active_planes); /** * Core vblank handling at start of front-porch is only possible * in non-vrr mode, as only there vblank timestamping will give * valid results while done in front-porch. Otherwise defer it * to dm_vupdate_high_irq after end of front-porch. */ if (!vrr_active) amdgpu_dm_crtc_handle_vblank(acrtc); /** * Following stuff must happen at start of vblank, for crc * computation and below-the-range btr support in vrr mode. */ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); /* BTR updates need to happen before VUPDATE on Vega and above. 
*/ if (adev->family < AMDGPU_FAMILY_AI) return; spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (acrtc->dm_irq_params.stream && acrtc->dm_irq_params.vrr_params.supported && acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { mod_freesync_handle_v_update(adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params.adjust); } /* * If there aren't any active_planes then DCH HUBP may be clock-gated. * In that case, pageflip completion interrupts won't fire and pageflip * completion events won't get delivered. Prevent this by sending * pending pageflip events from here if a flip is still pending. * * If any planes are enabled, use dm_pflip_high_irq() instead, to * avoid race conditions between flip programming and completion, * which could cause too early flip completion events. */ if (adev->family >= AMDGPU_FAMILY_RV && acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && acrtc->dm_irq_params.active_planes == 0) { if (acrtc->event) { drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); acrtc->event = NULL; drm_crtc_vblank_put(&acrtc->base); } acrtc->pflip_status = AMDGPU_FLIP_NONE; } spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /** * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for * DCN generation ASICs * @interrupt_params: interrupt parameters * * Used to set crc window/read out crc value at vertical line 0 position */ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); if (!acrtc) return; amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); } #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ /** * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. * @adev: amdgpu_device pointer * @notify: dmub notification structure * * Dmub AUX or SET_CONFIG command completion processing callback * Copies dmub notification to DM which is to be read by AUX command. * issuing thread and also signals the event to wake up the thread. */ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) { if (adev->dm.dmub_notify) memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) complete(&adev->dm.dmub_aux_transfer_done); } /** * dmub_hpd_callback - DMUB HPD interrupt processing callback. * @adev: amdgpu_device pointer * @notify: dmub notification structure * * Dmub Hpd interrupt processing callback. Gets displayindex through the * ink index and calls helper to do the processing. 
*/ static void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) { struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *hpd_aconnector = NULL; struct drm_connector *connector; struct drm_connector_list_iter iter; struct dc_link *link; u8 link_index = 0; struct drm_device *dev; if (adev == NULL) return; if (notify == NULL) { DRM_ERROR("DMUB HPD callback notification was NULL"); return; } if (notify->link_index > adev->dm.dc->link_count) { DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); return; } link_index = notify->link_index; link = adev->dm.dc->links[link_index]; dev = adev->dm.ddev; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (link && aconnector->dc_link == link) { if (notify->type == DMUB_NOTIFICATION_HPD) DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index); else DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n", notify->type, link_index); hpd_aconnector = aconnector; break; } } drm_connector_list_iter_end(&iter); if (hpd_aconnector) { if (notify->type == DMUB_NOTIFICATION_HPD) handle_hpd_irq_helper(hpd_aconnector); else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) handle_hpd_rx_irq(hpd_aconnector); } } /** * register_dmub_notify_callback - Sets callback for DMUB notify * @adev: amdgpu_device pointer * @type: Type of dmub notification * @callback: Dmub interrupt callback function * @dmub_int_thread_offload: offload indicator * * API to register a dmub callback handler for a dmub notification * Also sets indicator whether callback processing to be offloaded. * to dmub interrupt handling thread * Return: true if successfully registered, false if there is existing registration */ static bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) { if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { adev->dm.dmub_callback[type] = callback; adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; } else return false; return true; } static void dm_handle_hpd_work(struct work_struct *work) { struct dmub_hpd_work *dmub_hpd_wrk; dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); if (!dmub_hpd_wrk->dmub_notify) { DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); return; } if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, dmub_hpd_wrk->dmub_notify); } kfree(dmub_hpd_wrk->dmub_notify); kfree(dmub_hpd_wrk); } #define DMUB_TRACE_MAX_READ 64 /** * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt * @interrupt_params: used for determining the Outbox instance * * Handles the Outbox Interrupt * event handler. 
*/ static void dm_dmub_outbox1_low_irq(void *interrupt_params) { struct dmub_notification notify; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_display_manager *dm = &adev->dm; struct dmcub_trace_buf_entry entry = { 0 }; u32 count = 0; struct dmub_hpd_work *dmub_hpd_wrk; struct dc_link *plink = NULL; if (dc_enable_dmub_notifications(adev->dm.dc) && irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { do { dc_stat_get_dmub_notification(adev->dm.dc, &notify); if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { DRM_ERROR("DM: notify type %d invalid!", notify.type); continue; } if (!dm->dmub_callback[notify.type]) { DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); continue; } if (dm->dmub_thread_offload[notify.type] == true) { dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); if (!dmub_hpd_wrk) { DRM_ERROR("Failed to allocate dmub_hpd_wrk"); return; } dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification), GFP_ATOMIC); if (!dmub_hpd_wrk->dmub_notify) { kfree(dmub_hpd_wrk); DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); return; } INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); dmub_hpd_wrk->adev = adev; if (notify.type == DMUB_NOTIFICATION_HPD) { plink = adev->dm.dc->links[notify.link_index]; if (plink) { plink->hpd_status = notify.hpd_status == DP_HPD_PLUG; } } queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); } else { dm->dmub_callback[notify.type](adev, &notify); } } while (notify.pending_notification); } do { if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, entry.param0, entry.param1); DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", entry.trace_code, entry.tick_count, entry.param0, entry.param1); } else break; count++; } while (count <= DMUB_TRACE_MAX_READ); if (count > DMUB_TRACE_MAX_READ) DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); } static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { return 0; } static int dm_set_powergating_state(void *handle, enum amd_powergating_state state) { return 0; } /* Prototypes of private functions */ static int dm_early_init(void *handle); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct dm_compressor_info *compressor = &adev->dm.compressor; struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); struct drm_display_mode *mode; unsigned long max_size = 0; if (adev->dm.dc->fbc_compressor == NULL) return; if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP) return; if (compressor->bo_ptr) return; list_for_each_entry(mode, &connector->modes, head) { if (max_size < mode->htotal * mode->vtotal) max_size = mode->htotal * mode->vtotal; } if (max_size) { int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr, &compressor->gpu_addr, &compressor->cpu_addr); if (r) DRM_ERROR("DM: Failed to initialize FBC\n"); else { adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; DRM_INFO("DM: FBC alloc %lu\n", max_size*4); } } } static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, int pipe, bool *enabled, unsigned char *buf, int max_bytes) { struct drm_device *dev = dev_get_drvdata(kdev); struct 
amdgpu_device *adev = drm_to_adev(dev); struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct amdgpu_dm_connector *aconnector; int ret = 0; *enabled = false; mutex_lock(&adev->dm.audio_lock); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->audio_inst != port) continue; *enabled = true; ret = drm_eld_size(connector->eld); memcpy(buf, connector->eld, min(max_bytes, ret)); break; } drm_connector_list_iter_end(&conn_iter); mutex_unlock(&adev->dm.audio_lock); DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled); return ret; } static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = { .get_eld = amdgpu_dm_audio_component_get_eld, }; static int amdgpu_dm_audio_component_bind(struct device *kdev, struct device *hda_kdev, void *data) { struct drm_device *dev = dev_get_drvdata(kdev); struct amdgpu_device *adev = drm_to_adev(dev); struct drm_audio_component *acomp = data; acomp->ops = &amdgpu_dm_audio_component_ops; acomp->dev = kdev; adev->dm.audio_component = acomp; return 0; } static void amdgpu_dm_audio_component_unbind(struct device *kdev, struct device *hda_kdev, void *data) { struct drm_device *dev = dev_get_drvdata(kdev); struct amdgpu_device *adev = drm_to_adev(dev); struct drm_audio_component *acomp = data; acomp->ops = NULL; acomp->dev = NULL; adev->dm.audio_component = NULL; } static const struct component_ops amdgpu_dm_audio_component_bind_ops = { .bind = amdgpu_dm_audio_component_bind, .unbind = amdgpu_dm_audio_component_unbind, }; static int amdgpu_dm_audio_init(struct amdgpu_device *adev) { int i, ret; if (!amdgpu_audio) return 0; adev->mode_info.audio.enabled = true; adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count; for (i = 0; i < adev->mode_info.audio.num_pins; i++) { adev->mode_info.audio.pin[i].channels = -1; adev->mode_info.audio.pin[i].rate = -1; adev->mode_info.audio.pin[i].bits_per_sample = -1; adev->mode_info.audio.pin[i].status_bits = 0; adev->mode_info.audio.pin[i].category_code = 0; adev->mode_info.audio.pin[i].connected = false; adev->mode_info.audio.pin[i].id = adev->dm.dc->res_pool->audios[i]->inst; adev->mode_info.audio.pin[i].offset = 0; } ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops); if (ret < 0) return ret; adev->dm.audio_registered = true; return 0; } static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) { if (!amdgpu_audio) return; if (!adev->mode_info.audio.enabled) return; if (adev->dm.audio_registered) { component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); adev->dm.audio_registered = false; } /* TODO: Disable audio? 
*/ adev->mode_info.audio.enabled = false; } static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) { struct drm_audio_component *acomp = adev->dm.audio_component; if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { DRM_DEBUG_KMS("Notify ELD: %d\n", pin); acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, pin, -1); } } static int dm_dmub_hw_init(struct amdgpu_device *adev) { const struct dmcub_firmware_header_v1_0 *hdr; struct dmub_srv *dmub_srv = adev->dm.dmub_srv; struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; const struct firmware *dmub_fw = adev->dm.dmub_fw; struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; struct abm *abm = adev->dm.dc->res_pool->abm; struct dmub_srv_hw_params hw_params; enum dmub_status status; const unsigned char *fw_inst_const, *fw_bss_data; u32 i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; if (!dmub_srv) /* DMUB isn't supported on the ASIC. */ return 0; if (!fb_info) { DRM_ERROR("No framebuffer info for DMUB service.\n"); return -EINVAL; } if (!dmub_fw) { /* Firmware required for DMUB support. */ DRM_ERROR("No firmware provided for DMUB.\n"); return -EINVAL; } status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error checking HW support for DMUB: %d\n", status); return -EINVAL; } if (!has_hw_support) { DRM_INFO("DMUB unsupported on ASIC\n"); return 0; } /* Reset DMCUB if it was previously running - before we overwrite its memory. */ status = dmub_srv_hw_reset(dmub_srv); if (status != DMUB_STATUS_OK) DRM_WARN("Error resetting DMUB HW: %d\n", status); hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; fw_inst_const = dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + PSP_HEADER_BYTES; fw_bss_data = dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + le32_to_cpu(hdr->inst_const_bytes); /* Copy firmware and bios info into FB memory. */ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - PSP_HEADER_BYTES - PSP_FOOTER_BYTES; fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, * amdgpu_ucode_init_single_fw will load dmub firmware * fw_inst_const part to cw0; otherwise, the firmware back door load * will be done by dm_dmub_hw_init */ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, fw_inst_const_size); } if (fw_bss_data_size) memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, fw_bss_data_size); /* Copy firmware bios info into FB memory. */ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, adev->bios_size); /* Reset regions that need to be reset. */ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); /* Initialize hardware. 
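 * Populate dmub_srv_hw_params below with the framebuffer layout and the
 * ASIC-specific options, then hand control to dmub_srv_hw_init(); the
 * window regions in fb_info were staged earlier by dm_dmub_sw_init().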
*/ memset(&hw_params, 0, sizeof(hw_params)); hw_params.fb_base = adev->gmc.fb_start; hw_params.fb_offset = adev->vm_manager.vram_base_offset; /* backdoor load firmware and trigger dmub running */ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) hw_params.load_inst_const = true; if (dmcu) hw_params.psp_version = dmcu->psp_version; for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): hw_params.dpia_supported = true; hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; default: break; } status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error initializing DMUB HW: %d\n", status); return -EINVAL; } /* Wait for firmware load to finish. */ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); if (status != DMUB_STATUS_OK) DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); /* Init DMCU and ABM if available. */ if (dmcu && abm) { dmcu->funcs->dmcu_init(dmcu); abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); } if (!adev->dm.dc->ctx->dmub_srv) adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); if (!adev->dm.dc->ctx->dmub_srv) { DRM_ERROR("Couldn't allocate DC DMUB server!\n"); return -ENOMEM; } DRM_INFO("DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); return 0; } static void dm_dmub_hw_resume(struct amdgpu_device *adev) { struct dmub_srv *dmub_srv = adev->dm.dmub_srv; enum dmub_status status; bool init; if (!dmub_srv) { /* DMUB isn't supported on the ASIC. */ return; } status = dmub_srv_is_hw_init(dmub_srv, &init); if (status != DMUB_STATUS_OK) DRM_WARN("DMUB hardware init check failed: %d\n", status); if (status == DMUB_STATUS_OK && init) { /* Wait for firmware load to finish. */ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); if (status != DMUB_STATUS_OK) DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); } else { /* Perform the full hardware initialization. */ dm_dmub_hw_init(adev); } } static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) { u64 pt_base; u32 logical_addr_low; u32 logical_addr_high; u32 agp_base, agp_bot, agp_top; PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; memset(pa_config, 0, sizeof(*pa_config)); agp_base = 0; agp_bot = adev->gmc.agp_start >> 24; agp_top = adev->gmc.agp_end >> 24; /* AGP aperture is disabled */ if (agp_bot == agp_top) { logical_addr_low = adev->gmc.fb_start >> 18; if (adev->apu_flags & AMD_APU_IS_RAVEN2) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the * workaround that increase system aperture high address (add 1) * to get rid of the VM fault and hardware hang. */ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; else logical_addr_high = adev->gmc.fb_end >> 18; } else { logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; if (adev->apu_flags & AMD_APU_IS_RAVEN2) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the * workaround that increase system aperture high address (add 1) * to get rid of the VM fault and hardware hang. 
*/ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); else logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; } pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >> AMDGPU_GPU_PAGE_SHIFT); page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >> AMDGPU_GPU_PAGE_SHIFT); page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >> AMDGPU_GPU_PAGE_SHIFT); page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >> AMDGPU_GPU_PAGE_SHIFT); page_table_base.high_part = upper_32_bits(pt_base); page_table_base.low_part = lower_32_bits(pt_base); pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24; pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; pa_config->system_aperture.fb_base = adev->gmc.fb_start; pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset; pa_config->system_aperture.fb_top = adev->gmc.fb_end; pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support; } static void force_connector_state( struct amdgpu_dm_connector *aconnector, enum drm_connector_force force_state) { struct drm_connector *connector = &aconnector->base; mutex_lock(&connector->dev->mode_config.mutex); aconnector->base.force = force_state; mutex_unlock(&connector->dev->mode_config.mutex); mutex_lock(&aconnector->hpd_lock); drm_kms_helper_connector_hotplug_event(connector); mutex_unlock(&aconnector->hpd_lock); } static void dm_handle_hpd_rx_offload_work(struct work_struct *work) { struct hpd_rx_irq_offload_work *offload_work; struct amdgpu_dm_connector *aconnector; struct dc_link *dc_link; struct amdgpu_device *adev; enum dc_connection_type new_connection_type = dc_connection_none; unsigned long flags; union test_response test_response; memset(&test_response, 0, sizeof(test_response)); offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); aconnector = offload_work->offload_wq->aconnector; if (!aconnector) { DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); goto skip; } adev = drm_to_adev(aconnector->base.dev); dc_link = aconnector->dc_link; mutex_lock(&aconnector->hpd_lock); if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); mutex_unlock(&aconnector->hpd_lock); if (new_connection_type == dc_connection_none) goto skip; if (amdgpu_in_reset(adev)) goto skip; if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT); spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); offload_work->offload_wq->is_handling_mst_msg_rdy_event = false; spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); goto skip; } mutex_lock(&adev->dm.dc_lock); if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) { dc_link_dp_handle_automated_test(dc_link); if (aconnector->timing_changed) { /* force connector disconnect and reconnect */ 
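/* force_connector_state() sends a hotplug event for each transition, so the
 * OFF -> UNSPECIFIED sequence below looks like a physical unplug/replug to
 * userspace.
 */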
force_connector_state(aconnector, DRM_FORCE_OFF); msleep(100); force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); } test_response.bits.ACK = 1; core_link_write_dpcd( dc_link, DP_TEST_RESPONSE, &test_response.raw, sizeof(test_response)); } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && dc_link_check_link_loss_status(dc_link, &offload_work->data) && dc_link_dp_allow_hpd_rx_irq(dc_link)) { /* offload_work->data is from handle_hpd_rx_irq-> * schedule_hpd_rx_offload_work.this is defer handle * for hpd short pulse. upon here, link status may be * changed, need get latest link status from dpcd * registers. if link status is good, skip run link * training again. */ union hpd_irq_data irq_data; memset(&irq_data, 0, sizeof(irq_data)); /* before dc_link_dp_handle_link_loss, allow new link lost handle * request be added to work queue if link lost at end of dc_link_ * dp_handle_link_loss */ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); offload_work->offload_wq->is_handling_link_loss = false; spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) && dc_link_check_link_loss_status(dc_link, &irq_data)) dc_link_dp_handle_link_loss(dc_link); } mutex_unlock(&adev->dm.dc_lock); skip: kfree(offload_work); } static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) { int max_caps = dc->caps.max_links; int i = 0; struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); if (!hpd_rx_offload_wq) return NULL; for (i = 0; i < max_caps; i++) { hpd_rx_offload_wq[i].wq = create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); if (hpd_rx_offload_wq[i].wq == NULL) { DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); goto out_err; } spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); } return hpd_rx_offload_wq; out_err: for (i = 0; i < max_caps; i++) { if (hpd_rx_offload_wq[i].wq) destroy_workqueue(hpd_rx_offload_wq[i].wq); } kfree(hpd_rx_offload_wq); return NULL; } struct amdgpu_stutter_quirk { u16 chip_vendor; u16 chip_device; u16 subsys_vendor; u16 subsys_device; u8 revision; }; static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, { 0, 0, 0, 0, 0 }, }; static bool dm_should_disable_stutter(struct pci_dev *pdev) { const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; while (p && p->chip_device != 0) { if (pdev->vendor == p->chip_vendor && pdev->device == p->chip_device && pdev->subsystem_vendor == p->subsys_vendor && pdev->subsystem_device == p->subsys_device && pdev->revision == p->revision) { return true; } ++p; } return false; } static const struct dmi_system_id hpd_disconnect_quirk_table[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 
7010"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), }, }, {} /* TODO: refactor this from a fixed table to a dynamic option */ }; static void retrieve_dmi_info(struct amdgpu_display_manager *dm) { const struct dmi_system_id *dmi_id; dm->aux_hpd_discon_quirk = false; dmi_id = dmi_first_match(hpd_disconnect_quirk_table); if (dmi_id) { dm->aux_hpd_discon_quirk = true; DRM_INFO("aux_hpd_discon_quirk attached\n"); } } static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; struct dc_callback_init init_params; int r; adev->dm.ddev = adev_to_drm(adev); adev->dm.adev = adev; /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); memset(&init_params, 0, sizeof(init_params)); mutex_init(&adev->dm.dpia_aux_lock); mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); if (amdgpu_dm_irq_init(adev)) { DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); goto error; } init_data.asic_id.chip_family = adev->family; init_data.asic_id.pci_revision_id = adev->pdev->revision; init_data.asic_id.hw_internal_rev = adev->external_rev_id; init_data.asic_id.chip_id = adev->pdev->device; init_data.asic_id.vram_width = adev->gmc.vram_width; /* TODO: initialize init_data.asic_id.vram_type here!!!! */ init_data.asic_id.atombios_base_address = adev->mode_info.atom_context->bios; init_data.driver = adev; adev->dm.cgs_device = amdgpu_cgs_create_device(adev); if (!adev->dm.cgs_device) { DRM_ERROR("amdgpu: failed to create cgs device.\n"); goto error; } init_data.cgs_device = adev->dm.cgs_device; init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 1, 0): switch (adev->dm.dmcub_fw_version) { case 0: /* development */ case 0x1: /* linux-firmware.git hash 6d9f399 */ case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ init_data.flags.disable_dmcu = false; break; default: init_data.flags.disable_dmcu = true; } break; case IP_VERSION(2, 0, 3): init_data.flags.disable_dmcu = true; break; default: break; } switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: init_data.flags.gpu_vm_support = true; break; default: switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): /* enable S/G on PCO and RV2 */ if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || (adev->apu_flags & AMD_APU_IS_PICASSO)) init_data.flags.gpu_vm_support = true; break; case IP_VERSION(2, 1, 0): case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): init_data.flags.gpu_vm_support = true; break; default: break; } break; } if (init_data.flags.gpu_vm_support && (amdgpu_sg_display == 0)) init_data.flags.gpu_vm_support = false; if (init_data.flags.gpu_vm_support) adev->mode_info.gpu_vm_support = true; if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) init_data.flags.multi_mon_pp_mclk_switch = true; if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) init_data.flags.disable_fractional_pwm = true; if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) init_data.flags.edp_no_power_sequencing = true; if 
(amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
	} else {
		/* dc_create() returned NULL, so do not dereference adev->dm.dc here. */
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
	adev->dm.dc->debug.ignore_cable_id = true;

	/* TODO: There is a new drm mst change where the freedom of
	 * vc_next_start_slot update is revoked/moved into drm, instead of in
	 * driver. This forces us to make sure to get vc_next_start_slot updated
	 * in drm function each time without considering if mst_state is active
	 * or not. Otherwise, next time hotplug will give wrong start_slot
	 * number. We are implementing a temporary solution to even notify drm
	 * mst deallocation when link is no longer of MST type when uncommitting
	 * the stream so we will have more time to work on a proper solution.
	 * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we
	 * should notify drm to do a complete "reset" of its states and stop
	 * calling further drm mst functions when link is no longer of an MST
	 * type. This could happen when we unplug MST hubs/displays. When
	 * uncommit stream comes later after unplug, we should just reset
	 * hardware states only.
*/ adev->dm.dc->debug.temp_mst_deallocation_sequence = true; if (adev->dm.dc->caps.dp_hdmi21_pcon_support) DRM_INFO("DP-HDMI FRL PCON supported\n"); r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); goto error; } dc_hardware_init(adev->dm.dc); adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); if (!adev->dm.hpd_rx_offload_wq) { DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); goto error; } if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { struct dc_phy_addr_space_config pa_config; mmhub_read_system_context(adev, &pa_config); // Call the DC init_memory func dc_setup_system_context(adev->dm.dc, &pa_config); } adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( "amdgpu: failed to initialize freesync_module.\n"); } else DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", adev->dm.freesync_module); amdgpu_dm_init_color_mod(); if (adev->dm.dc->caps.max_links > 0) { adev->dm.vblank_control_workqueue = create_singlethread_workqueue("dm_vblank_control_workqueue"); if (!adev->dm.vblank_control_workqueue) DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); } if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); if (!adev->dm.hdcp_workqueue) DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); else DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); dc_init_callbacks(adev->dm.dc, &init_params); } if (dc_is_dmub_outbox_supported(adev->dm.dc)) { init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); goto error; } adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); if (!adev->dm.delayed_hpd_wq) { DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); goto error; } amdgpu_dm_outbox_init(adev); if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, dmub_aux_setconfig_callback, false)) { DRM_ERROR("amdgpu: fail to register dmub aux callback"); goto error; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { DRM_ERROR("amdgpu: fail to register dmub hpd callback"); goto error; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { DRM_ERROR("amdgpu: fail to register dmub hpd callback"); goto error; } } /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. * It is expected that DMUB will resend any pending notifications at this point, for * example HPD from DPIA. */ if (dc_is_dmub_outbox_supported(adev->dm.dc)) { dc_enable_dmub_outbox(adev->dm.dc); /* DPIA trace goes to dmesg logs only if outbox is enabled */ if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) dc_dmub_srv_enable_dpia_trace(adev->dm.dc); } if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); goto error; } /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev); /* TODO: Add_display_info? 
*/ /* TODO use dynamic cursor width */ adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); goto error; } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); if (!adev->dm.secure_display_ctxs) DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); #endif DRM_DEBUG_DRIVER("KMS initialized.\n"); return 0; error: amdgpu_dm_fini(adev); return -EINVAL; } static int amdgpu_dm_early_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_dm_audio_fini(adev); return 0; } static void amdgpu_dm_fini(struct amdgpu_device *adev) { int i; if (adev->dm.vblank_control_workqueue) { destroy_workqueue(adev->dm.vblank_control_workqueue); adev->dm.vblank_control_workqueue = NULL; } amdgpu_dm_destroy_drm_device(&adev->dm); #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (adev->dm.secure_display_ctxs) { for (i = 0; i < adev->mode_info.num_crtc; i++) { if (adev->dm.secure_display_ctxs[i].crtc) { flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); } } kfree(adev->dm.secure_display_ctxs); adev->dm.secure_display_ctxs = NULL; } #endif if (adev->dm.hdcp_workqueue) { hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); adev->dm.hdcp_workqueue = NULL; } if (adev->dm.dc) dc_deinit_callbacks(adev->dm.dc); if (adev->dm.dc) dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); if (dc_enable_dmub_notifications(adev->dm.dc)) { kfree(adev->dm.dmub_notify); adev->dm.dmub_notify = NULL; destroy_workqueue(adev->dm.delayed_hpd_wq); adev->dm.delayed_hpd_wq = NULL; } if (adev->dm.dmub_bo) amdgpu_bo_free_kernel(&adev->dm.dmub_bo, &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); if (adev->dm.hpd_rx_offload_wq) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); adev->dm.hpd_rx_offload_wq[i].wq = NULL; } } kfree(adev->dm.hpd_rx_offload_wq); adev->dm.hpd_rx_offload_wq = NULL; } /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); /* * TODO: pageflip, vlank interrupt * * amdgpu_dm_irq_fini(adev); */ if (adev->dm.cgs_device) { amdgpu_cgs_destroy_device(adev->dm.cgs_device); adev->dm.cgs_device = NULL; } if (adev->dm.freesync_module) { mod_freesync_destroy(adev->dm.freesync_module); adev->dm.freesync_module = NULL; } mutex_destroy(&adev->dm.audio_lock); mutex_destroy(&adev->dm.dc_lock); mutex_destroy(&adev->dm.dpia_aux_lock); } static int load_dmcu_fw(struct amdgpu_device *adev) { const char *fw_name_dmcu = NULL; int r; const struct dmcu_firmware_header_v1_0 *hdr; switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: #endif case CHIP_BONAIRE: case CHIP_HAWAII: case CHIP_KAVERI: case CHIP_KABINI: case CHIP_MULLINS: case CHIP_TONGA: case CHIP_FIJI: case CHIP_CARRIZO: case CHIP_STONEY: case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: case CHIP_VEGAM: case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: return 0; case CHIP_NAVI12: fw_name_dmcu = FIRMWARE_NAVI12_DMCU; break; case CHIP_RAVEN: if (ASICREV_IS_PICASSO(adev->external_rev_id)) fw_name_dmcu = FIRMWARE_RAVEN_DMCU; else if 
(ASICREV_IS_RAVEN2(adev->external_rev_id)) fw_name_dmcu = FIRMWARE_RAVEN_DMCU; else return 0; break; default: switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 3): case IP_VERSION(2, 0, 0): case IP_VERSION(2, 1, 0): case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 2): case IP_VERSION(3, 0, 3): case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): return 0; default: break; } DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); return -EINVAL; } if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); return 0; } r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); if (r == -ENODEV) { /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); adev->dm.fw_dmcu = NULL; return 0; } if (r) { dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", fw_name_dmcu); amdgpu_ucode_release(&adev->dm.fw_dmcu); return r; } hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); return 0; } static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) { struct amdgpu_device *adev = ctx; return dm_read_reg(adev->dm.dc->ctx, address); } static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, uint32_t value) { struct amdgpu_device *adev = ctx; return dm_write_reg(adev->dm.dc->ctx, address, value); } static int dm_dmub_sw_init(struct amdgpu_device *adev) { struct dmub_srv_create_params create_params; struct dmub_srv_region_params region_params; struct dmub_srv_region_info region_info; struct dmub_srv_fb_params fb_params; struct dmub_srv_fb_info *fb_info; struct dmub_srv *dmub_srv; const struct dmcub_firmware_header_v1_0 *hdr; enum dmub_asic dmub_asic; enum dmub_status status; int r; switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 1, 0): dmub_asic = DMUB_ASIC_DCN21; break; case IP_VERSION(3, 0, 0): dmub_asic = DMUB_ASIC_DCN30; break; case IP_VERSION(3, 0, 1): dmub_asic = DMUB_ASIC_DCN301; break; case IP_VERSION(3, 0, 2): dmub_asic = DMUB_ASIC_DCN302; break; case IP_VERSION(3, 0, 3): dmub_asic = DMUB_ASIC_DCN303; break; case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; break; case IP_VERSION(3, 1, 4): dmub_asic = DMUB_ASIC_DCN314; break; case IP_VERSION(3, 1, 5): dmub_asic = DMUB_ASIC_DCN315; break; case IP_VERSION(3, 1, 6): dmub_asic = DMUB_ASIC_DCN316; break; case IP_VERSION(3, 2, 0): dmub_asic = DMUB_ASIC_DCN32; break; case IP_VERSION(3, 2, 1): dmub_asic = DMUB_ASIC_DCN321; break; default: /* ASIC doesn't support DMUB. 
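 * Returning 0 rather than an error: DMUB is optional, and ASICs without it
 * simply skip creating the DMUB service below.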
*/ return 0; } hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = AMDGPU_UCODE_ID_DMCUB; adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw; adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", adev->dm.dmcub_fw_version); } adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); dmub_srv = adev->dm.dmub_srv; if (!dmub_srv) { DRM_ERROR("Failed to allocate DMUB service!\n"); return -ENOMEM; } memset(&create_params, 0, sizeof(create_params)); create_params.user_ctx = adev; create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; create_params.asic = dmub_asic; /* Create the DMUB service. */ status = dmub_srv_create(dmub_srv, &create_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error creating DMUB service: %d\n", status); return -EINVAL; } /* Calculate the size of all the regions for the DMUB service. */ memset(&region_params, 0, sizeof(region_params)); region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - PSP_HEADER_BYTES - PSP_FOOTER_BYTES; region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); region_params.vbios_size = adev->bios_size; region_params.fw_bss_data = region_params.bss_data_size ? adev->dm.dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + le32_to_cpu(hdr->inst_const_bytes) : NULL; region_params.fw_inst_const = adev->dm.dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + PSP_HEADER_BYTES; status = dmub_srv_calc_region_info(dmub_srv, &region_params, &region_info); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error calculating DMUB region info: %d\n", status); return -EINVAL; } /* * Allocate a framebuffer based on the total size of all the regions. * TODO: Move this into GART. */ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, &adev->dm.dmub_bo, &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); if (r) return r; /* Rebase the regions on the framebuffer address. 
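 * dmub_srv_calc_fb_info() turns the region layout computed above into CPU
 * and GPU addresses inside the BO that was just allocated.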
*/ memset(&fb_params, 0, sizeof(fb_params)); fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; fb_params.region_info = &region_info; adev->dm.dmub_fb_info = kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); fb_info = adev->dm.dmub_fb_info; if (!fb_info) { DRM_ERROR( "Failed to allocate framebuffer info for DMUB service!\n"); return -ENOMEM; } status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error calculating DMUB FB info: %d\n", status); return -EINVAL; } return 0; } static int dm_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; r = dm_dmub_sw_init(adev); if (r) return r; return load_dmcu_fw(adev); } static int dm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; kfree(adev->dm.dmub_fb_info); adev->dm.dmub_fb_info = NULL; if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; } amdgpu_ucode_release(&adev->dm.dmub_fw); amdgpu_ucode_release(&adev->dm.fw_dmcu); return 0; } static int detect_mst_link_for_all_connectors(struct drm_device *dev) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; int ret = 0; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { DRM_ERROR("DM_MST: Failed to start MST\n"); aconnector->dc_link->type = dc_connection_single; ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, aconnector->dc_link); break; } } } drm_connector_list_iter_end(&iter); return ret; } static int dm_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct dmcu_iram_parameters params; unsigned int linear_lut[16]; int i; struct dmcu *dmcu = NULL; dmcu = adev->dm.dc->res_pool->dmcu; for (i = 0; i < 16; i++) linear_lut[i] = 0xFFFF * i / 15; params.set = 0; params.backlight_ramping_override = false; params.backlight_ramping_start = 0xCCCC; params.backlight_ramping_reduction = 0xCCCCCCCC; params.backlight_lut_array_size = 16; params.backlight_lut_array = linear_lut; /* Min backlight level after ABM reduction, Don't allow below 1% * 0xFFFF x 0.01 = 0x28F */ params.min_abm_backlight = 0x28F; /* In the case where abm is implemented on dmcub, * dmcu object will be null. * ABM 2.4 and up are implemented on dmcub. 
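 * In that case the ABM configuration is programmed per eDP link through
 * dmub_init_abm_config() instead of dmcu_load_iram().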
*/ if (dmcu) { if (!dmcu_load_iram(dmcu, params)) return -EINVAL; } else if (adev->dm.dc->ctx->dmub_srv) { struct dc_link *edp_links[MAX_NUM_EDP]; int edp_num; dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); for (i = 0; i < edp_num; i++) { if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) return -EINVAL; } } return detect_mst_link_for_all_connectors(adev_to_drm(adev)); } static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) { int ret; u8 guid[16]; u64 tmp64; mutex_lock(&mgr->lock); if (!mgr->mst_primary) goto out_fail; if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); if (ret < 0) { drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); goto out_fail; } /* Some hubs forget their guids after they resume */ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); if (ret != 16) { drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } if (memchr_inv(guid, 0, 16) == NULL) { tmp64 = get_jiffies_64(); memcpy(&guid[0], &tmp64, sizeof(u64)); memcpy(&guid[8], &tmp64, sizeof(u64)); ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16); if (ret != 16) { drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); goto out_fail; } } memcpy(mgr->mst_primary->guid, guid, 16); out_fail: mutex_unlock(&mgr->lock); } static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) continue; mgr = &aconnector->mst_mgr; if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { /* if extended timeout is supported in hardware, * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer * CTS 4.2.1.1 regression introduced by CTS specs requirement update. */ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); if (!dp_is_lttpr_present(aconnector->dc_link)) try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); /* TODO: move resume_mst_branch_status() into drm mst resume again * once topology probing work is pulled out from mst resume into mst * resume 2nd step. mst resume 2nd step should be called after old * state getting restored (i.e. drm_atomic_helper_resume()). */ resume_mst_branch_status(mgr); } } drm_connector_list_iter_end(&iter); } static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) { int ret = 0; /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends * on window driver dc implementation. * For Navi1x, clock settings of dcn watermarks are fixed. the settings * should be passed to smu during boot up and resume from s3. * boot up: dc calculate dcn watermark clock settings within dc_create, * dcn20_resource_construct * then call pplib functions below to pass the settings to smu: * smu_set_watermarks_for_clock_ranges * smu_set_watermarks_table * navi10_set_watermarks_table * smu_write_watermarks_table * * For Renoir, clock settings of dcn watermark are also fixed values. 
* dc has implemented different flow for window driver: * dc_hardware_init / dc_set_power_state * dcn10_init_hw * notify_wm_ranges * set_wm_ranges * -- Linux * smu_set_watermarks_for_clock_ranges * renoir_set_watermarks_table * smu_write_watermarks_table * * For Linux, * dc_hardware_init -> amdgpu_dm_init * dc_set_power_state --> dm_resume * * therefore, this function apply to navi10/12/14 but not Renoir * * */ switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 0): break; default: return 0; } ret = amdgpu_dpm_write_watermarks_table(adev); if (ret) { DRM_ERROR("Failed to update WMTABLE!\n"); return ret; } return 0; } /** * dm_hw_init() - Initialize DC device * @handle: The base driver device containing the amdgpu_dm device. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. * * Although the function implies hardware initialization, both hardware and * software are initialized here. Splitting them out to their relevant init * hooks is a future TODO item. * * Some notable things that are initialized here: * * - Display Core, both software and hardware * - DC modules that we need (freesync and color management) * - DRM software states * - Interrupt sources and handlers * - Vblank support * - Debug FS entries, if enabled */ static int dm_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* Create DAL display manager */ amdgpu_dm_init(adev); amdgpu_dm_hpd_init(adev); return 0; } /** * dm_hw_fini() - Teardown DC device * @handle: The base driver device containing the amdgpu_dm device. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. This involves cleaning up the DRM device, DC, and any modules that * were loaded. Also flush IRQ workqueues and disable them. */ static int dm_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_dm_hpd_fini(adev); amdgpu_dm_irq_fini(adev); amdgpu_dm_fini(adev); return 0; } static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, struct dc_state *state, bool enable) { enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc; int rc = -EBUSY; int i = 0; for (i = 0; i < state->stream_count; i++) { acrtc = get_crtc_by_otg_inst( adev, state->stream_status[i].primary_otg_inst); if (acrtc && state->stream_status[i].plane_count != 0) { irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; if (rc) DRM_WARN("Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); if (enable) { if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); } else rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); if (rc) DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; /* During gpu-reset we disable and then enable vblank irq, so * don't use amdgpu_irq_get/put() to avoid refcount change. */ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); } } } static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) { struct dc_state *context = NULL; enum dc_status res = DC_ERROR_UNEXPECTED; int i; struct dc_stream_state *del_streams[MAX_PIPES]; int del_streams_count = 0; memset(del_streams, 0, sizeof(del_streams)); context = dc_create_state(dc); if (context == NULL) goto context_alloc_fail; dc_resource_state_copy_construct_current(dc, context); /* First remove from context all streams */ for (i = 0; i < context->stream_count; i++) { struct dc_stream_state *stream = context->streams[i]; del_streams[del_streams_count++] = stream; } /* Remove all planes for removed streams and then remove the streams */ for (i = 0; i < del_streams_count; i++) { if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; goto fail; } res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } res = dc_commit_streams(dc, context->streams, context->stream_count); fail: dc_release_state(context); context_alloc_fail: return res; } static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) { int i; if (dm->hpd_rx_offload_wq) { for (i = 0; i < dm->dc->caps.max_links; i++) flush_workqueue(dm->hpd_rx_offload_wq[i].wq); } } static int dm_suspend(void *handle) { struct amdgpu_device *adev = handle; struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; if (amdgpu_in_reset(adev)) { mutex_lock(&dm->dc_lock); dc_allow_idle_optimizations(adev->dm.dc, false); dm->cached_dc_state = dc_copy_state(dm->dc->current_state); dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); amdgpu_dm_commit_zero_streams(dm->dc); amdgpu_dm_irq_suspend(adev); hpd_rx_irq_work_suspend(dm); return ret; } WARN_ON(adev->dm.cached_state); adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); s3_handle_mst(adev_to_drm(adev), true); amdgpu_dm_irq_suspend(adev); hpd_rx_irq_work_suspend(dm); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); return 0; } struct amdgpu_dm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc) { u32 i; struct drm_connector_state *new_con_state; struct drm_connector *connector; struct drm_crtc *crtc_from_state; for_each_new_connector_in_state(state, connector, new_con_state, i) { crtc_from_state = new_con_state->crtc; if (crtc_from_state == crtc) return to_amdgpu_dm_connector(connector); } return NULL; } static void emulated_link_detect(struct dc_link *link) { struct dc_sink_init_data sink_init_data = { 0 }; struct display_sink_capability sink_caps = { 0 }; enum dc_edid_status edid_status; struct dc_context *dc_ctx = link->ctx; struct dc_sink *sink = NULL; struct dc_sink *prev_sink = NULL; link->type = dc_connection_none; prev_sink = link->local_sink; if (prev_sink) dc_sink_release(prev_sink); switch (link->connector_signal) { case SIGNAL_TYPE_HDMI_TYPE_A: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; break; } case SIGNAL_TYPE_DVI_SINGLE_LINK: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; break; } case SIGNAL_TYPE_DVI_DUAL_LINK: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; } case SIGNAL_TYPE_LVDS: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_LVDS; break; } case SIGNAL_TYPE_EDP: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = 
SIGNAL_TYPE_EDP; break; } case SIGNAL_TYPE_DISPLAY_PORT: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_VIRTUAL; break; } default: DC_ERROR("Invalid connector type! signal:%d\n", link->connector_signal); return; } sink_init_data.link = link; sink_init_data.sink_signal = sink_caps.signal; sink = dc_sink_create(&sink_init_data); if (!sink) { DC_ERROR("Failed to create sink!\n"); return; } /* dc_sink_create returns a new reference */ link->local_sink = sink; edid_status = dm_helpers_read_local_edid( link->ctx, link, sink); if (edid_status != EDID_OK) DC_ERROR("Failed to read EDID"); } static void dm_gpureset_commit_state(struct dc_state *dc_state, struct amdgpu_display_manager *dm) { struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; struct dc_scaling_info scaling_infos[MAX_SURFACES]; struct dc_flip_addrs flip_addrs[MAX_SURFACES]; struct dc_stream_update stream_update; } *bundle; int k, m; bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { dm_error("Failed to allocate update bundle\n"); goto cleanup; } for (k = 0; k < dc_state->stream_count; k++) { bundle->stream_update.stream = dc_state->streams[k]; for (m = 0; m < dc_state->stream_status->plane_count; m++) { bundle->surface_updates[m].surface = dc_state->stream_status->plane_states[m]; bundle->surface_updates[m].surface->force_full_update = true; } update_planes_and_stream_adapter(dm->dc, UPDATE_TYPE_FULL, dc_state->stream_status->plane_count, dc_state->streams[k], &bundle->stream_update, bundle->surface_updates); } cleanup: kfree(bundle); } static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; struct drm_device *ddev = adev_to_drm(adev); struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state; struct drm_plane *plane; struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; int i, r, j, ret; bool need_hotplug = false; if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; /* * The dc->current_state is backed up into dm->cached_dc_state * before we commit 0 streams. * * DC will clear link encoder assignments on the real state * but the changes won't propagate over to the copy we made * before the 0 streams commit. * * DC expects that link encoder assignments are *not* valid * when committing a state, so as a workaround we can copy * off of the current state. * * We lose the previous assignments, but we had already * commit 0 streams anyway. 
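 * link_enc_cfg_copy() below refreshes the cached state's link encoder
 * assignments from the current state before it is committed again.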
*/ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); dc_resume(dm->dc); amdgpu_dm_irq_resume_early(adev); for (i = 0; i < dc_state->stream_count; i++) { dc_state->streams[i]->mode_changed = true; for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { dc_state->stream_status[i].plane_states[j]->update_flags.raw = 0xffffffff; } } if (dc_is_dmub_outbox_supported(adev->dm.dc)) { amdgpu_dm_outbox_init(adev); dc_enable_dmub_outbox(adev->dm.dc); } WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); dm_gpureset_commit_state(dm->cached_dc_state, dm); dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); dc_release_state(dm->cached_dc_state); dm->cached_dc_state = NULL; amdgpu_dm_irq_resume_late(adev); mutex_unlock(&dm->dc_lock); return 0; } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_release_state(dm_state->context); dm_state->context = dc_create_state(dm->dc); /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); /* Before powering on DC we need to re-initialize DMUB. */ dm_dmub_hw_resume(adev); /* Re-enable outbox interrupts for DPIA. */ if (dc_is_dmub_outbox_supported(adev->dm.dc)) { amdgpu_dm_outbox_init(adev); dc_enable_dmub_outbox(adev->dm.dc); } /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); /* program HPD filter */ dc_resume(dm->dc); /* * early enable HPD Rx IRQ, should be done before set mode as short * pulse interrupts are used for MST */ amdgpu_dm_irq_resume_early(adev); /* On resume we need to rewrite the MSTM control bits to enable MST*/ s3_handle_mst(ddev, false); /* Do detection*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->dc_link) continue; /* * this is the case when traversing through already created end sink * MST connectors, should be skipped */ if (aconnector && aconnector->mst_root) continue; mutex_lock(&aconnector->hpd_lock); if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); } else { mutex_lock(&dm->dc_lock); dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); mutex_unlock(&dm->dc_lock); } if (aconnector->fake_enable && aconnector->dc_link->local_sink) aconnector->fake_enable = false; if (aconnector->dc_sink) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); } drm_connector_list_iter_end(&iter); /* Force mode set in atomic commit */ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) new_crtc_state->active_changed = true; /* * atomic_check is expected to create the dc states. We need to release * them here, since they were duplicated as part of the suspend * procedure. 
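 * The WARN_ON(kref_read(...) > 1) checks catch cases where something else
 * still holds a reference to a duplicated stream or plane state.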
*/ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) { WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); dc_stream_release(dm_new_crtc_state->stream); dm_new_crtc_state->stream = NULL; } } for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { dm_new_plane_state = to_dm_plane_state(new_plane_state); if (dm_new_plane_state->dc_state) { WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); dc_plane_state_release(dm_new_plane_state->dc_state); dm_new_plane_state->dc_state = NULL; } } drm_atomic_helper_resume(ddev, dm->cached_state); dm->cached_state = NULL; /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) continue; ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); if (ret < 0) { dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, aconnector->dc_link); need_hotplug = true; } } drm_connector_list_iter_end(&iter); if (need_hotplug) drm_kms_helper_hotplug_event(ddev); amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); return 0; } /** * DOC: DM Lifecycle * * DM (and consequently DC) is registered in the amdgpu base driver as a IP * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to * the base driver's device list to be initialized and torn down accordingly. * * The functions to do so are provided as hooks in &struct amd_ip_funcs. */ static const struct amd_ip_funcs amdgpu_dm_funcs = { .name = "dm", .early_init = dm_early_init, .late_init = dm_late_init, .sw_init = dm_sw_init, .sw_fini = dm_sw_fini, .early_fini = amdgpu_dm_early_fini, .hw_init = dm_hw_init, .hw_fini = dm_hw_fini, .suspend = dm_suspend, .resume = dm_resume, .is_idle = dm_is_idle, .wait_for_idle = dm_wait_for_idle, .check_soft_reset = dm_check_soft_reset, .soft_reset = dm_soft_reset, .set_clockgating_state = dm_set_clockgating_state, .set_powergating_state = dm_set_powergating_state, }; const struct amdgpu_ip_block_version dm_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 1, .minor = 0, .rev = 0, .funcs = &amdgpu_dm_funcs, }; /** * DOC: atomic * * *WIP* */ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { .fb_create = amdgpu_display_user_framebuffer_create, .get_format_info = amdgpu_dm_plane_get_format_info, .atomic_check = amdgpu_dm_atomic_check, .atomic_commit = drm_atomic_helper_commit, }; static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, }; static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { struct amdgpu_dm_backlight_caps *caps; struct drm_connector *conn_base; struct amdgpu_device *adev; struct drm_luminance_range_info *luminance_range; if (aconnector->bl_idx == -1 || aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) return; conn_base = &aconnector->base; adev = drm_to_adev(conn_base->dev); caps = &adev->dm.backlight_caps[aconnector->bl_idx]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; if (caps->ext_caps->bits.oled == 1 /* * || * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || * caps->ext_caps->bits.hdr_aux_backlight_control == 1 */) 
caps->aux_support = true; if (amdgpu_backlight == 0) caps->aux_support = false; else if (amdgpu_backlight == 1) caps->aux_support = true; luminance_range = &conn_base->display_info.luminance_range; if (luminance_range->max_luminance) { caps->aux_min_input_signal = luminance_range->min_luminance; caps->aux_max_input_signal = luminance_range->max_luminance; } else { caps->aux_min_input_signal = 0; caps->aux_max_input_signal = 512; } } void amdgpu_dm_update_connector_after_detect( struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct dc_sink *sink; /* MST handled by drm_mst framework */ if (aconnector->mst_mgr.mst_state == true) return; sink = aconnector->dc_link->local_sink; if (sink) dc_sink_retain(sink); /* * Edid mgmt connector gets first update only in mode_valid hook and then * the connector sink is set to either fake or physical sink depends on link status. * Skip if already done during boot. */ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && aconnector->dc_em_sink) { /* * For S3 resume with headless use eml_sink to fake stream * because on resume connector->sink is set to NULL */ mutex_lock(&dev->mode_config.mutex); if (sink) { if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); /* * retain and release below are used to * bump up refcount for sink because the link doesn't point * to it anymore after disconnect, so on next crtc to connector * reshuffle by UMD we will get into unwanted dc_sink release */ dc_sink_release(aconnector->dc_sink); } aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); if (!aconnector->dc_sink) { aconnector->dc_sink = aconnector->dc_em_sink; dc_sink_retain(aconnector->dc_sink); } } mutex_unlock(&dev->mode_config.mutex); if (sink) dc_sink_release(sink); return; } /* * TODO: temporary guard to look for proper fix * if this sink is MST sink, we should not do anything */ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { dc_sink_release(sink); return; } if (aconnector->dc_sink == sink) { /* * We got a DP short pulse (Link Loss, DP CTS, etc...). * Do nothing!! */ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", aconnector->connector_id); if (sink) dc_sink_release(sink); return; } DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", aconnector->connector_id, aconnector->dc_sink, sink); mutex_lock(&dev->mode_config.mutex); /* * 1. Update status of the drm connector * 2. Send an event and let userspace tell us what to do */ if (sink) { /* * TODO: check if we still need the S3 mode update workaround. * If yes, put it here. 
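 * The path below swaps in the new dc_sink, refreshes the cached EDID,
 * freesync and backlight capabilities, and updates the DRM EDID property.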
*/ if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); dc_sink_release(aconnector->dc_sink); } aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { aconnector->edid = NULL; if (aconnector->dc_link->aux_mode) { drm_dp_cec_unset_edid( &aconnector->dm_dp_aux.aux); } } else { aconnector->edid = (struct edid *)sink->dc_edid.raw_edid; if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); } if (!aconnector->timing_requested) { aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); if (!aconnector->timing_requested) dm_error("failed to create aconnector->requested_timing\n"); } drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); update_connector_ext_caps(aconnector); } else { drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; kfree(aconnector->timing_requested); aconnector->timing_requested = NULL; /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; } mutex_unlock(&dev->mode_config.mutex); update_subconnector_property(aconnector); if (sink) dc_sink_release(sink); } static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); bool ret = false; if (adev->dm.disable_hpd_irq) return; /* * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. 
*/ mutex_lock(&aconnector->hpd_lock); if (adev->dm.hdcp_workqueue) { hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); dm_con_state->update_hdcp = true; } if (aconnector->fake_enable) aconnector->fake_enable = false; aconnector->timing_changed = false; if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) drm_kms_helper_connector_hotplug_event(connector); } else { mutex_lock(&adev->dm.dc_lock); ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); mutex_unlock(&adev->dm.dc_lock); if (ret) { amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) drm_kms_helper_connector_hotplug_event(connector); } } mutex_unlock(&aconnector->hpd_lock); } static void handle_hpd_irq(void *param) { struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; handle_hpd_irq_helper(aconnector); } static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, union hpd_irq_data hpd_irq_data) { struct hpd_rx_irq_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); if (!offload_work) { DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); return; } INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); offload_work->data = hpd_irq_data; offload_work->offload_wq = offload_wq; queue_work(offload_wq->wq, &offload_work->work); DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); } static void handle_hpd_rx_irq(void *param) { struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; bool result = false; enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); union hpd_irq_data hpd_irq_data; bool link_loss = false; bool has_left_work = false; int idx = dc_link->link_index; struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); if (adev->dm.disable_hpd_irq) return; /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio * conflict, after implement i2c helper, this mutex should be * retired. 
*/ mutex_lock(&aconnector->hpd_lock); result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, &link_loss, true, &has_left_work); if (!has_left_work) goto out; if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); goto out; } if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { bool skip = false; /* * DOWN_REP_MSG_RDY is also handled by polling method * mgr->cbs->poll_hpd_irq() */ spin_lock(&offload_wq->offload_lock); skip = offload_wq->is_handling_mst_msg_rdy_event; if (!skip) offload_wq->is_handling_mst_msg_rdy_event = true; spin_unlock(&offload_wq->offload_lock); if (!skip) schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); goto out; } if (link_loss) { bool skip = false; spin_lock(&offload_wq->offload_lock); skip = offload_wq->is_handling_link_loss; if (!skip) offload_wq->is_handling_link_loss = true; spin_unlock(&offload_wq->offload_lock); if (!skip) schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); goto out; } } out: if (result && !is_mst_root_connector) { /* Downstream Port status changed. */ if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(dc_link); if (aconnector->fake_enable) aconnector->fake_enable = false; amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); drm_kms_helper_connector_hotplug_event(connector); } else { bool ret = false; mutex_lock(&adev->dm.dc_lock); ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); mutex_unlock(&adev->dm.dc_lock); if (ret) { if (aconnector->fake_enable) aconnector->fake_enable = false; amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); drm_kms_helper_connector_hotplug_event(connector); } } } if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { if (adev->dm.hdcp_workqueue) hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); } if (dc_link->type != dc_connection_mst_branch) drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); mutex_unlock(&aconnector->hpd_lock); } static void register_hpd_handlers(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct amdgpu_dm_connector *aconnector; const struct dc_link *dc_link; struct dc_interrupt_params int_params = {0}; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { aconnector = to_amdgpu_dm_connector(connector); dc_link = aconnector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_link->irq_source_hpd; amdgpu_dm_irq_register_interrupt(adev, &int_params, handle_hpd_irq, (void *) aconnector); } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { /* Also register for DP short pulse (hpd_rx). 
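	 * hpd_rx handles DP short-pulse events: automated test requests, MST
	 * sideband messages and link loss are deferred to the hpd_rx offload
	 * work queue, while CP_IRQ and CEC are serviced inline.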
*/ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_link->irq_source_hpd_rx; amdgpu_dm_irq_register_interrupt(adev, &int_params, handle_hpd_rx_irq, (void *) aconnector); } if (adev->dm.hpd_rx_offload_wq) adev->dm.hpd_rx_offload_wq[connector->index].aconnector = aconnector; } } #if defined(CONFIG_DRM_AMD_DC_SI) /* Register IRQ sources and initialize IRQ callbacks */ static int dce60_register_irq_handlers(struct amdgpu_device *adev) { struct dc *dc = adev->dm.dc; struct common_irq_params *c_irq_params; struct dc_interrupt_params int_params = {0}; int r; int i; unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; /* * Actions of amdgpu_irq_add_id(): * 1. Register a set() function with base driver. * Base driver will call set() function to enable/disable an * interrupt in DC hardware. * 2. Register amdgpu_dm_irq_handler(). * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC * for acknowledging and handling. */ /* Use VBLANK interrupt */ for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); if (r) { DRM_ERROR("Failed to add crtc irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i + 1, 0); c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_crtc_high_irq, c_irq_params); } /* Use GRPH_PFLIP interrupt */ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); if (r) { DRM_ERROR("Failed to add page flip irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_pflip_high_irq, c_irq_params); } /* HPD */ r = amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { DRM_ERROR("Failed to add hpd irq id!\n"); return r; } register_hpd_handlers(adev); return 0; } #endif /* Register IRQ sources and initialize IRQ callbacks */ static int dce110_register_irq_handlers(struct amdgpu_device *adev) { struct dc *dc = adev->dm.dc; struct common_irq_params *c_irq_params; struct dc_interrupt_params int_params = {0}; int r; int i; unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; if (adev->family >= AMDGPU_FAMILY_AI) client_id = SOC15_IH_CLIENTID_DCE; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; /* * Actions of amdgpu_irq_add_id(): * 1. Register a set() function with base driver. * Base driver will call set() function to enable/disable an * interrupt in DC hardware. * 2. Register amdgpu_dm_irq_handler(). * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC * for acknowledging and handling. 
*/ /* Use VBLANK interrupt */ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); if (r) { DRM_ERROR("Failed to add crtc irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_crtc_high_irq, c_irq_params); } /* Use VUPDATE interrupt */ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); if (r) { DRM_ERROR("Failed to add vupdate irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_vupdate_high_irq, c_irq_params); } /* Use GRPH_PFLIP interrupt */ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); if (r) { DRM_ERROR("Failed to add page flip irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_pflip_high_irq, c_irq_params); } /* HPD */ r = amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { DRM_ERROR("Failed to add hpd irq id!\n"); return r; } register_hpd_handlers(adev); return 0; } /* Register IRQ sources and initialize IRQ callbacks */ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) { struct dc *dc = adev->dm.dc; struct common_irq_params *c_irq_params; struct dc_interrupt_params int_params = {0}; int r; int i; #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) static const unsigned int vrtl_int_srcid[] = { DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL }; #endif int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; /* * Actions of amdgpu_irq_add_id(): * 1. Register a set() function with base driver. * Base driver will call set() function to enable/disable an * interrupt in DC hardware. * 2. Register amdgpu_dm_irq_handler(). * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC * for acknowledging and handling. 
*/ /* Use VSTARTUP interrupt */ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); if (r) { DRM_ERROR("Failed to add crtc irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt( adev, &int_params, dm_crtc_high_irq, c_irq_params); } /* Use otg vertical line interrupt */ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, vrtl_int_srcid[i], &adev->vline0_irq); if (r) { DRM_ERROR("Failed to add vline0 irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); break; } c_irq_params = &adev->dm.vline0_params[int_params.irq_source - DC_IRQ_SOURCE_DC1_VLINE0]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_dcn_vertical_interrupt0_high_irq, c_irq_params); } #endif /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx * to trigger at end of each vblank, regardless of state of the lock, * matching DCE behaviour. */ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); if (r) { DRM_ERROR("Failed to add vupdate irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_vupdate_high_irq, c_irq_params); } /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { DRM_ERROR("Failed to add page flip irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_pflip_high_irq, c_irq_params); } /* HPD */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, &adev->hpd_irq); if (r) { DRM_ERROR("Failed to add hpd irq id!\n"); return r; } register_hpd_handlers(adev); return 0; } /* Register Outbox IRQ sources and initialize IRQ callbacks */ static int register_outbox_irq_handlers(struct amdgpu_device *adev) { struct dc *dc = adev->dm.dc; struct common_irq_params *c_irq_params; struct dc_interrupt_params int_params = {0}; int r, i; 
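	/*
	 * DMCUB signals pending outbox1 messages through a single
	 * low-priority-ready interrupt; it is registered in low IRQ context
	 * and serviced by dm_dmub_outbox1_low_irq().
	 */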
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, &adev->dmub_outbox_irq); if (r) { DRM_ERROR("Failed to add outbox irq id!\n"); return r; } if (dc->ctx->dmub_srv) { i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); c_irq_params = &adev->dm.dmub_outbox_params[0]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, dm_dmub_outbox1_low_irq, c_irq_params); } return 0; } /* * Acquires the lock for the atomic state object and returns * the new atomic state. * * This should only be called during atomic check. */ int dm_atomic_get_state(struct drm_atomic_state *state, struct dm_atomic_state **dm_state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; struct drm_private_state *priv_state; if (*dm_state) return 0; priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); if (IS_ERR(priv_state)) return PTR_ERR(priv_state); *dm_state = to_dm_atomic_state(priv_state); return 0; } static struct dm_atomic_state * dm_atomic_get_new_state(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; struct drm_private_obj *obj; struct drm_private_state *new_obj_state; int i; for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { if (obj->funcs == dm->atomic_obj.funcs) return to_dm_atomic_state(new_obj_state); } return NULL; } static struct drm_private_state * dm_atomic_duplicate_state(struct drm_private_obj *obj) { struct dm_atomic_state *old_state, *new_state; new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); if (!new_state) return NULL; __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); old_state = to_dm_atomic_state(obj->state); if (old_state && old_state->context) new_state->context = dc_copy_state(old_state->context); if (!new_state->context) { kfree(new_state); return NULL; } return &new_state->base; } static void dm_atomic_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state) { struct dm_atomic_state *dm_state = to_dm_atomic_state(state); if (dm_state && dm_state->context) dc_release_state(dm_state->context); kfree(dm_state); } static struct drm_private_state_funcs dm_atomic_state_funcs = { .atomic_duplicate_state = dm_atomic_duplicate_state, .atomic_destroy_state = dm_atomic_destroy_state, }; static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) { struct dm_atomic_state *state; int r; adev->mode_info.mode_config_initialized = true; adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; adev_to_drm(adev)->mode_config.max_width = 16384; adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; if (adev->asic_type == CHIP_HAWAII) /* disable prefer shadow for now due to hibernation issues */ adev_to_drm(adev)->mode_config.prefer_shadow = 0; else adev_to_drm(adev)->mode_config.prefer_shadow = 1; /* indicates support for immediate flip */ adev_to_drm(adev)->mode_config.async_page_flip = true; state = kzalloc(sizeof(*state), GFP_KERNEL); 
if (!state) return -ENOMEM; state->context = dc_create_state(adev->dm.dc); if (!state->context) { kfree(state); return -ENOMEM; } dc_resource_state_copy_construct_current(adev->dm.dc, state->context); drm_atomic_private_obj_init(adev_to_drm(adev), &adev->dm.atomic_obj, &state->base, &dm_atomic_state_funcs); r = amdgpu_display_modeset_create_props(adev); if (r) { dc_release_state(state->context); kfree(state); return r; } r = amdgpu_dm_audio_init(adev); if (r) { dc_release_state(state->context); kfree(state); return r; } return 0; } #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, int bl_idx) { #if defined(CONFIG_ACPI) struct amdgpu_dm_backlight_caps caps; memset(&caps, 0, sizeof(caps)); if (dm->backlight_caps[bl_idx].caps_valid) return; amdgpu_acpi_get_backlight_caps(&caps); if (caps.caps_valid) { dm->backlight_caps[bl_idx].caps_valid = true; if (caps.aux_support) return; dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; } else { dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; } #else if (dm->backlight_caps[bl_idx].aux_support) return; dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; #endif } static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, unsigned int *min, unsigned int *max) { if (!caps) return 0; if (caps->aux_support) { // Firmware limits are in nits, DC API wants millinits. *max = 1000 * caps->aux_max_input_signal; *min = 1000 * caps->aux_min_input_signal; } else { // Firmware limits are 8-bit, PWM control is 16-bit. 
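		// Multiplying by 0x101 maps the 8-bit limit onto the full
		// 16-bit PWM range exactly: 0xFF * 0x101 = 0xFFFF.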
*max = 0x101 * caps->max_input_signal; *min = 0x101 * caps->min_input_signal; } return 1; } static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, uint32_t brightness) { unsigned int min, max; if (!get_brightness_range(caps, &min, &max)) return brightness; // Rescale 0..255 to min..max return min + DIV_ROUND_CLOSEST((max - min) * brightness, AMDGPU_MAX_BL_LEVEL); } static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, uint32_t brightness) { unsigned int min, max; if (!get_brightness_range(caps, &min, &max)) return brightness; if (brightness < min) return 0; // Rescale min..max to 0..255 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), max - min); } static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { struct amdgpu_dm_backlight_caps caps; struct dc_link *link; u32 brightness; bool rc; amdgpu_dm_update_backlight_caps(dm, bl_idx); caps = dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; /* update scratch register */ if (bl_idx == 0) amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; /* Change brightness based on AUX property */ if (caps.aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); if (!rc) DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); } else { rc = dc_link_set_backlight_level(link, brightness, 0); if (!rc) DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } if (rc) dm->actual_brightness[bl_idx] = user_brightness; } static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); int i; for (i = 0; i < dm->num_of_edps; i++) { if (bd == dm->backlight_dev[i]) break; } if (i >= AMDGPU_DM_MAX_NUM_EDP) i = 0; amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); return 0; } static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, int bl_idx) { int ret; struct amdgpu_dm_backlight_caps caps; struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; amdgpu_dm_update_backlight_caps(dm, bl_idx); caps = dm->backlight_caps[bl_idx]; if (caps.aux_support) { u32 avg, peak; bool rc; rc = dc_link_get_backlight_level_nits(link, &avg, &peak); if (!rc) return dm->brightness[bl_idx]; return convert_brightness_to_user(&caps, avg); } ret = dc_link_get_backlight_level(link); if (ret == DC_ERROR_UNEXPECTED) return dm->brightness[bl_idx]; return convert_brightness_to_user(&caps, ret); } static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); int i; for (i = 0; i < dm->num_of_edps; i++) { if (bd == dm->backlight_dev[i]) break; } if (i >= AMDGPU_DM_MAX_NUM_EDP) i = 0; return amdgpu_dm_backlight_get_level(dm, i); } static const struct backlight_ops amdgpu_dm_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = amdgpu_dm_backlight_get_brightness, .update_status = amdgpu_dm_backlight_update_status, }; static void amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) { struct drm_device *drm = aconnector->base.dev; struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; struct backlight_properties props = { 0 }; char bl_name[16]; if (aconnector->bl_idx == -1) return; if 
(!acpi_video_backlight_use_native()) { drm_info(drm, "Skipping amdgpu DM backlight registration\n"); /* Try registering an ACPI video backlight device instead. */ acpi_video_register_backlight(); return; } props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", drm->primary->index + aconnector->bl_idx); dm->backlight_dev[aconnector->bl_idx] = backlight_device_register(bl_name, aconnector->base.kdev, dm, &amdgpu_dm_backlight_ops, &props); if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { DRM_ERROR("DM: Backlight registration failed!\n"); dm->backlight_dev[aconnector->bl_idx] = NULL; } else DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); } static int initialize_plane(struct amdgpu_display_manager *dm, struct amdgpu_mode_info *mode_info, int plane_id, enum drm_plane_type plane_type, const struct dc_plane_cap *plane_cap) { struct drm_plane *plane; unsigned long possible_crtcs; int ret = 0; plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); if (!plane) { DRM_ERROR("KMS: Failed to allocate plane\n"); return -ENOMEM; } plane->type = plane_type; /* * HACK: IGT tests expect that the primary plane for a CRTC * can only have one possible CRTC. Only expose support for * any CRTC if they're not going to be used as a primary plane * for a CRTC - like overlay or underlay planes. */ possible_crtcs = 1 << plane_id; if (plane_id >= dm->dc->caps.max_streams) possible_crtcs = 0xff; ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); if (ret) { DRM_ERROR("KMS: Failed to initialize plane\n"); kfree(plane); return ret; } if (mode_info) mode_info->planes[plane_id] = plane; return ret; } static void setup_backlight_device(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector) { struct dc_link *link = aconnector->dc_link; int bl_idx = dm->num_of_edps; if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) || link->type == dc_connection_none) return; if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) { drm_warn(adev_to_drm(dm->adev), "Too much eDP connections, skipping backlight setup for additional eDPs\n"); return; } aconnector->bl_idx = bl_idx; amdgpu_dm_update_backlight_caps(dm, bl_idx); dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL; dm->backlight_link[bl_idx] = link; dm->num_of_edps++; update_connector_ext_caps(aconnector); } static void amdgpu_set_panel_orientation(struct drm_connector *connector); /* * In this architecture, the association * connector -> encoder -> crtc * id not really requried. 
The crtc and connector will hold the * display_index as an abstraction to use with DAL component * * Returns 0 on success */ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) { struct amdgpu_display_manager *dm = &adev->dm; s32 i; struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_encoder *aencoder = NULL; struct amdgpu_mode_info *mode_info = &adev->mode_info; u32 link_cnt; s32 primary_planes; enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; bool psr_feature_enabled = false; bool replay_feature_enabled = false; int max_overlay = dm->dc->caps.max_slave_planes; dm->display_indexes_num = dm->dc->caps.max_streams; /* Update the actual used number of crtc */ adev->mode_info.num_crtc = adev->dm.display_indexes_num; amdgpu_dm_set_irq_funcs(adev); link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { DRM_ERROR("DM: Failed to initialize mode config\n"); return -EINVAL; } /* There is one primary plane per CRTC */ primary_planes = dm->dc->caps.max_streams; ASSERT(primary_planes <= AMDGPU_MAX_PLANES); /* * Initialize primary planes, implicit planes for legacy IOCTLS. * Order is reversed to match iteration order in atomic check. */ for (i = (primary_planes - 1); i >= 0; i--) { plane = &dm->dc->caps.planes[i]; if (initialize_plane(dm, mode_info, i, DRM_PLANE_TYPE_PRIMARY, plane)) { DRM_ERROR("KMS: Failed to initialize primary plane\n"); goto fail; } } /* * Initialize overlay planes, index starting after primary planes. * These planes have a higher DRM index than the primary planes since * they should be considered as having a higher z-order. * Order is reversed to match iteration order in atomic check. * * Only support DCN for now, and only expose one so we don't encourage * userspace to use up all the pipes. */ for (i = 0; i < dm->dc->caps.max_planes; ++i) { struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; /* Do not create overlay if MPO disabled */ if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) break; if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) continue; if (!plane->pixel_format_support.argb8888) continue; if (max_overlay-- == 0) break; if (initialize_plane(dm, NULL, primary_planes + i, DRM_PLANE_TYPE_OVERLAY, plane)) { DRM_ERROR("KMS: Failed to initialize overlay plane\n"); goto fail; } } for (i = 0; i < dm->dc->caps.max_streams; i++) if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { DRM_ERROR("KMS: Failed to initialize crtc\n"); goto fail; } /* Use Outbox interrupt */ switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(2, 1, 0): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; } break; default: DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", adev->ip_versions[DCE_HWIP][0]); } /* Determine whether to enable PSR support by default. 
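	 * The DCN IP versions listed below default PSR to on; anything else
	 * falls back to the DC_PSR_MASK feature flag, and DC_DISABLE_PSR in
	 * the debug mask leaves PSR disabled altogether.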
*/ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): psr_feature_enabled = true; break; default: psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; break; } } if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): replay_feature_enabled = true; break; default: replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; break; } } /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { DRM_ERROR( "KMS: Cannot support more than %d display indexes\n", AMDGPU_DM_MAX_DISPLAY_INDEX); continue; } aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) goto fail; aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); if (!aencoder) goto fail; if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { DRM_ERROR("KMS: Failed to initialize encoder\n"); goto fail; } if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { DRM_ERROR("KMS: Failed to initialize connector\n"); goto fail; } link = dc_get_link_at_index(dm->dc, i); if (!dc_link_detect_connection_type(link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(link); amdgpu_dm_update_connector_after_detect(aconnector); } else { bool ret = false; mutex_lock(&dm->dc_lock); ret = dc_link_detect(link, DETECT_REASON_BOOT); mutex_unlock(&dm->dc_lock); if (ret) { amdgpu_dm_update_connector_after_detect(aconnector); setup_backlight_device(dm, aconnector); /* * Disable psr if replay can be enabled */ if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector)) psr_feature_enabled = false; if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); /* TODO: Fix vblank control helpers to delay PSR entry to allow this when * PSR is also supported. */ if (link->psr_settings.psr_feature_enabled) adev_to_drm(adev)->vblank_disable_immediate = false; } } amdgpu_set_panel_orientation(&aconnector->base); } /* Software is initialized. Now we can register interrupt handlers. 
*/ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: if (dce60_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; } break; #endif case CHIP_BONAIRE: case CHIP_HAWAII: case CHIP_KAVERI: case CHIP_KABINI: case CHIP_MULLINS: case CHIP_TONGA: case CHIP_FIJI: case CHIP_CARRIZO: case CHIP_STONEY: case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: case CHIP_VEGAM: case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: if (dce110_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; } break; default: switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 3): case IP_VERSION(2, 0, 0): case IP_VERSION(2, 1, 0): case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 2): case IP_VERSION(3, 0, 3): case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; } break; default: DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", adev->ip_versions[DCE_HWIP][0]); goto fail; } break; } return 0; fail: kfree(aencoder); kfree(aconnector); return -EINVAL; } static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { drm_atomic_private_obj_fini(&dm->atomic_obj); } /****************************************************************************** * amdgpu_display_funcs functions *****************************************************************************/ /* * dm_bandwidth_update - program display watermarks * * @adev: amdgpu_device pointer * * Calculate and program the display watermarks and line buffer allocation. */ static void dm_bandwidth_update(struct amdgpu_device *adev) { /* TODO: implement later */ } static const struct amdgpu_display_funcs dm_display_funcs = { .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ .backlight_set_level = NULL, /* never called for DC */ .backlight_get_level = NULL, /* never called for DC */ .hpd_sense = NULL,/* called unconditionally */ .hpd_set_polarity = NULL, /* called unconditionally */ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ .page_flip_get_scanoutpos = dm_crtc_get_scanoutpos,/* called unconditionally */ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ .add_connector = NULL, /* VBIOS parsing. DAL does it. */ }; #if defined(CONFIG_DEBUG_KERNEL_DC) static ssize_t s3_debug_store(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { int ret; int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_to_adev(drm_dev); ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { dm_resume(adev); drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else dm_suspend(adev); } return ret == 0 ? 
count : 0; } DEVICE_ATTR_WO(s3_debug); #endif static int dm_init_microcode(struct amdgpu_device *adev) { char *fw_name_dmub; int r; switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 1, 0): fw_name_dmub = FIRMWARE_RENOIR_DMUB; if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; break; case IP_VERSION(3, 0, 0): if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; else fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; break; case IP_VERSION(3, 0, 1): fw_name_dmub = FIRMWARE_VANGOGH_DMUB; break; case IP_VERSION(3, 0, 2): fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; break; case IP_VERSION(3, 0, 3): fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; break; case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; break; case IP_VERSION(3, 1, 4): fw_name_dmub = FIRMWARE_DCN_314_DMUB; break; case IP_VERSION(3, 1, 5): fw_name_dmub = FIRMWARE_DCN_315_DMUB; break; case IP_VERSION(3, 1, 6): fw_name_dmub = FIRMWARE_DCN316_DMUB; break; case IP_VERSION(3, 2, 0): fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; break; case IP_VERSION(3, 2, 1): fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; default: /* ASIC doesn't support DMUB. */ return 0; } r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); if (r) DRM_ERROR("DMUB firmware loading failed: %d\n", r); return r; } static int dm_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_mode_info *mode_info = &adev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); u16 data_offset; /* if there is no object header, skip DM */ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; dev_info(adev->dev, "No object header, skipping DM\n"); return -ENOENT; } switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_OLAND: adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 2; adev->mode_info.num_dig = 2; break; #endif case CHIP_BONAIRE: case CHIP_HAWAII: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_KAVERI: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; case CHIP_KABINI: case CHIP_MULLINS: adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_FIJI: case CHIP_TONGA: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; case CHIP_CARRIZO: adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_STONEY: adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_POLARIS11: case CHIP_POLARIS12: adev->mode_info.num_crtc = 5; adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; case CHIP_POLARIS10: case CHIP_VEGAM: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; default: switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 0, 2): case IP_VERSION(3, 0, 0): adev->mode_info.num_crtc = 6; 
adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case IP_VERSION(2, 0, 0): case IP_VERSION(3, 0, 2): adev->mode_info.num_crtc = 5; adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; case IP_VERSION(2, 0, 3): case IP_VERSION(3, 0, 3): adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 2; adev->mode_info.num_dig = 2; break; case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): case IP_VERSION(3, 0, 1): case IP_VERSION(2, 1, 0): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; default: DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", adev->ip_versions[DCE_HWIP][0]); return -EINVAL; } break; } if (adev->mode_info.funcs == NULL) adev->mode_info.funcs = &dm_display_funcs; /* * Note: Do NOT change adev->audio_endpt_rreg and * adev->audio_endpt_wreg because they are initialised in * amdgpu_device_init() */ #if defined(CONFIG_DEBUG_KERNEL_DC) device_create_file( adev_to_drm(adev)->dev, &dev_attr_s3_debug); #endif adev->dc_enabled = true; return dm_init_microcode(adev); } static bool modereset_required(struct drm_crtc_state *crtc_state) { return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); } static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); kfree(encoder); } static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { .destroy = amdgpu_dm_encoder_destroy, }; static int fill_plane_color_attributes(const struct drm_plane_state *plane_state, const enum surface_pixel_format format, enum dc_color_space *color_space) { bool full_range; *color_space = COLOR_SPACE_SRGB; /* DRM color properties only affect non-RGB formats. 
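	 * RGB surfaces keep the COLOR_SPACE_SRGB default set above; YCbCr
	 * surfaces derive a 601/709/2020 color space from the plane's
	 * color_encoding and color_range properties below.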
*/ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return 0; full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); switch (plane_state->color_encoding) { case DRM_COLOR_YCBCR_BT601: if (full_range) *color_space = COLOR_SPACE_YCBCR601; else *color_space = COLOR_SPACE_YCBCR601_LIMITED; break; case DRM_COLOR_YCBCR_BT709: if (full_range) *color_space = COLOR_SPACE_YCBCR709; else *color_space = COLOR_SPACE_YCBCR709_LIMITED; break; case DRM_COLOR_YCBCR_BT2020: if (full_range) *color_space = COLOR_SPACE_2020_YCBCR; else return -EINVAL; break; default: return -EINVAL; } return 0; } static int fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_plane_state *plane_state, const u64 tiling_flags, struct dc_plane_info *plane_info, struct dc_plane_address *address, bool tmz_surface, bool force_disable_dcc) { const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane_state->fb); int ret; memset(plane_info, 0, sizeof(*plane_info)); switch (fb->format->format) { case DRM_FORMAT_C8: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; break; case DRM_FORMAT_RGB565: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; break; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; break; case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ABGR2101010: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; break; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; break; case DRM_FORMAT_NV21: plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; break; case DRM_FORMAT_NV12: plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; break; case DRM_FORMAT_P010: plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; break; case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; break; case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; break; case DRM_FORMAT_XRGB16161616: case DRM_FORMAT_ARGB16161616: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; break; case DRM_FORMAT_XBGR16161616: case DRM_FORMAT_ABGR16161616: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; break; default: DRM_ERROR( "Unsupported screen format %p4cc\n", &fb->format->format); return -EINVAL; } switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { case DRM_MODE_ROTATE_0: plane_info->rotation = ROTATION_ANGLE_0; break; case DRM_MODE_ROTATE_90: plane_info->rotation = ROTATION_ANGLE_90; break; case DRM_MODE_ROTATE_180: plane_info->rotation = ROTATION_ANGLE_180; break; case DRM_MODE_ROTATE_270: plane_info->rotation = ROTATION_ANGLE_270; break; default: plane_info->rotation = ROTATION_ANGLE_0; break; } plane_info->visible = true; plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; plane_info->layer_index = plane_state->normalized_zpos; ret = fill_plane_color_attributes(plane_state, plane_info->format, &plane_info->color_space); if (ret) return ret; ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, plane_info->rotation, tiling_flags, &plane_info->tiling_info, &plane_info->plane_size, &plane_info->dcc, address, tmz_surface, force_disable_dcc); if (ret) return ret; 
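	/* Alpha and blending attributes come straight from the DRM plane state. */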
amdgpu_dm_plane_fill_blending_from_plane_state( plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, &plane_info->global_alpha, &plane_info->global_alpha_value); return 0; } static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct dc_plane_state *dc_plane_state, struct drm_plane_state *plane_state, struct drm_crtc_state *crtc_state) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; struct dc_scaling_info scaling_info; struct dc_plane_info plane_info; int ret; bool force_disable_dcc = false; ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); if (ret) return ret; dc_plane_state->src_rect = scaling_info.src_rect; dc_plane_state->dst_rect = scaling_info.dst_rect; dc_plane_state->clip_rect = scaling_info.clip_rect; dc_plane_state->scaling_quality = scaling_info.scaling_quality; force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; ret = fill_dc_plane_info_and_addr(adev, plane_state, afb->tiling_flags, &plane_info, &dc_plane_state->address, afb->tmz_surface, force_disable_dcc); if (ret) return ret; dc_plane_state->format = plane_info.format; dc_plane_state->color_space = plane_info.color_space; dc_plane_state->format = plane_info.format; dc_plane_state->plane_size = plane_info.plane_size; dc_plane_state->rotation = plane_info.rotation; dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; dc_plane_state->stereo_format = plane_info.stereo_format; dc_plane_state->tiling_info = plane_info.tiling_info; dc_plane_state->visible = plane_info.visible; dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha; dc_plane_state->global_alpha = plane_info.global_alpha; dc_plane_state->global_alpha_value = plane_info.global_alpha_value; dc_plane_state->dcc = plane_info.dcc; dc_plane_state->layer_index = plane_info.layer_index; dc_plane_state->flip_int_enabled = true; /* * Always set input transfer function, since plane state is refreshed * every time. */ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); if (ret) return ret; return 0; } static inline void fill_dc_dirty_rect(struct drm_plane *plane, struct rect *dirty_rect, int32_t x, s32 y, s32 width, s32 height, int *i, bool ffu) { WARN_ON(*i >= DC_MAX_DIRTY_RECTS); dirty_rect->x = x; dirty_rect->y = y; dirty_rect->width = width; dirty_rect->height = height; if (ffu) drm_dbg(plane->dev, "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n", plane->base.id, width, height); else drm_dbg(plane->dev, "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)", plane->base.id, x, y, width, height); (*i)++; } /** * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates * * @plane: DRM plane containing dirty regions that need to be flushed to the eDP * remote fb * @old_plane_state: Old state of @plane * @new_plane_state: New state of @plane * @crtc_state: New state of CRTC connected to the @plane * @flip_addrs: DC flip tracking struct, which also tracts dirty rects * @dirty_regions_changed: dirty regions changed * * For PSR SU, DC informs the DMUB uController of dirty rectangle regions * (referred to as "damage clips" in DRM nomenclature) that require updating on * the eDP remote buffer. The responsibility of specifying the dirty regions is * amdgpu_dm's. 
* * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the * plane with regions that require flushing to the eDP remote buffer. In * addition, certain use cases - such as cursor and multi-plane overlay (MPO) - * implicitly provide damage clips without any client support via the plane * bounds. */ static void fill_dc_dirty_rects(struct drm_plane *plane, struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state, struct drm_crtc_state *crtc_state, struct dc_flip_addrs *flip_addrs, bool *dirty_regions_changed) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); struct rect *dirty_rects = flip_addrs->dirty_rects; u32 num_clips; struct drm_mode_rect *clips; bool bb_changed; bool fb_changed; u32 i = 0; *dirty_regions_changed = false; /* * Cursor plane has it's own dirty rect update interface. See * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data */ if (plane->type == DRM_PLANE_TYPE_CURSOR) return; num_clips = drm_plane_get_damage_clips_count(new_plane_state); clips = drm_plane_get_damage_clips(new_plane_state); if (!dm_crtc_state->mpo_requested) { if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) goto ffu; for (; flip_addrs->dirty_rect_count < num_clips; clips++) fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[flip_addrs->dirty_rect_count], clips->x1, clips->y1, clips->x2 - clips->x1, clips->y2 - clips->y1, &flip_addrs->dirty_rect_count, false); return; } /* * MPO is requested. Add entire plane bounding box to dirty rects if * flipped to or damaged. * * If plane is moved or resized, also add old bounding box to dirty * rects. */ fb_changed = old_plane_state->fb->base.id != new_plane_state->fb->base.id; bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || old_plane_state->crtc_y != new_plane_state->crtc_y || old_plane_state->crtc_w != new_plane_state->crtc_w || old_plane_state->crtc_h != new_plane_state->crtc_h); drm_dbg(plane->dev, "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", new_plane_state->plane->base.id, bb_changed, fb_changed, num_clips); *dirty_regions_changed = bb_changed; if ((num_clips + (bb_changed ? 
2 : 0)) > DC_MAX_DIRTY_RECTS) goto ffu; if (bb_changed) { fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], new_plane_state->crtc_x, new_plane_state->crtc_y, new_plane_state->crtc_w, new_plane_state->crtc_h, &i, false); /* Add old plane bounding-box if plane is moved or resized */ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], old_plane_state->crtc_x, old_plane_state->crtc_y, old_plane_state->crtc_w, old_plane_state->crtc_h, &i, false); } if (num_clips) { for (; i < num_clips; clips++) fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], clips->x1, clips->y1, clips->x2 - clips->x1, clips->y2 - clips->y1, &i, false); } else if (fb_changed && !bb_changed) { fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], new_plane_state->crtc_x, new_plane_state->crtc_y, new_plane_state->crtc_w, new_plane_state->crtc_h, &i, false); } flip_addrs->dirty_rect_count = i; return; ffu: fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, dm_crtc_state->base.mode.crtc_hdisplay, dm_crtc_state->base.mode.crtc_vdisplay, &flip_addrs->dirty_rect_count, true); } static void update_stream_scaling_settings(const struct drm_display_mode *mode, const struct dm_connector_state *dm_state, struct dc_stream_state *stream) { enum amdgpu_rmx_type rmx_type; struct rect src = { 0 }; /* viewport in composition space*/ struct rect dst = { 0 }; /* stream addressable area */ /* no mode. nothing to be done */ if (!mode) return; /* Full screen scaling by default */ src.width = mode->hdisplay; src.height = mode->vdisplay; dst.width = stream->timing.h_addressable; dst.height = stream->timing.v_addressable; if (dm_state) { rmx_type = dm_state->scaling; if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { if (src.width * dst.height < src.height * dst.width) { /* height needs less upscaling/more downscaling */ dst.width = src.width * dst.height / src.height; } else { /* width needs less upscaling/more downscaling */ dst.height = src.height * dst.width / src.width; } } else if (rmx_type == RMX_CENTER) { dst = src; } dst.x = (stream->timing.h_addressable - dst.width) / 2; dst.y = (stream->timing.v_addressable - dst.height) / 2; if (dm_state->underscan_enable) { dst.x += dm_state->underscan_hborder / 2; dst.y += dm_state->underscan_vborder / 2; dst.width -= dm_state->underscan_hborder; dst.height -= dm_state->underscan_vborder; } } stream->src = src; stream->dst = dst; DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", dst.x, dst.y, dst.width, dst.height); } static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, bool is_y420, int requested_bpc) { u8 bpc; if (is_y420) { bpc = 8; /* Cap display bpc based on HDMI 2.0 HF-VSDB */ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) bpc = 16; else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) bpc = 12; else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) bpc = 10; } else { bpc = (uint8_t)connector->display_info.bpc; /* Assume 8 bpc by default if no bpc is specified. */ bpc = bpc ? bpc : 8; } if (requested_bpc > 0) { /* * Cap display bpc based on the user requested value. * * The value for state->max_bpc may not correctly updated * depending on when the connector gets added to the state * or if this was called outside of atomic check, so it * can't be used directly. */ bpc = min_t(u8, bpc, requested_bpc); /* Round down to the nearest even number. 
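	 * Clearing the low bit does that, e.g. a reported 11 bpc becomes 10 bpc.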
*/ bpc = bpc - (bpc & 1); } switch (bpc) { case 0: /* * Temporary Work around, DRM doesn't parse color depth for * EDID revision before 1.4 * TODO: Fix edid parsing */ return COLOR_DEPTH_888; case 6: return COLOR_DEPTH_666; case 8: return COLOR_DEPTH_888; case 10: return COLOR_DEPTH_101010; case 12: return COLOR_DEPTH_121212; case 14: return COLOR_DEPTH_141414; case 16: return COLOR_DEPTH_161616; default: return COLOR_DEPTH_UNDEFINED; } } static enum dc_aspect_ratio get_aspect_ratio(const struct drm_display_mode *mode_in) { /* 1-1 mapping, since both enums follow the HDMI spec. */ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; } static enum dc_color_space get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, const struct drm_connector_state *connector_state) { enum dc_color_space color_space = COLOR_SPACE_SRGB; switch (connector_state->colorspace) { case DRM_MODE_COLORIMETRY_BT601_YCC: if (dc_crtc_timing->flags.Y_ONLY) color_space = COLOR_SPACE_YCBCR601_LIMITED; else color_space = COLOR_SPACE_YCBCR601; break; case DRM_MODE_COLORIMETRY_BT709_YCC: if (dc_crtc_timing->flags.Y_ONLY) color_space = COLOR_SPACE_YCBCR709_LIMITED; else color_space = COLOR_SPACE_YCBCR709; break; case DRM_MODE_COLORIMETRY_OPRGB: color_space = COLOR_SPACE_ADOBERGB; break; case DRM_MODE_COLORIMETRY_BT2020_RGB: case DRM_MODE_COLORIMETRY_BT2020_YCC: if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) color_space = COLOR_SPACE_2020_RGB_FULLRANGE; else color_space = COLOR_SPACE_2020_YCBCR; break; case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 default: if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { color_space = COLOR_SPACE_SRGB; /* * 27030khz is the separation point between HDTV and SDTV * according to HDMI spec, we use YCbCr709 and YCbCr601 * respectively */ } else if (dc_crtc_timing->pix_clk_100hz > 270300) { if (dc_crtc_timing->flags.Y_ONLY) color_space = COLOR_SPACE_YCBCR709_LIMITED; else color_space = COLOR_SPACE_YCBCR709; } else { if (dc_crtc_timing->flags.Y_ONLY) color_space = COLOR_SPACE_YCBCR601_LIMITED; else color_space = COLOR_SPACE_YCBCR601; } break; } return color_space; } static bool adjust_colour_depth_from_display_info( struct dc_crtc_timing *timing_out, const struct drm_display_info *info) { enum dc_color_depth depth = timing_out->display_color_depth; int normalized_clk; do { normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) normalized_clk /= 2; /* Adjusting pix clock following on HDMI spec based on colour depth */ switch (depth) { case COLOR_DEPTH_888: break; case COLOR_DEPTH_101010: normalized_clk = (normalized_clk * 30) / 24; break; case COLOR_DEPTH_121212: normalized_clk = (normalized_clk * 36) / 24; break; case COLOR_DEPTH_161616: normalized_clk = (normalized_clk * 48) / 24; break; default: /* The above depths are the only ones valid for HDMI. 
*/ return false; } if (normalized_clk <= info->max_tmds_clock) { timing_out->display_color_depth = depth; return true; } } while (--depth > COLOR_DEPTH_666); return false; } static void fill_stream_properties_from_drm_display_mode( struct dc_stream_state *stream, const struct drm_display_mode *mode_in, const struct drm_connector *connector, const struct drm_connector_state *connector_state, const struct dc_stream_state *old_stream, int requested_bpc) { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; memset(&hv_frame, 0, sizeof(hv_frame)); memset(&avi_frame, 0, sizeof(avi_frame)); timing_out->h_border_left = 0; timing_out->h_border_right = 0; timing_out->v_border_top = 0; timing_out->v_border_bottom = 0; /* TODO: un-hardcode */ if (drm_mode_is_420_only(info, mode_in) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if (drm_mode_is_420_also(info, mode_in) && aconnector->force_yuv420_output) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; else timing_out->pixel_encoding = PIXEL_ENCODING_RGB; timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; timing_out->display_color_depth = convert_color_depth_from_display_info( connector, (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), requested_bpc); timing_out->scan_type = SCANNING_TYPE_NODATA; timing_out->hdmi_vic = 0; if (old_stream) { timing_out->vic = old_stream->timing.vic; timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; } else { timing_out->vic = drm_match_cea_mode(mode_in); if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; } if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); timing_out->vic = avi_frame.video_code; drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); timing_out->hdmi_vic = hv_frame.vic; } if (is_freesync_video_mode(mode_in, aconnector)) { timing_out->h_addressable = mode_in->hdisplay; timing_out->h_total = mode_in->htotal; timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; timing_out->v_total = mode_in->vtotal; timing_out->v_addressable = mode_in->vdisplay; timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; timing_out->pix_clk_100hz = mode_in->clock * 10; } else { timing_out->h_addressable = mode_in->crtc_hdisplay; timing_out->h_total = mode_in->crtc_htotal; timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; timing_out->v_total = mode_in->crtc_vtotal; timing_out->v_addressable = mode_in->crtc_vdisplay; timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 
timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; } timing_out->aspect_ratio = get_aspect_ratio(mode_in); stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { if (!adjust_colour_depth_from_display_info(timing_out, info) && drm_mode_is_420_also(info, mode_in) && timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; adjust_colour_depth_from_display_info(timing_out, info); } } stream->output_color_space = get_output_color_space(timing_out, connector_state); } static void fill_audio_info(struct audio_info *audio_info, const struct drm_connector *drm_connector, const struct dc_sink *dc_sink) { int i = 0; int cea_revision = 0; const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; audio_info->manufacture_id = edid_caps->manufacturer_id; audio_info->product_id = edid_caps->product_id; cea_revision = drm_connector->display_info.cea_rev; strscpy(audio_info->display_name, edid_caps->display_name, AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); if (cea_revision >= 3) { audio_info->mode_count = edid_caps->audio_mode_count; for (i = 0; i < audio_info->mode_count; ++i) { audio_info->modes[i].format_code = (enum audio_format_code) (edid_caps->audio_modes[i].format_code); audio_info->modes[i].channel_count = edid_caps->audio_modes[i].channel_count; audio_info->modes[i].sample_rates.all = edid_caps->audio_modes[i].sample_rate; audio_info->modes[i].sample_size = edid_caps->audio_modes[i].sample_size; } } audio_info->flags.all = edid_caps->speaker_flags; /* TODO: We only check for the progressive mode, check for interlace mode too */ if (drm_connector->latency_present[0]) { audio_info->video_latency = drm_connector->video_latency[0]; audio_info->audio_latency = drm_connector->audio_latency[0]; } /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ } static void copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, struct drm_display_mode *dst_mode) { dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; dst_mode->crtc_clock = src_mode->crtc_clock; dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; dst_mode->crtc_htotal = src_mode->crtc_htotal; dst_mode->crtc_hskew = src_mode->crtc_hskew; dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; dst_mode->crtc_vtotal = src_mode->crtc_vtotal; } static void decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, const struct drm_display_mode *native_mode, bool scale_enabled) { if (scale_enabled) { copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); } else if (native_mode->clock == drm_mode->clock && native_mode->htotal == drm_mode->htotal && native_mode->vtotal == drm_mode->vtotal) { copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); } else { /* no scaling nor amdgpu inserted, no need to patch */ } } static struct dc_sink * create_fake_sink(struct amdgpu_dm_connector *aconnector) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = 
		NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;

	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48,72,96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If the userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blinking during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
&aconnector->base.probed_modes : &aconnector->base.modes; if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; /* Find the preferred mode */ list_for_each_entry(m, list_head, head) { if (m->type & DRM_MODE_TYPE_PREFERRED) { m_pref = m; break; } } if (!m_pref) { /* Probably an EDID with no preferred mode. Fallback to first entry */ m_pref = list_first_entry_or_null( &aconnector->base.modes, struct drm_display_mode, head); if (!m_pref) { DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); return NULL; } } highest_refresh = drm_mode_vrefresh(m_pref); /* * Find the mode with highest refresh rate with same resolution. * For some monitors, preferred mode is not the mode with highest * supported refresh rate. */ list_for_each_entry(m, list_head, head) { current_refresh = drm_mode_vrefresh(m); if (m->hdisplay == m_pref->hdisplay && m->vdisplay == m_pref->vdisplay && highest_refresh < current_refresh) { highest_refresh = current_refresh; m_pref = m; } } drm_mode_copy(&aconnector->freesync_vid_base, m_pref); return m_pref; } static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector) { struct drm_display_mode *high_mode; int timing_diff; high_mode = get_highest_refresh_rate_mode(aconnector, false); if (!high_mode || !mode) return false; timing_diff = high_mode->vtotal - mode->vtotal; if (high_mode->clock == 0 || high_mode->clock != mode->clock || high_mode->hdisplay != mode->hdisplay || high_mode->vdisplay != mode->vdisplay || high_mode->hsync_start != mode->hsync_start || high_mode->hsync_end != mode->hsync_end || high_mode->htotal != mode->htotal || high_mode->hskew != mode->hskew || high_mode->vscan != mode->vscan || high_mode->vsync_start - mode->vsync_start != timing_diff || high_mode->vsync_end - mode->vsync_end != timing_diff) return false; else return true; } static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) { stream->timing.flags.DSC = 0; dsc_caps->is_dsc_supported = false; if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, dsc_caps); } } static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps, uint32_t max_dsc_target_bpp_limit_override) { const struct dc_link_settings *verified_link_cap = NULL; u32 link_bw_in_kbps; u32 edp_min_bpp_x16, edp_max_bpp_x16; struct dc *dc = sink->ctx->dc; struct dc_dsc_bw_range bw_range = {0}; struct dc_dsc_config dsc_cfg = {0}; struct dc_dsc_config_options dsc_options = {0}; dc_dsc_get_default_config_option(dc, &dsc_options); dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; verified_link_cap = dc_link_get_link_cap(stream->link); link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); edp_min_bpp_x16 = 8 * 16; edp_max_bpp_x16 = 8 * 16; if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; if (edp_max_bpp_x16 < edp_min_bpp_x16) edp_min_bpp_x16 = edp_max_bpp_x16; if 
(dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], dc->debug.dsc_min_slice_height_override, edp_min_bpp_x16, edp_max_bpp_x16, dsc_caps, &stream->timing, dc_link_get_highest_encoding_format(aconnector->dc_link), &bw_range)) { if (bw_range.max_kbps < link_bw_in_kbps) { if (dc_dsc_compute_config(dc->res_pool->dscs[0], dsc_caps, &dsc_options, 0, &stream->timing, dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; } return; } } if (dc_dsc_compute_config(dc->res_pool->dscs[0], dsc_caps, &dsc_options, link_bw_in_kbps, &stream->timing, dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; } } static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) { struct drm_connector *drm_connector = &aconnector->base; u32 link_bandwidth_kbps; struct dc *dc = sink->ctx->dc; u32 max_supported_bw_in_kbps, timing_bw_in_kbps; u32 dsc_max_supported_bw_in_kbps; u32 max_dsc_target_bpp_limit_override = drm_connector->display_info.max_dsc_bpp; struct dc_dsc_config_options dsc_options = {0}; dc_dsc_get_default_config_option(dc, &dsc_options); dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); /* Set DSC policy according to dsc_clock_en */ dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, &dsc_options, link_bandwidth_kbps, &stream->timing, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(aconnector->dc_link)); max_supported_bw_in_kbps = link_bandwidth_kbps; dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; if (timing_bw_in_kbps > max_supported_bw_in_kbps && max_supported_bw_in_kbps > 0 && dsc_max_supported_bw_in_kbps > 0) if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, &dsc_options, dsc_max_supported_bw_in_kbps, &stream->timing, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", __func__, drm_connector->name); } } } /* Overwrite the stream flag if DSC is enabled through debugfs */ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) stream->timing.flags.DSC = 1; if (stream->timing.flags.DSC && 
aconnector->dsc_settings.dsc_num_slices_h) stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; } static struct dc_stream_state * create_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream, int requested_bpc) { struct drm_display_mode *preferred_mode = NULL; struct drm_connector *drm_connector; const struct drm_connector_state *con_state = &dm_state->base; struct dc_stream_state *stream = NULL; struct drm_display_mode mode; struct drm_display_mode saved_mode; struct drm_display_mode *freesync_mode = NULL; bool native_mode_found = false; bool recalculate_timing = false; bool scale = dm_state->scaling != RMX_OFF; int mode_refresh; int preferred_refresh = 0; enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; struct dsc_dec_dpcd_caps dsc_caps; struct dc_sink *sink = NULL; drm_mode_init(&mode, drm_mode); memset(&saved_mode, 0, sizeof(saved_mode)); if (aconnector == NULL) { DRM_ERROR("aconnector is NULL!\n"); return stream; } drm_connector = &aconnector->base; if (!aconnector->dc_sink) { sink = create_fake_sink(aconnector); if (!sink) return stream; } else { sink = aconnector->dc_sink; dc_sink_retain(sink); } stream = dc_create_stream_for_sink(sink); if (stream == NULL) { DRM_ERROR("Failed to create stream for sink!\n"); goto finish; } stream->dm_stream_context = aconnector; stream->timing.flags.LTE_340MCSC_SCRAMBLE = drm_connector->display_info.hdmi.scdc.scrambling.low_rates; list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { native_mode_found = true; break; } } if (!native_mode_found) preferred_mode = list_first_entry_or_null( &aconnector->base.modes, struct drm_display_mode, head); mode_refresh = drm_mode_vrefresh(&mode); if (preferred_mode == NULL) { /* * This may not be an error, the use case is when we have no * usermode calls to reset and set mode upon hotplug. In this * case, we call set mode ourselves to restore the previous mode * and the modelist may not be filled in time. 
*/ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { recalculate_timing = is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); drm_mode_copy(&mode, freesync_mode); } else { decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, scale); preferred_refresh = drm_mode_vrefresh(preferred_mode); } } if (recalculate_timing) drm_mode_set_crtcinfo(&saved_mode, 0); /* * If scaling is enabled and refresh rate didn't change * we copy the vic and polarities of the old timings */ if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode( stream, &mode, &aconnector->base, con_state, NULL, requested_bpc); else fill_stream_properties_from_drm_display_mode( stream, &mode, &aconnector->base, con_state, old_stream, requested_bpc); if (aconnector->timing_changed) { DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n", __func__, stream->timing.display_color_depth, aconnector->timing_requested->display_color_depth); stream->timing = *aconnector->timing_requested; } /* SST DSC determination policy */ update_dsc_caps(aconnector, sink, stream, &dsc_caps); if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); update_stream_scaling_settings(&mode, dm_state, stream); fill_audio_info( &stream->audio_info, drm_connector, sink); update_stream_signal(stream, sink); if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // stream->use_vsc_sdp_for_colorimetry = false; if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { stream->use_vsc_sdp_for_colorimetry = aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; } else { if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) stream->use_vsc_sdp_for_colorimetry = true; } if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } finish: dc_sink_release(sink); return stream; } static enum drm_connector_status amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) { bool connected; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); /* * Notes: * 1. This interface is NOT called in context of HPD irq. * 2. This interface *is called* in context of user-mode ioctl. Which * makes it a bad place for *any* MST-related activity. */ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && !aconnector->fake_enable) connected = (aconnector->dc_sink != NULL); else connected = (aconnector->base.force == DRM_FORCE_ON || aconnector->base.force == DRM_FORCE_ON_DIGITAL); update_subconnector_property(aconnector); return (connected ? 
connector_status_connected : connector_status_disconnected); } int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *connector_state, struct drm_property *property, uint64_t val) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct dm_connector_state *dm_old_state = to_dm_connector_state(connector->state); struct dm_connector_state *dm_new_state = to_dm_connector_state(connector_state); int ret = -EINVAL; if (property == dev->mode_config.scaling_mode_property) { enum amdgpu_rmx_type rmx_type; switch (val) { case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; case DRM_MODE_SCALE_NONE: default: rmx_type = RMX_OFF; break; } if (dm_old_state->scaling == rmx_type) return 0; dm_new_state->scaling = rmx_type; ret = 0; } else if (property == adev->mode_info.underscan_hborder_property) { dm_new_state->underscan_hborder = val; ret = 0; } else if (property == adev->mode_info.underscan_vborder_property) { dm_new_state->underscan_vborder = val; ret = 0; } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; } else if (property == adev->mode_info.abm_level_property) { dm_new_state->abm_level = val; ret = 0; } return ret; } int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, uint64_t *val) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct dm_connector_state *dm_state = to_dm_connector_state(state); int ret = -EINVAL; if (property == dev->mode_config.scaling_mode_property) { switch (dm_state->scaling) { case RMX_CENTER: *val = DRM_MODE_SCALE_CENTER; break; case RMX_ASPECT: *val = DRM_MODE_SCALE_ASPECT; break; case RMX_FULL: *val = DRM_MODE_SCALE_FULLSCREEN; break; case RMX_OFF: default: *val = DRM_MODE_SCALE_NONE; break; } ret = 0; } else if (property == adev->mode_info.underscan_hborder_property) { *val = dm_state->underscan_hborder; ret = 0; } else if (property == adev->mode_info.underscan_vborder_property) { *val = dm_state->underscan_vborder; ret = 0; } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; } else if (property == adev->mode_info.abm_level_property) { *val = dm_state->abm_level; ret = 0; } return ret; } static void amdgpu_dm_connector_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); } static void amdgpu_dm_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_display_manager *dm = &adev->dm; /* * Call only if mst_mgr was initialized before since it's not done * for all connector types. 
*/ if (aconnector->mst_mgr.dev) drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); if (aconnector->bl_idx != -1) { backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); dm->backlight_dev[aconnector->bl_idx] = NULL; } if (aconnector->dc_em_sink) dc_sink_release(aconnector->dc_em_sink); aconnector->dc_em_sink = NULL; if (aconnector->dc_sink) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); drm_connector_unregister(connector); drm_connector_cleanup(connector); if (aconnector->i2c) { i2c_del_adapter(&aconnector->i2c->base); kfree(aconnector->i2c); } kfree(aconnector->dm_dp_aux.aux.name); kfree(connector); } void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) { struct dm_connector_state *state = to_dm_connector_state(connector->state); if (connector->state) __drm_atomic_helper_connector_destroy_state(connector->state); kfree(state); state = kzalloc(sizeof(*state), GFP_KERNEL); if (state) { state->scaling = RMX_OFF; state->underscan_enable = false; state->underscan_hborder = 0; state->underscan_vborder = 0; state->base.max_requested_bpc = 8; state->vcpi_slots = 0; state->pbn = 0; if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) state->abm_level = amdgpu_dm_abm_level; __drm_atomic_helper_connector_reset(connector, &state->base); } } struct drm_connector_state * amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) { struct dm_connector_state *state = to_dm_connector_state(connector->state); struct dm_connector_state *new_state = kmemdup(state, sizeof(*state), GFP_KERNEL); if (!new_state) return NULL; __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); new_state->freesync_capable = state->freesync_capable; new_state->abm_level = state->abm_level; new_state->scaling = state->scaling; new_state->underscan_enable = state->underscan_enable; new_state->underscan_hborder = state->underscan_hborder; new_state->underscan_vborder = state->underscan_vborder; new_state->vcpi_slots = state->vcpi_slots; new_state->pbn = state->pbn; return &new_state->base; } static int amdgpu_dm_connector_late_register(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); int r; amdgpu_dm_register_backlight_device(amdgpu_dm_connector); if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); if (r) return r; } #if defined(CONFIG_DEBUG_FS) connector_debugfs_init(amdgpu_dm_connector); #endif return 0; } static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; struct edid *edid; if (!connector->edid_override) return; drm_edid_override_connector_update(&aconnector->base); edid = aconnector->base.edid_blob_ptr->data; aconnector->edid = edid; /* Update emulated (virtual) sink's EDID */ if (dc_em_sink && dc_link) { memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); dm_helpers_parse_edid_caps( dc_link, &dc_em_sink->dc_edid, &dc_em_sink->edid_caps); } } static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .reset = 
amdgpu_dm_connector_funcs_reset, .detect = amdgpu_dm_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = amdgpu_dm_connector_destroy, .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_set_property = amdgpu_dm_connector_atomic_set_property, .atomic_get_property = amdgpu_dm_connector_atomic_get_property, .late_register = amdgpu_dm_connector_late_register, .early_unregister = amdgpu_dm_connector_unregister, .force = amdgpu_dm_connector_funcs_force }; static int get_modes(struct drm_connector *connector) { return amdgpu_dm_connector_get_modes(connector); } static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; struct edid *edid; if (!aconnector->base.edid_blob_ptr) { /* if connector->edid_override valid, pass * it to edid_override to edid_blob_ptr */ drm_edid_override_connector_update(&aconnector->base); if (!aconnector->base.edid_blob_ptr) { DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", aconnector->base.name); aconnector->base.force = DRM_FORCE_OFF; return; } } edid = (struct edid *) aconnector->base.edid_blob_ptr->data; aconnector->edid = edid; aconnector->dc_em_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)edid, (edid->extensions + 1) * EDID_LENGTH, &init_params); if (aconnector->base.force == DRM_FORCE_ON) { aconnector->dc_sink = aconnector->dc_link->local_sink ? aconnector->dc_link->local_sink : aconnector->dc_em_sink; dc_sink_retain(aconnector->dc_sink); } } static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) { struct dc_link *link = (struct dc_link *)aconnector->dc_link; /* * In case of headless boot with force on for DP managed connector * Those settings have to be != 0 to get initial modeset */ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { link->verified_link_cap.lane_count = LANE_COUNT_FOUR; link->verified_link_cap.link_rate = LINK_RATE_HIGH2; } create_eml_sink(aconnector); } static enum dc_status dm_validate_stream_and_context(struct dc *dc, struct dc_stream_state *stream) { enum dc_status dc_result = DC_ERROR_UNEXPECTED; struct dc_plane_state *dc_plane_state = NULL; struct dc_state *dc_state = NULL; if (!stream) goto cleanup; dc_plane_state = dc_create_plane_state(dc); if (!dc_plane_state) goto cleanup; dc_state = dc_create_state(dc); if (!dc_state) goto cleanup; /* populate stream to plane */ dc_plane_state->src_rect.height = stream->src.height; dc_plane_state->src_rect.width = stream->src.width; dc_plane_state->dst_rect.height = stream->src.height; dc_plane_state->dst_rect.width = stream->src.width; dc_plane_state->clip_rect.height = stream->src.height; dc_plane_state->clip_rect.width = stream->src.width; dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; dc_plane_state->plane_size.surface_size.height = stream->src.height; dc_plane_state->plane_size.surface_size.width = stream->src.width; dc_plane_state->plane_size.chroma_size.height = stream->src.height; dc_plane_state->plane_size.chroma_size.width = stream->src.width; dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; dc_plane_state->rotation = ROTATION_ANGLE_0; dc_plane_state->is_tiling_rotated = false; dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; dc_result = dc_validate_stream(dc, 
stream); if (dc_result == DC_OK) dc_result = dc_validate_plane(dc, dc_plane_state); if (dc_result == DC_OK) dc_result = dc_add_stream_to_ctx(dc, dc_state, stream); if (dc_result == DC_OK && !dc_add_plane_to_context( dc, stream, dc_plane_state, dc_state)) dc_result = DC_FAIL_ATTACH_SURFACES; if (dc_result == DC_OK) dc_result = dc_validate_global_state(dc, dc_state, true); cleanup: if (dc_state) dc_release_state(dc_state); if (dc_plane_state) dc_plane_state_release(dc_plane_state); return dc_result; } struct dc_stream_state * create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream) { struct drm_connector *connector = &aconnector->base; struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dc_stream_state *stream; const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; do { stream = create_stream_for_sink(aconnector, drm_mode, dm_state, old_stream, requested_bpc); if (stream == NULL) { DRM_ERROR("Failed to create stream for sink!\n"); break; } dc_result = dc_validate_stream(adev->dm.dc, stream); if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); if (dc_result == DC_OK) dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, dc_result, dc_status_to_str(dc_result)); dc_stream_release(stream); stream = NULL; requested_bpc -= 2; /* lower bpc to retry validation */ } } while (stream == NULL && requested_bpc >= 6); if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); aconnector->force_yuv420_output = true; stream = create_validate_stream_for_sink(aconnector, drm_mode, dm_state, old_stream); aconnector->force_yuv420_output = false; } return stream; } enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { int result = MODE_ERROR; struct dc_sink *dc_sink; /* TODO: Unhardcode stream count */ struct dc_stream_state *stream; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || (mode->flags & DRM_MODE_FLAG_DBLSCAN)) return result; /* * Only run this the first time mode_valid is called to initilialize * EDID mgmt */ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && !aconnector->dc_em_sink) handle_edid_mgmt(aconnector); dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && aconnector->base.force != DRM_FORCE_ON) { DRM_ERROR("dc_sink is NULL!\n"); goto fail; } drm_mode_set_crtcinfo(mode, 0); stream = create_validate_stream_for_sink(aconnector, mode, to_dm_connector_state(connector->state), NULL); if (stream) { dc_stream_release(stream); result = MODE_OK; } fail: /* TODO: error handling*/ return result; } static int fill_hdr_info_packet(const struct drm_connector_state *state, struct dc_info_packet *out) { struct hdmi_drm_infoframe frame; unsigned char buf[30]; /* 26 + 4 */ ssize_t len; int ret, i; memset(out, 0, sizeof(*out)); if (!state->hdr_output_metadata) return 0; ret = 
drm_hdmi_infoframe_set_hdr_metadata(&frame, state); if (ret) return ret; len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); if (len < 0) return (int)len; /* Static metadata is a fixed 26 bytes + 4 byte header. */ if (len != 30) return -EINVAL; /* Prepare the infopacket for DC. */ switch (state->connector->connector_type) { case DRM_MODE_CONNECTOR_HDMIA: out->hb0 = 0x87; /* type */ out->hb1 = 0x01; /* version */ out->hb2 = 0x1A; /* length */ out->sb[0] = buf[3]; /* checksum */ i = 1; break; case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: out->hb0 = 0x00; /* sdp id, zero */ out->hb1 = 0x87; /* type */ out->hb2 = 0x1D; /* payload len - 1 */ out->hb3 = (0x13 << 2); /* sdp version */ out->sb[0] = 0x01; /* version */ out->sb[1] = 0x1A; /* length */ i = 2; break; default: return -EINVAL; } memcpy(&out->sb[i], &buf[4], 26); out->valid = true; print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, sizeof(out->sb), false); return 0; } static int amdgpu_dm_connector_atomic_check(struct drm_connector *conn, struct drm_atomic_state *state) { struct drm_connector_state *new_con_state = drm_atomic_get_new_connector_state(state, conn); struct drm_connector_state *old_con_state = drm_atomic_get_old_connector_state(state, conn); struct drm_crtc *crtc = new_con_state->crtc; struct drm_crtc_state *new_crtc_state; struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; trace_amdgpu_dm_connector_atomic_check(new_con_state); if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); if (ret < 0) return ret; } if (!crtc) return 0; if (new_con_state->colorspace != old_con_state->colorspace) { new_crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(new_crtc_state)) return PTR_ERR(new_crtc_state); new_crtc_state->mode_changed = true; } if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { struct dc_info_packet hdr_infopacket; ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); if (ret) return ret; new_crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(new_crtc_state)) return PTR_ERR(new_crtc_state); /* * DC considers the stream backends changed if the * static metadata changes. Forcing the modeset also * gives a simple way for userspace to switch from * 8bpc to 10bpc when setting the metadata to enter * or exit HDR. * * Changing the static metadata after it's been * set is permissible, however. So only force a * modeset if we're entering or exiting HDR. */ new_crtc_state->mode_changed = new_crtc_state->mode_changed || !old_con_state->hdr_output_metadata || !new_con_state->hdr_output_metadata; } return 0; } static const struct drm_connector_helper_funcs amdgpu_dm_connector_helper_funcs = { /* * If hotplugging a second bigger display in FB Con mode, bigger resolution * modes will be filtered by drm_mode_validate_size(), and those modes * are missing after user start lightdm. So we need to renew modes list. 
* in get_modes call back, not just return the modes count */ .get_modes = get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, .atomic_check = amdgpu_dm_connector_atomic_check, }; static void dm_encoder_helper_disable(struct drm_encoder *encoder) { } int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) { switch (display_color_depth) { case COLOR_DEPTH_666: return 6; case COLOR_DEPTH_888: return 8; case COLOR_DEPTH_101010: return 10; case COLOR_DEPTH_121212: return 12; case COLOR_DEPTH_141414: return 14; case COLOR_DEPTH_161616: return 16; default: break; } return 0; } static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_atomic_state *state = crtc_state->state; struct drm_connector *connector = conn_state->connector; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; struct drm_dp_mst_topology_state *mst_state; enum dc_color_depth color_depth; int clock, bpp = 0; bool is_y420 = false; if (!aconnector->mst_output_port) return 0; mst_port = aconnector->mst_output_port; mst_mgr = &aconnector->mst_root->mst_mgr; if (!crtc_state->connectors_changed && !crtc_state->mode_changed) return 0; mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); if (IS_ERR(mst_state)) return PTR_ERR(mst_state); if (!mst_state->pbn_div) mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && aconnector->force_yuv420_output; color_depth = convert_color_depth_from_display_info(connector, is_y420, max_bpc); bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; clock = adjusted_mode->clock; dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); } dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, dm_new_connector_state->pbn); if (dm_new_connector_state->vcpi_slots < 0) { DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); return dm_new_connector_state->vcpi_slots; } return 0; } const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { .disable = dm_encoder_helper_disable, .atomic_check = dm_encoder_helper_atomic_check }; static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars) { struct dc_stream_state *stream = NULL; struct drm_connector *connector; struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; int i, j, ret; int vcpi, pbn_div, pbn, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port) continue; if (!new_con_state || !new_con_state->crtc) continue; dm_conn_state = to_dm_connector_state(new_con_state); for (j = 0; j < dc_state->stream_count; j++) { stream = dc_state->streams[j]; if (!stream) continue; if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) break; stream = NULL; } if (!stream) continue; pbn_div = dm_mst_get_pbn_divider(stream->link); /* pbn is 
calculated by compute_mst_dsc_configs_for_state*/ for (j = 0; j < dc_state->stream_count; j++) { if (vars[j].aconnector == aconnector) { pbn = vars[j].pbn; break; } } if (j == dc_state->stream_count) continue; slot_num = DIV_ROUND_UP(pbn, pbn_div); if (stream->timing.flags.DSC != 1) { dm_conn_state->pbn = pbn; dm_conn_state->vcpi_slots = slot_num; ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, dm_conn_state->pbn, false); if (ret < 0) return ret; continue; } vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); if (vcpi < 0) return vcpi; dm_conn_state->pbn = pbn; dm_conn_state->vcpi_slots = vcpi; } return 0; } static int to_drm_connector_type(enum signal_type st) { switch (st) { case SIGNAL_TYPE_HDMI_TYPE_A: return DRM_MODE_CONNECTOR_HDMIA; case SIGNAL_TYPE_EDP: return DRM_MODE_CONNECTOR_eDP; case SIGNAL_TYPE_LVDS: return DRM_MODE_CONNECTOR_LVDS; case SIGNAL_TYPE_RGB: return DRM_MODE_CONNECTOR_VGA; case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: return DRM_MODE_CONNECTOR_DisplayPort; case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_DVI_SINGLE_LINK: return DRM_MODE_CONNECTOR_DVID; case SIGNAL_TYPE_VIRTUAL: return DRM_MODE_CONNECTOR_VIRTUAL; default: return DRM_MODE_CONNECTOR_Unknown; } } static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) { struct drm_encoder *encoder; /* There is only one encoder per connector */ drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; } static void amdgpu_dm_get_native_mode(struct drm_connector *connector) { struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; encoder = amdgpu_dm_connector_to_encoder(connector); if (encoder == NULL) return; amdgpu_encoder = to_amdgpu_encoder(encoder); amdgpu_encoder->native_mode.clock = 0; if (!list_empty(&connector->probed_modes)) { struct drm_display_mode *preferred_mode = NULL; list_for_each_entry(preferred_mode, &connector->probed_modes, head) { if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) amdgpu_encoder->native_mode = *preferred_mode; break; } } } static struct drm_display_mode * amdgpu_dm_create_common_mode(struct drm_encoder *encoder, char *name, int hdisplay, int vdisplay) { struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_display_mode *mode = NULL; struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; mode = drm_mode_duplicate(dev, native_mode); if (mode == NULL) return NULL; mode->hdisplay = hdisplay; mode->vdisplay = vdisplay; mode->type &= ~DRM_MODE_TYPE_PREFERRED; strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); return mode; } static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_display_mode *mode = NULL; struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); int i; int n; struct mode_size { char name[DRM_DISPLAY_MODE_LEN]; int w; int h; } common_modes[] = { { "640x480", 640, 480}, { "800x600", 800, 600}, { "1024x768", 1024, 768}, { "1280x720", 1280, 720}, { "1280x800", 1280, 800}, {"1280x1024", 1280, 1024}, { "1440x900", 1440, 900}, {"1680x1050", 1680, 1050}, {"1600x1200", 1600, 1200}, {"1920x1080", 1920, 1080}, {"1920x1200", 1920, 1200} }; n = ARRAY_SIZE(common_modes); for (i = 0; i < n; i++) { struct drm_display_mode *curmode = 
			NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	mutex_lock(&connector->dev->mode_config.mutex);
	amdgpu_dm_connector_get_modes(connector);
	mutex_unlock(&connector->dev->mode_config.mutex);

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						   DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						   native_mode->hdisplay,
						   native_mode->vdisplay);
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* sorting the probed modes before calling function
		 * amdgpu_dm_get_native_mode() since EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, 3840x2160
		 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	u32 new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976 - TV/NTSC
	 * 24     - Cinema
	 * 25     - TV/PAL
	 * 29.97  - TV/NTSC
	 * 30     - TV/NTSC
	 * 48     - Cinema HFR
	 * 50     - TV/PAL
	 * 60     - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const u32 common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find the mode with the highest refresh rate at the same resolution
	 * as the preferred mode. For some monitors, the preferred mode is not
	 * the mode with the highest supported refresh rate.
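	 *
	 * Each common rate below is then synthesized by stretching vtotal of
	 * that mode. With illustrative numbers (not from this file): a
	 * 1920x1080@60 mode with clock 148500 kHz, htotal 2200 and vtotal 1125
	 * targeted at 48 Hz gives target_vtotal = 148500 * 1000 * 1000 /
	 * (48000 * 2200) = 1406, i.e. the vertical front porch grows by
	 * 1406 - 1125 = 281 lines while the pixel clock stays untouched.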
*/ m = get_highest_refresh_rate_mode(aconnector, true); if (!m) return 0; for (i = 0; i < ARRAY_SIZE(common_rates); i++) { u64 target_vtotal, target_vtotal_diff; u64 num, den; if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) continue; if (common_rates[i] < aconnector->min_vfreq * 1000 || common_rates[i] > aconnector->max_vfreq * 1000) continue; num = (unsigned long long)m->clock * 1000 * 1000; den = common_rates[i] * (unsigned long long)m->htotal; target_vtotal = div_u64(num, den); target_vtotal_diff = target_vtotal - m->vtotal; /* Check for illegal modes */ if (m->vsync_start + target_vtotal_diff < m->vdisplay || m->vsync_end + target_vtotal_diff < m->vsync_start || m->vtotal + target_vtotal_diff < m->vsync_end) continue; new_mode = drm_mode_duplicate(aconnector->base.dev, m); if (!new_mode) goto out; new_mode->vtotal += (u16)target_vtotal_diff; new_mode->vsync_start += (u16)target_vtotal_diff; new_mode->vsync_end += (u16)target_vtotal_diff; new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; new_mode->type |= DRM_MODE_TYPE_DRIVER; if (!is_duplicate_mode(aconnector, new_mode)) { drm_mode_probed_add(&aconnector->base, new_mode); new_modes_count += 1; } else drm_mode_destroy(aconnector->base.dev, new_mode); } out: return new_modes_count; } static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, struct edid *edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); if (!edid) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) amdgpu_dm_connector->num_modes += add_fs_modes(amdgpu_dm_connector); } static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct drm_encoder *encoder; struct edid *edid = amdgpu_dm_connector->edid; struct dc_link_settings *verified_link_cap = &amdgpu_dm_connector->dc_link->verified_link_cap; const struct dc *dc = amdgpu_dm_connector->dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); if (!drm_edid_is_valid(edid)) { amdgpu_dm_connector->num_modes = drm_add_modes_noedid(connector, 640, 480); if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); return amdgpu_dm_connector->num_modes; } static const u32 supported_colorspaces = BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | BIT(DRM_MODE_COLORIMETRY_OPRGB) | BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, int connector_type, struct dc_link *link, int link_index) { struct amdgpu_device *adev = drm_to_adev(dm->ddev); /* * Some of the properties below require access to state, like bpc. * Allocate some default initial connector state with our reset helper. 
 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->bl_idx = -1;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	aconnector->pack_sdp_v1_3 = false;
	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
	mutex_init(&aconnector->hpd_lock);
	mutex_init(&aconnector->handle_mst_msg_ready);

	/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_root)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	aconnector->base.state->max_bpc = 16;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
		if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
			drm_connector_attach_colorspace_property(&aconnector->base);
	} else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
		   connector_type == DRM_MODE_CONNECTOR_eDP) {
		if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
			drm_connector_attach_colorspace_property(&aconnector->base);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_root)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine =
I2C_COMMAND_ENGINE_DEFAULT; cmd.speed = 100; for (i = 0; i < num; i++) { cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); cmd.payloads[i].address = msgs[i].addr; cmd.payloads[i].length = msgs[i].len; cmd.payloads[i].data = msgs[i].buf; } if (dc_submit_i2c( ddc_service->ctx->dc, ddc_service->link->link_index, &cmd)) result = num; kfree(cmd.payloads); return result; } static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm amdgpu_dm_i2c_algo = { .master_xfer = amdgpu_dm_i2c_xfer, .functionality = amdgpu_dm_i2c_func, }; static struct amdgpu_i2c_adapter * create_i2c(struct ddc_service *ddc_service, int link_index, int *res) { struct amdgpu_device *adev = ddc_service->ctx->driver_context; struct amdgpu_i2c_adapter *i2c; i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); if (!i2c) return NULL; i2c->base.owner = THIS_MODULE; i2c->base.class = I2C_CLASS_DDC; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); i2c_set_adapdata(&i2c->base, i2c); i2c->ddc_service = ddc_service; return i2c; } /* * Note: this function assumes that dc_link_detect() was called for the * dc_link which will be represented by this aconnector. */ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, u32 link_index, struct amdgpu_encoder *aencoder) { int res = 0; int connector_type; struct dc *dc = dm->dc; struct dc_link *link = dc_get_link_at_index(dc, link_index); struct amdgpu_i2c_adapter *i2c; link->priv = aconnector; i2c = create_i2c(link->ddc, link->link_index, &res); if (!i2c) { DRM_ERROR("Failed to create i2c adapter data\n"); return -ENOMEM; } aconnector->i2c = i2c; res = i2c_add_adapter(&i2c->base); if (res) { DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); goto out_free; } connector_type = to_drm_connector_type(link->connector_signal); res = drm_connector_init_with_ddc( dm->ddev, &aconnector->base, &amdgpu_dm_connector_funcs, connector_type, &i2c->base); if (res) { DRM_ERROR("connector_init failed\n"); aconnector->connector_id = -1; goto out_free; } drm_connector_helper_add( &aconnector->base, &amdgpu_dm_connector_helper_funcs); amdgpu_dm_connector_init_helper( dm, aconnector, connector_type, link, link_index); drm_connector_attach_encoder( &aconnector->base, &aencoder->base); if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); out_free: if (res) { kfree(i2c); aconnector->i2c = NULL; } return res; } int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) { switch (adev->mode_info.num_crtc) { case 1: return 0x1; case 2: return 0x3; case 3: return 0x7; case 4: return 0xf; case 5: return 0x1f; case 6: default: return 0x3f; } } static int amdgpu_dm_encoder_init(struct drm_device *dev, struct amdgpu_encoder *aencoder, uint32_t link_index) { struct amdgpu_device *adev = drm_to_adev(dev); int res = drm_encoder_init(dev, &aencoder->base, &amdgpu_dm_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); if (!res) aencoder->encoder_id = link_index; else aencoder->encoder_id = -1; drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); return res; } static void manage_dm_interrupts(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, bool enable) 
{ /* * We have no guarantee that the frontend index maps to the same * backend index - some even map to more than one. * * TODO: Use a different interrupt or check DC itself for the mapping. */ int irq_type = amdgpu_display_crtc_idx_to_irq_type( adev, acrtc->crtc_id); if (enable) { drm_crtc_vblank_on(&acrtc->base); amdgpu_irq_get( adev, &adev->pageflip_irq, irq_type); #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) amdgpu_irq_get( adev, &adev->vline0_irq, irq_type); #endif } else { #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) amdgpu_irq_put( adev, &adev->vline0_irq, irq_type); #endif amdgpu_irq_put( adev, &adev->pageflip_irq, irq_type); drm_crtc_vblank_off(&acrtc->base); } } static void dm_update_pflip_irq_state(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc) { int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); /** * This reads the current state for the IRQ and force reapplies * the setting to hardware. */ amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); } static bool is_scaling_state_different(const struct dm_connector_state *dm_state, const struct dm_connector_state *old_dm_state) { if (dm_state->scaling != old_dm_state->scaling) return true; if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) return true; } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) return true; } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || dm_state->underscan_vborder != old_dm_state->underscan_vborder) return true; return false; } static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, struct drm_crtc_state *old_crtc_state, struct drm_connector_state *new_conn_state, struct drm_connector_state *old_conn_state, const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", connector->index, connector->status, connector->dpms); pr_debug("[HDCP_DM] state protection old: %x new: %x\n", old_conn_state->content_protection, new_conn_state->content_protection); if (old_crtc_state) pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", old_crtc_state->enable, old_crtc_state->active, old_crtc_state->mode_changed, old_crtc_state->active_changed, old_crtc_state->connectors_changed); if (new_crtc_state) pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", new_crtc_state->enable, new_crtc_state->active, new_crtc_state->mode_changed, new_crtc_state->active_changed, new_crtc_state->connectors_changed); /* hdcp content type change */ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); return true; } /* CP is being re enabled, ignore this */ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { if (new_crtc_state && new_crtc_state->mode_changed) { new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; pr_debug("[HDCP_DM] 
ENABLED->DESIRED & mode_changed %s :true\n", __func__); return true; } new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); return false; } /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED * * Handles: UNDESIRED -> ENABLED */ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; /* Stream removed and re-enabled * * Can sometimes overlap with the HPD case, * thus set update_hdcp to false to avoid * setting HDCP multiple times. * * Handles: DESIRED -> DESIRED (Special case) */ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && new_conn_state->crtc && new_conn_state->crtc->enabled && connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { dm_con_state->update_hdcp = false; pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", __func__); return true; } /* Hot-plug, headless s3, dpms * * Only start HDCP if the display is connected/enabled. * update_hdcp flag will be set to false until the next * HPD comes in. * * Handles: DESIRED -> DESIRED (Special case) */ if (dm_con_state->update_hdcp && new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { dm_con_state->update_hdcp = false; pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", __func__); return true; } if (old_conn_state->content_protection == new_conn_state->content_protection) { if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { if (new_crtc_state && new_crtc_state->mode_changed) { pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", __func__); return true; } pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", __func__); return false; } pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); return false; } if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", __func__); return true; } pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); return false; } static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) { /* this is the update mode case */ acrtc->otg_inst = -1; acrtc->enabled = false; } static void prepare_flip_isr(struct amdgpu_crtc *acrtc) { assert_spin_locked(&acrtc->base.dev->event_lock); WARN_ON(acrtc->event); acrtc->event = acrtc->base.state->event; /* Set the flip status */ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; /* Mark this event as consumed */ acrtc->base.state->event = NULL; DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", acrtc->crtc_id); } static void update_freesync_state_on_stream( struct amdgpu_display_manager *dm, struct dm_crtc_state *new_crtc_state, struct dc_stream_state *new_stream, struct dc_plane_state *surface, u32 flip_timestamp_in_us) { struct mod_vrr_params vrr_params; struct dc_info_packet vrr_infopacket = {0}; struct amdgpu_device *adev = dm->adev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); unsigned long flags; bool pack_sdp_v1_3 = false; struct amdgpu_dm_connector *aconn; enum vrr_packet_type packet_type 
= PACKET_TYPE_VRR; if (!new_stream) return; /* * TODO: Determine why min/max totals and vrefresh can be 0 here. * For now it's sufficient to just guard against these conditions. */ if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); vrr_params = acrtc->dm_irq_params.vrr_params; if (surface) { mod_freesync_handle_preflip( dm->freesync_module, surface, new_stream, flip_timestamp_in_us, &vrr_params); if (adev->family < AMDGPU_FAMILY_AI && amdgpu_dm_crtc_vrr_active(new_crtc_state)) { mod_freesync_handle_v_update(dm->freesync_module, new_stream, &vrr_params); /* Need to call this before the frame ends. */ dc_stream_adjust_vmin_vmax(dm->dc, new_crtc_state->stream, &vrr_params.adjust); } } aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { pack_sdp_v1_3 = aconn->pack_sdp_v1_3; if (aconn->vsdb_info.amd_vsdb_version == 1) packet_type = PACKET_TYPE_FS_V1; else if (aconn->vsdb_info.amd_vsdb_version == 2) packet_type = PACKET_TYPE_FS_V2; else if (aconn->vsdb_info.amd_vsdb_version == 3) packet_type = PACKET_TYPE_FS_V3; mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, &new_stream->adaptive_sync_infopacket); } mod_freesync_build_vrr_infopacket( dm->freesync_module, new_stream, &vrr_params, packet_type, TRANSFER_FUNC_UNKNOWN, &vrr_infopacket, pack_sdp_v1_3); new_crtc_state->freesync_vrr_info_changed |= (memcmp(&new_crtc_state->vrr_infopacket, &vrr_infopacket, sizeof(vrr_infopacket)) != 0); acrtc->dm_irq_params.vrr_params = vrr_params; new_crtc_state->vrr_infopacket = vrr_infopacket; new_stream->vrr_infopacket = vrr_infopacket; new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); if (new_crtc_state->freesync_vrr_info_changed) DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", new_crtc_state->base.crtc->base.id, (int)new_crtc_state->base.vrr_enabled, (int)vrr_params.state); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } static void update_stream_irq_parameters( struct amdgpu_display_manager *dm, struct dm_crtc_state *new_crtc_state) { struct dc_stream_state *new_stream = new_crtc_state->stream; struct mod_vrr_params vrr_params; struct mod_freesync_config config = new_crtc_state->freesync_config; struct amdgpu_device *adev = dm->adev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); unsigned long flags; if (!new_stream) return; /* * TODO: Determine why min/max totals and vrefresh can be 0 here. * For now it's sufficient to just guard against these conditions. */ if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); vrr_params = acrtc->dm_irq_params.vrr_params; if (new_crtc_state->vrr_supported && config.min_refresh_in_uhz && config.max_refresh_in_uhz) { /* * if freesync compatible mode was set, config.state will be set * in atomic check */ if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; vrr_params.state = VRR_STATE_ACTIVE_FIXED; } else { config.state = new_crtc_state->base.vrr_enabled ? 
VRR_STATE_ACTIVE_VARIABLE : VRR_STATE_INACTIVE; } } else { config.state = VRR_STATE_UNSUPPORTED; } mod_freesync_build_vrr_params(dm->freesync_module, new_stream, &config, &vrr_params); new_crtc_state->freesync_config = config; /* Copy state for access from DM IRQ handler */ acrtc->dm_irq_params.freesync_config = config; acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; acrtc->dm_irq_params.vrr_params = vrr_params; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); if (!old_vrr_active && new_vrr_active) { /* Transition VRR inactive -> active: * While VRR is active, we must not disable vblank irq, as a * reenable after disable would compute bogus vblank/pflip * timestamps if it likely happened inside display front-porch. * * We also need vupdate irq for the actual core vblank handling * at end of vblank. */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", __func__, new_state->base.crtc->base.id); } else if (old_vrr_active && !new_vrr_active) { /* Transition VRR active -> inactive: * Allow vblank irq disable again for fixed refresh rate. */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); drm_crtc_vblank_put(new_state->base.crtc); DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", __func__, new_state->base.crtc->base.id); } } static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) { struct drm_plane *plane; struct drm_plane_state *old_plane_state; int i; /* * TODO: Make this per-stream so we don't issue redundant updates for * commits with multiple streams. */ for_each_old_plane_in_state(state, plane, old_plane_state, i) if (plane->type == DRM_PLANE_TYPE_CURSOR) amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); } static inline uint32_t get_mem_type(struct drm_framebuffer *fb) { struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); return abo->tbo.resource ? 
abo->tbo.resource->mem_type : 0; } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_device *dev, struct amdgpu_display_manager *dm, struct drm_crtc *pcrtc, bool wait_for_vblank) { u32 i; u64 timestamp_ns = ktime_get_ns(); struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); struct drm_crtc_state *new_pcrtc_state = drm_atomic_get_new_crtc_state(state, pcrtc); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); int planes_count = 0, vpos, hpos; unsigned long flags; u32 target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); bool cursor_update = false; bool pflip_present = false; bool dirty_rects_changed = false; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; struct dc_scaling_info scaling_infos[MAX_SURFACES]; struct dc_flip_addrs flip_addrs[MAX_SURFACES]; struct dc_stream_update stream_update; } *bundle; bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { dm_error("Failed to allocate update bundle\n"); goto cleanup; } /* * Disable the cursor first if we're disabling all the planes. * It'll remain on the screen after the planes are re-enabled * if we don't. */ if (acrtc_state->active_planes == 0) amdgpu_dm_commit_cursors(state); /* update planes when needed */ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state; struct drm_framebuffer *fb = new_plane_state->fb; struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; bool plane_needs_flip; struct dc_plane_state *dc_plane; struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); /* Cursor plane is handled after stream updates */ if (plane->type == DRM_PLANE_TYPE_CURSOR) { if ((fb && crtc == pcrtc) || (old_plane_state->fb && old_plane_state->crtc == pcrtc)) cursor_update = true; continue; } if (!fb || !crtc || pcrtc != crtc) continue; new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state->active) continue; dc_plane = dm_new_plane_state->dc_state; if (!dc_plane) continue; bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; } amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, &bundle->scaling_infos[planes_count]); bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count]; plane_needs_flip = old_plane_state->fb && new_plane_state->fb; pflip_present = pflip_present || plane_needs_flip; if (!plane_needs_flip) { planes_count += 1; continue; } fill_dc_plane_info_and_addr( dm->adev, new_plane_state, afb->tiling_flags, &bundle->plane_infos[planes_count], &bundle->flip_addrs[planes_count].address, afb->tmz_surface, false); drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", new_plane_state->plane->index, bundle->plane_infos[planes_count].dcc.enable); bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; if 
(acrtc_state->stream->link->psr_settings.psr_feature_enabled || acrtc_state->stream->link->replay_settings.replay_feature_enabled) { fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, new_crtc_state, &bundle->flip_addrs[planes_count], &dirty_rects_changed); /* * If the dirty regions changed, PSR-SU need to be disabled temporarily * and enabled it again after dirty regions are stable to avoid video glitch. * PSR-SU will be enabled in vblank_control_worker() if user pause the video * during the PSR-SU was disabled. */ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && acrtc_attach->dm_irq_params.allow_psr_entry && #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif dirty_rects_changed) { mutex_lock(&dm->dc_lock); acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = timestamp_ns; if (acrtc_state->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(acrtc_state->stream); mutex_unlock(&dm->dc_lock); } } /* * Only allow immediate flips for fast updates that don't * change memory domain, FB pitch, DCC state, rotation or * mirroring. * * dm_crtc_helper_atomic_check() only accepts async flips with * fast updates. */ if (crtc->state->async_flip && (acrtc_state->update_type != UPDATE_TYPE_FAST || get_mem_type(old_plane_state->fb) != get_mem_type(fb))) drm_warn_once(state->dev, "[PLANE:%d:%s] async flip with non-fast update\n", plane->base.id, plane->name); bundle->flip_addrs[planes_count].flip_immediate = crtc->state->async_flip && acrtc_state->update_type == UPDATE_TYPE_FAST && get_mem_type(old_plane_state->fb) == get_mem_type(fb); timestamp_ns = ktime_get_ns(); bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; bundle->surface_updates[planes_count].surface = dc_plane; if (!bundle->surface_updates[planes_count].surface) { DRM_ERROR("No surface for CRTC: id=%d\n", acrtc_attach->crtc_id); continue; } if (plane == pcrtc->primary) update_freesync_state_on_stream( dm, acrtc_state, acrtc_state->stream, dc_plane, bundle->flip_addrs[planes_count].flip_timestamp_in_us); drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", __func__, bundle->flip_addrs[planes_count].address.grph.addr.high_part, bundle->flip_addrs[planes_count].address.grph.addr.low_part); planes_count += 1; } if (pflip_present) { if (!vrr_active) { /* Use old throttling in non-vrr fixed refresh rate mode * to keep flip scheduling based on target vblank counts * working in a backwards compatible way, e.g., for * clients using the GLX_OML_sync_control extension or * DRI3/Present extension with defined target_msc. */ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); } else { /* For variable refresh rate mode only: * Get vblank of last completed flip to avoid > 1 vrr * flips per video frame by use of throttling, but allow * flip programming anywhere in the possibly large * variable vrr vblank interval for fine-grained flip * timing control and more opportunity to avoid stutter * on late submission of flips. 
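 * The signed cast in the scanout wait loop below keeps the target_vblank comparison well defined when the vblank counter wraps.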
*/ spin_lock_irqsave(&pcrtc->dev->event_lock, flags); last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } target_vblank = last_flip_vblank + wait_for_vblank; /* * Wait until we're out of the vertical blank period before the one * targeted by the flip */ while ((acrtc_attach->enabled && (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 0, &vpos, &hpos, NULL, NULL, &pcrtc->hwmode) & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && (int)(target_vblank - amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { usleep_range(1000, 1100); } /** * Prepare the flip event for the pageflip interrupt to handle. * * This only works in the case where we've already turned on the * appropriate hardware blocks (eg. HUBP) so in the transition case * from 0 -> n planes we have to skip a hardware generated event * and rely on sending it from software. */ if (acrtc_attach->base.state->event && acrtc_state->active_planes > 0) { drm_crtc_vblank_get(pcrtc); spin_lock_irqsave(&pcrtc->dev->event_lock, flags); WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); prepare_flip_isr(acrtc_attach); spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } if (acrtc_state->stream) { if (acrtc_state->freesync_vrr_info_changed) bundle->stream_update.vrr_infopacket = &acrtc_state->stream->vrr_infopacket; } } else if (cursor_update && acrtc_state->active_planes > 0 && acrtc_attach->base.state->event) { drm_crtc_vblank_get(pcrtc); spin_lock_irqsave(&pcrtc->dev->event_lock, flags); acrtc_attach->event = acrtc_attach->base.state->event; acrtc_attach->base.state->event = NULL; spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } /* Update the planes if changed or disable if we don't have any. */ if ((planes_count || acrtc_state->active_planes == 0) && acrtc_state->stream) { /* * If PSR or idle optimizations are enabled then flush out * any pending work before hardware programming. */ if (dm->vblank_control_workqueue) flush_workqueue(dm->vblank_control_workqueue); bundle->stream_update.stream = acrtc_state->stream; if (new_pcrtc_state->mode_changed) { bundle->stream_update.src = acrtc_state->stream->src; bundle->stream_update.dst = acrtc_state->stream->dst; } if (new_pcrtc_state->color_mgmt_changed) { /* * TODO: This isn't fully correct since we've actually * already modified the stream in place. */ bundle->stream_update.gamut_remap = &acrtc_state->stream->gamut_remap_matrix; bundle->stream_update.output_csc_transform = &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func; } acrtc_state->stream->abm_level = acrtc_state->abm_level; if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) bundle->stream_update.abm_level = &acrtc_state->abm_level; mutex_lock(&dm->dc_lock); if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && acrtc_state->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(acrtc_state->stream); mutex_unlock(&dm->dc_lock); /* * If FreeSync state on the stream has changed then we need to * re-adjust the min/max bounds now that DC doesn't handle this * as part of commit. 
*/ if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { spin_lock_irqsave(&pcrtc->dev->event_lock, flags); dc_stream_adjust_vmin_vmax( dm->dc, acrtc_state->stream, &acrtc_attach->dm_irq_params.vrr_params.adjust); spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } mutex_lock(&dm->dc_lock); update_planes_and_stream_adapter(dm->dc, acrtc_state->update_type, planes_count, acrtc_state->stream, &bundle->stream_update, bundle->surface_updates); /** * Enable or disable the interrupts on the backend. * * Most pipes are put into power gating when unused. * * When power gating is enabled on a pipe we lose the * interrupt enablement state when power gating is disabled. * * So we need to update the IRQ control state in hardware * whenever the pipe turns on (since it could be previously * power gated) or off (since some pipes can't be power gated * on some ASICs). */ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) dm_update_pflip_irq_state(drm_to_adev(dev), acrtc_attach); if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && !acrtc_state->stream->link->psr_settings.psr_feature_enabled) amdgpu_dm_link_setup_psr(acrtc_state->stream); /* Decrement skip count when PSR is enabled and we're doing fast updates. */ if (acrtc_state->update_type == UPDATE_TYPE_FAST && acrtc_state->stream->link->psr_settings.psr_feature_enabled) { struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; if (aconn->psr_skip_count > 0) aconn->psr_skip_count--; /* Allow PSR when skip count is 0. */ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; /* * If sink supports PSR SU, there is no need to rely on * a vblank event disable request to enable PSR. PSR SU * can be enabled immediately once OS demonstrates an * adequate number of fast atomic commits to notify KMD * of update events. See `vblank_control_worker()`. */ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && acrtc_attach->dm_irq_params.allow_psr_entry && #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif !acrtc_state->stream->link->psr_settings.psr_allow_active && (timestamp_ns - acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > 500000000) amdgpu_dm_psr_enable(acrtc_state->stream); } else { acrtc_attach->dm_irq_params.allow_psr_entry = false; } mutex_unlock(&dm->dc_lock); } /* * Update cursor state *after* programming all the planes. * This avoids redundant programming in the case where we're going * to be disabling a single plane - those pipes are being disabled. */ if (acrtc_state->active_planes) amdgpu_dm_commit_cursors(state); cleanup: kfree(bundle); } static void amdgpu_dm_commit_audio(struct drm_device *dev, struct drm_atomic_state *state) { struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_state *old_con_state, *new_con_state; struct drm_crtc_state *new_crtc_state; struct dm_crtc_state *new_dm_crtc_state; const struct dc_stream_status *status; int i, inst; /* Notify device removals. */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { if (old_con_state->crtc != new_con_state->crtc) { /* CRTC changes require notification. 
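 * (the connector was either moved to a different CRTC or lost its CRTC entirely).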
*/ goto notify; } if (!new_con_state->crtc) continue; new_crtc_state = drm_atomic_get_new_crtc_state( state, new_con_state->crtc); if (!new_crtc_state) continue; if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; notify: aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); inst = aconnector->audio_inst; aconnector->audio_inst = -1; mutex_unlock(&adev->dm.audio_lock); amdgpu_dm_audio_eld_notify(adev, inst); } /* Notify audio device additions. */ for_each_new_connector_in_state(state, connector, new_con_state, i) { if (!new_con_state->crtc) continue; new_crtc_state = drm_atomic_get_new_crtc_state( state, new_con_state->crtc); if (!new_crtc_state) continue; if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); if (!new_dm_crtc_state->stream) continue; status = dc_stream_get_status(new_dm_crtc_state->stream); if (!status) continue; aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); inst = status->audio_inst; aconnector->audio_inst = inst; mutex_unlock(&adev->dm.audio_lock); amdgpu_dm_audio_eld_notify(adev, inst); } } /* * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC * @crtc_state: the DRM CRTC state * @stream_state: the DC stream state. * * Copy the mirrored transient state flags from DRM, to DC. It is used to bring * a dc_stream_state's flags in sync with a drm_crtc_state's flags. */ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, struct dc_stream_state *stream_state) { stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct dc_state *dc_state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; bool mode_set_reset_required = false; u32 i; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); if (old_crtc_state->active && (!new_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))) { manage_dm_interrupts(adev, acrtc, false); dc_stream_release(dm_old_crtc_state->stream); } } drm_atomic_helper_calc_timestamping_constants(state); /* update changed items */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); drm_dbg_state(state->dev, "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", acrtc->crtc_id, new_crtc_state->enable, new_crtc_state->active, new_crtc_state->planes_changed, new_crtc_state->mode_changed, new_crtc_state->active_changed, new_crtc_state->connectors_changed); /* Disable cursor if disabling crtc */ if (old_crtc_state->active && !new_crtc_state->active) { struct dc_cursor_position position; memset(&position, 0, sizeof(position)); mutex_lock(&dm->dc_lock); dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); mutex_unlock(&dm->dc_lock); } /* Copy all transient state flags into dc state */ if (dm_new_crtc_state->stream) { 
amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, dm_new_crtc_state->stream); } /* handles headless hotplug case, updating new_state and * aconnector as needed */ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); if (!dm_new_crtc_state->stream) { /* * this could happen because of issues with * userspace notifications delivery. * In this case userspace tries to set mode on * display which is disconnected in fact. * dc_sink is NULL in this case on aconnector. * We expect reset mode will come soon. * * This can also happen when unplug is done * during resume sequence ended * * In this case, we want to pretend we still * have a sink to keep the pipe running so that * hw state is consistent with the sw state */ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", __func__, acrtc->base.base.id); continue; } if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); pm_runtime_get_noresume(dev->dev); acrtc->enabled = true; acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; } else if (modereset_required(new_crtc_state)) { DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); /* i.e. reset mode */ if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); mode_set_reset_required = true; } } /* for_each_crtc_in_state() */ /* if there mode set or reset, disable eDP PSR */ if (mode_set_reset_required) { if (dm->vblank_control_workqueue) flush_workqueue(dm->vblank_control_workqueue); amdgpu_dm_psr_disable_all(dm); } dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); /* Allow idle optimization when vblank count is 0 for display off */ if (dm->active_vblank_irq_count == 0) dc_allow_idle_optimizations(dm->dc, true); mutex_unlock(&dm->dc_lock); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream != NULL) { const struct dc_stream_status *status = dc_stream_get_status(dm_new_crtc_state->stream); if (!status) status = dc_stream_get_status_from_state(dc_state, dm_new_crtc_state->stream); if (!status) DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); else acrtc->otg_inst = status->primary_otg_inst; } } } /** * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. * @state: The atomic state to commit * * This will tell DC to commit the constructed DC state from atomic_check, * programming the hardware. Any failures here implies a hardware failure, since * atomic check should have filtered anything non-kosher. 
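 * Besides stream programming this tail also handles the HDCP, audio ELD, FreeSync/IRQ and pageflip-completion bookkeeping that lives outside the DC state.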
*/ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; struct dm_atomic_state *dm_state; struct dc_state *dc_state = NULL; u32 i, j; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; unsigned long flags; bool wait_for_vblank = true; struct drm_connector *connector; struct drm_connector_state *old_con_state, *new_con_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; int crtc_disable_count = 0; trace_amdgpu_dm_atomic_commit_tail_begin(state); drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); dm_state = dm_atomic_get_new_state(state); if (dm_state && dm_state->context) { dc_state = dm_state->context; amdgpu_dm_commit_streams(state, dc_state); } for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (!adev->dm.hdcp_workqueue) continue; pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); if (!connector) continue; pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", connector->index, connector->status, connector->dpms); pr_debug("[HDCP_DM] state protection old: %x new: %x\n", old_con_state->content_protection, new_con_state->content_protection); if (aconnector->dc_sink) { if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", aconnector->dc_sink->edid_caps.display_name); } } new_crtc_state = NULL; old_crtc_state = NULL; if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); } if (old_crtc_state) pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", old_crtc_state->enable, old_crtc_state->active, old_crtc_state->mode_changed, old_crtc_state->active_changed, old_crtc_state->connectors_changed); if (new_crtc_state) pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", new_crtc_state->enable, new_crtc_state->active, new_crtc_state->mode_changed, new_crtc_state->active_changed, new_crtc_state->connectors_changed); } for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (!adev->dm.hdcp_workqueue) continue; new_crtc_state = NULL; old_crtc_state = NULL; if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); } dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; dm_new_con_state->update_hdcp = true; continue; } if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, 
old_con_state, connector, adev->dm.hdcp_workqueue)) { /* when a display is unplugged from an mst hub, the connector will * be destroyed within dm_dp_mst_connector_destroy. The connector's * hdcp properties, like type, undesired, desired, enabled, * will be lost. So, save the hdcp properties into hdcp_work within * amdgpu_dm_atomic_commit_tail. If the same display is * plugged back with the same display index, its hdcp properties * will be retrieved from hdcp_work within dm_dp_mst_get_modes */ bool enable_encryption = false; if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) enable_encryption = true; if (aconnector->dc_link && aconnector->dc_sink && aconnector->dc_link->type == dc_connection_mst_branch) { struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; hdcp_w->hdcp_content_type[connector->index] = new_con_state->hdcp_content_type; hdcp_w->content_protection[connector->index] = new_con_state->content_protection; } if (new_crtc_state && new_crtc_state->mode_changed && new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) enable_encryption = true; DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); hdcp_update_display( adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, new_con_state->hdcp_content_type, enable_encryption); } } /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct dc_surface_update *dummy_updates; struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; bool abm_changed, hdr_changed, scaling_changed; memset(&stream_update, 0, sizeof(stream_update)); if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); } /* Skip any modesets/resets */ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); scaling_changed = is_scaling_state_different(dm_new_con_state, dm_old_con_state); abm_changed = dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level; hdr_changed = !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); if (!scaling_changed && !abm_changed && !hdr_changed) continue; stream_update.stream = dm_new_crtc_state->stream; if (scaling_changed) { update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, dm_new_con_state, dm_new_crtc_state->stream); stream_update.src = dm_new_crtc_state->stream->src; stream_update.dst = dm_new_crtc_state->stream->dst; } if (abm_changed) { dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; stream_update.abm_level = &dm_new_crtc_state->abm_level; } if (hdr_changed) { fill_hdr_info_packet(new_con_state, &hdr_packet); stream_update.hdr_static_metadata = &hdr_packet; } status = dc_stream_get_status(dm_new_crtc_state->stream); if (WARN_ON(!status)) continue; WARN_ON(!status->plane_count); /* * TODO: DC refuses to perform stream updates without a dc_surface_update. * Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties. */ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; mutex_lock(&dm->dc_lock); dc_update_planes_and_stream(dm->dc, dummy_updates, status->plane_count, dm_new_crtc_state->stream, &stream_update); mutex_unlock(&dm->dc_lock); kfree(dummy_updates); } /** * Enable interrupts for CRTCs that are newly enabled or went through * a modeset. It was intentionally deferred until after the front end * state was modified to wait until the OTG was on and so the IRQ * handlers didn't access stale or invalid state. */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); #ifdef CONFIG_DEBUG_FS enum amdgpu_dm_pipe_crc_source cur_crc_src; #endif /* Count number of newly disabled CRTCs for dropping PM refs later. */ if (old_crtc_state->active && !new_crtc_state->active) crtc_disable_count++; dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); /* For freesync config update on crtc state and params for irq */ update_stream_irq_parameters(dm, dm_new_crtc_state); #ifdef CONFIG_DEBUG_FS spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); cur_crc_src = acrtc->dm_irq_params.crc_src; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); #endif if (new_crtc_state->active && (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))) { dc_stream_retain(dm_new_crtc_state->stream); acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; manage_dm_interrupts(adev, acrtc, true); } /* Handle vrr on->off / off->on transitions */ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); #ifdef CONFIG_DEBUG_FS if (new_crtc_state->active && (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))) { /** * Frontend may have changed so reapply the CRC capture * settings for the stream. */ if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (amdgpu_dm_crc_window_is_activated(crtc)) { spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); acrtc->dm_irq_params.window_param.update_win = true; /** * It takes 2 frames for HW to stably generate CRC when * resuming from suspend, so we set skip_frame_cnt 2. */ acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } #endif if (amdgpu_dm_crtc_configure_crc_source( crtc, dm_new_crtc_state, cur_crc_src)) DRM_DEBUG_DRIVER("Failed to configure crc source"); } } #endif } for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) if (new_crtc_state->async_flip) wait_for_vblank = false; /* update planes when needed per crtc*/ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); } /* Update audio instances for each connector. 
*/ amdgpu_dm_commit_audio(dev, state); /* restore the backlight level */ for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i] && (dm->actual_brightness[i] != dm->brightness[i])) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } /* * send vblank event on all events not handled in flip and * mark consumed event for drm_atomic_helper_commit_hw_done */ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->event) drm_send_event_locked(dev, &new_crtc_state->event->base); new_crtc_state->event = NULL; } spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); /* Signal HW programming completion */ drm_atomic_helper_commit_hw_done(state); if (wait_for_vblank) drm_atomic_helper_wait_for_flip_done(dev, state); drm_atomic_helper_cleanup_planes(dev, state); /* Don't free the memory if we are hitting this as part of suspend. * This way we don't free any memory during suspend; see * amdgpu_bo_free_kernel(). The memory will be freed in the first * non-suspend modeset or when the driver is torn down. */ if (!adev->in_suspend) { /* return the stolen vga memory back to VRAM */ if (!adev->mman.keep_stolen_vga_memory) amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); } /* * Finally, drop a runtime PM reference for each newly disabled CRTC, * so we can put the GPU into runtime suspend if we're not driving any * displays anymore */ for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); } static int dm_force_atomic_commit(struct drm_connector *connector) { int ret = 0; struct drm_device *ddev = connector->dev; struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); struct drm_plane *plane = disconnected_acrtc->base.primary; struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; struct drm_plane_state *plane_state; if (!state) return -ENOMEM; state->acquire_ctx = ddev->mode_config.acquire_ctx; /* Construct an atomic state to restore previous display setting */ /* * Attach connectors to drm_atomic_state */ conn_state = drm_atomic_get_connector_state(state, connector); ret = PTR_ERR_OR_ZERO(conn_state); if (ret) goto out; /* Attach crtc to drm_atomic_state*/ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); ret = PTR_ERR_OR_ZERO(crtc_state); if (ret) goto out; /* force a restore */ crtc_state->mode_changed = true; /* Attach plane to drm_atomic_state */ plane_state = drm_atomic_get_plane_state(state, plane); ret = PTR_ERR_OR_ZERO(plane_state); if (ret) goto out; /* Call commit internally with the state we just constructed */ ret = drm_atomic_commit(state); out: drm_atomic_state_put(state); if (ret) DRM_ERROR("Restoring old state failed with %i\n", ret); return ret; } /* * This function handles all cases when set mode does not come upon hotplug. 
* This includes when a display is unplugged then plugged back into the * same port and when running without usermode desktop manager support */ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct amdgpu_crtc *disconnected_acrtc; struct dm_crtc_state *acrtc_state; if (!aconnector->dc_sink || !connector->state || !connector->encoder) return; disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); if (!disconnected_acrtc) return; acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); if (!acrtc_state->stream) return; /* * If the previous sink is not released and different from the current, * we deduce we are in a state where we cannot rely on a usermode call * to turn on the display, so we do it here */ if (acrtc_state->stream->sink != aconnector->dc_sink) dm_force_atomic_commit(&aconnector->base); } /* * Grabs all modesetting locks to serialize against any blocking commits, * and waits for completion of all non-blocking commits. */ static int do_aquire_global_lock(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_commit *commit; long ret; /* * Adding all modeset locks to acquire_ctx will * ensure that when the framework releases it, the * extra locks we are taking here will get released too */ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); if (ret) return ret; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { spin_lock(&crtc->commit_lock); commit = list_first_entry_or_null(&crtc->commit_list, struct drm_crtc_commit, commit_entry); if (commit) drm_crtc_commit_get(commit); spin_unlock(&crtc->commit_lock); if (!commit) continue; /* * Make sure all pending HW programming completed and * page flips done */ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); if (ret > 0) ret = wait_for_completion_interruptible_timeout( &commit->flip_done, 10*HZ); if (ret == 0) DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", crtc->base.id, crtc->name); drm_crtc_commit_put(commit); } return ret < 0 ?
ret : 0; } static void get_freesync_config_for_crtc( struct dm_crtc_state *new_crtc_state, struct dm_connector_state *new_con_state) { struct mod_freesync_config config = {0}; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); struct drm_display_mode *mode = &new_crtc_state->base.mode; int vrefresh = drm_mode_vrefresh(mode); bool fs_vid_mode = false; new_crtc_state->vrr_supported = new_con_state->freesync_capable && vrefresh >= aconnector->min_vfreq && vrefresh <= aconnector->max_vfreq; if (new_crtc_state->vrr_supported) { new_crtc_state->stream->ignore_msa_timing_param = true; fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; config.vsif_supported = true; config.btr = true; if (fs_vid_mode) { config.state = VRR_STATE_ACTIVE_FIXED; config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; goto out; } else if (new_crtc_state->base.vrr_enabled) { config.state = VRR_STATE_ACTIVE_VARIABLE; } else { config.state = VRR_STATE_INACTIVE; } } out: new_crtc_state->freesync_config = config; } static void reset_freesync_config_for_crtc( struct dm_crtc_state *new_crtc_state) { new_crtc_state->vrr_supported = false; memset(&new_crtc_state->vrr_infopacket, 0, sizeof(new_crtc_state->vrr_infopacket)); } static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state) { const struct drm_display_mode *old_mode, *new_mode; if (!old_crtc_state || !new_crtc_state) return false; old_mode = &old_crtc_state->mode; new_mode = &new_crtc_state->mode; if (old_mode->clock == new_mode->clock && old_mode->hdisplay == new_mode->hdisplay && old_mode->vdisplay == new_mode->vdisplay && old_mode->htotal == new_mode->htotal && old_mode->vtotal != new_mode->vtotal && old_mode->hsync_start == new_mode->hsync_start && old_mode->vsync_start != new_mode->vsync_start && old_mode->hsync_end == new_mode->hsync_end && old_mode->vsync_end != new_mode->vsync_end && old_mode->hskew == new_mode->hskew && old_mode->vscan == new_mode->vscan && (old_mode->vsync_end - old_mode->vsync_start) == (new_mode->vsync_end - new_mode->vsync_start)) return true; return false; } static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { u64 num, den, res; struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; den = (unsigned long long)new_crtc_state->mode.htotal * (unsigned long long)new_crtc_state->mode.vtotal; res = div_u64(num, den); dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; } static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state, bool enable, bool *lock_and_validation_needed) { struct dm_atomic_state *dm_state = NULL; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct dc_stream_state *new_stream; int ret = 0; /* * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set * update changed items */ struct amdgpu_crtc *acrtc = NULL; struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; struct dm_connector_state *dm_new_conn_state = 
NULL, *dm_old_conn_state = NULL; new_stream = NULL; dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); /* TODO This hack should go away */ if (aconnector && enable) { /* Make sure fake sink is created in plug-in scenario */ drm_new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); drm_old_conn_state = drm_atomic_get_old_connector_state(state, &aconnector->base); if (IS_ERR(drm_new_conn_state)) { ret = PTR_ERR_OR_ZERO(drm_new_conn_state); goto fail; } dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset; new_stream = create_validate_stream_for_sink(aconnector, &new_crtc_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); /* * we can have no stream on ACTION_SET if a display * was disconnected during S3, in this case it is not an * error, the OS will be updated after detection, and * will do the right thing on next atomic commit */ if (!new_stream) { DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", __func__, acrtc->base.base.id); ret = -ENOMEM; goto fail; } /* * TODO: Check VSDB bits to decide whether this should * be enabled or not. */ new_stream->triggered_crtc_reset.enabled = dm->force_timing_sync; dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; ret = fill_hdr_info_packet(drm_new_conn_state, &new_stream->hdr_static_metadata); if (ret) goto fail; /* * If we already removed the old stream from the context * (and set the new stream to NULL) then we can't reuse * the old stream even if the stream and scaling are unchanged. * We'll hit the BUG_ON and black screen. * * TODO: Refactor this function to allow this check to work * in all conditions. 
*/ if (dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; if (dm_new_crtc_state->stream && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { new_crtc_state->mode_changed = false; DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", new_crtc_state->mode_changed); } } /* mode_changed flag may get updated above, need to check again */ if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset; drm_dbg_state(state->dev, "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", acrtc->crtc_id, new_crtc_state->enable, new_crtc_state->active, new_crtc_state->planes_changed, new_crtc_state->mode_changed, new_crtc_state->active_changed, new_crtc_state->connectors_changed); /* Remove stream for any changed/disabled CRTC */ if (!enable) { if (!dm_old_crtc_state->stream) goto skip_modeset; /* Unset freesync video if it was active before */ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; } /* Now check if we should set freesync video mode */ if (dm_new_crtc_state->stream && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; DRM_DEBUG_DRIVER( "Mode change not required for front porch change, setting mode_changed to %d", new_crtc_state->mode_changed); set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; } else if (aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; high_mode = get_highest_refresh_rate_mode(aconnector, false); if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) set_freesync_fixed_config(dm_new_crtc_state); } ret = dm_atomic_get_state(state, &dm_state); if (ret) goto fail; DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", crtc->base.id); /* i.e. reset mode */ if (dc_remove_stream_from_ctx( dm->dc, dm_state->context, dm_old_crtc_state->stream) != DC_OK) { ret = -EINVAL; goto fail; } dc_stream_release(dm_old_crtc_state->stream); dm_new_crtc_state->stream = NULL; reset_freesync_config_for_crtc(dm_new_crtc_state); *lock_and_validation_needed = true; } else {/* Add stream for any updated/enabled CRTC */ /* * Quick fix to prevent NULL pointer on new_stream when * added MST connectors not found in existing crtc_state in the chained mode * TODO: need to dig out the root cause of that */ if (!aconnector) goto skip_modeset; if (modereset_required(new_crtc_state)) goto skip_modeset; if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream, dm_old_crtc_state->stream)) { WARN_ON(dm_new_crtc_state->stream); ret = dm_atomic_get_state(state, &dm_state); if (ret) goto fail; dm_new_crtc_state->stream = new_stream; dc_stream_retain(new_stream); DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", crtc->base.id); if (dc_add_stream_to_ctx( dm->dc, dm_state->context, dm_new_crtc_state->stream) != DC_OK) { ret = -EINVAL; goto fail; } *lock_and_validation_needed = true; } } skip_modeset: /* Release extra reference */ if (new_stream) dc_stream_release(new_stream); /* * We want to do dc stream updates that do not require a * full modeset below. 
*/ if (!(enable && aconnector && new_crtc_state->active)) return 0; /* * Given above conditions, the dc state cannot be NULL because: * 1. We're in the process of enabling CRTCs (just been added * to the dc context, or already is on the context) * 2. Has a valid connector attached, and * 3. Is currently active and enabled. * => The dc stream state currently exists. */ BUG_ON(dm_new_crtc_state->stream == NULL); /* Scaling or underscan settings */ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || drm_atomic_crtc_needs_modeset(new_crtc_state)) update_stream_scaling_settings( &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); /* ABM settings */ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; /* * Color management settings. We also update color properties * when a modeset is needed, to ensure it gets reprogrammed. */ if (dm_new_crtc_state->base.color_mgmt_changed || drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); if (ret) goto fail; } /* Update Freesync settings. */ get_freesync_config_for_crtc(dm_new_crtc_state, dm_new_conn_state); return ret; fail: if (new_stream) dc_stream_release(new_stream); return ret; } static bool should_reset_plane(struct drm_atomic_state *state, struct drm_plane *plane, struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state) { struct drm_plane *other; struct drm_plane_state *old_other_state, *new_other_state; struct drm_crtc_state *new_crtc_state; int i; /* * TODO: Remove this hack once the checks below are sufficient * enough to determine when we need to reset all the planes on * the stream. */ if (state->allow_modeset) return true; /* Exit early if we know that we're adding or removing the plane. */ if (old_plane_state->crtc != new_plane_state->crtc) return true; /* old crtc == new_crtc == NULL, plane not in context. */ if (!new_plane_state->crtc) return false; new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); if (!new_crtc_state) return true; /* CRTC Degamma changes currently require us to recreate planes. */ if (new_crtc_state->color_mgmt_changed) return true; if (drm_atomic_crtc_needs_modeset(new_crtc_state)) return true; /* * If there are any new primary or overlay planes being added or * removed then the z-order can potentially change. To ensure * correct z-order and pipe acquisition the current DC architecture * requires us to remove and recreate all existing planes. * * TODO: Come up with a more elegant solution for this. */ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { struct amdgpu_framebuffer *old_afb, *new_afb; if (other->type == DRM_PLANE_TYPE_CURSOR) continue; if (old_other_state->crtc != new_plane_state->crtc && new_other_state->crtc != new_plane_state->crtc) continue; if (old_other_state->crtc != new_other_state->crtc) return true; /* Src/dst size and scaling updates. */ if (old_other_state->src_w != new_other_state->src_w || old_other_state->src_h != new_other_state->src_h || old_other_state->crtc_w != new_other_state->crtc_w || old_other_state->crtc_h != new_other_state->crtc_h) return true; /* Rotation / mirroring updates. */ if (old_other_state->rotation != new_other_state->rotation) return true; /* Blending updates. */ if (old_other_state->pixel_blend_mode != new_other_state->pixel_blend_mode) return true; /* Alpha updates. */ if (old_other_state->alpha != new_other_state->alpha) return true; /* Colorspace changes. 
*/ if (old_other_state->color_range != new_other_state->color_range || old_other_state->color_encoding != new_other_state->color_encoding) return true; /* Framebuffer checks fall at the end. */ if (!old_other_state->fb || !new_other_state->fb) continue; /* Pixel format changes can require bandwidth updates. */ if (old_other_state->fb->format != new_other_state->fb->format) return true; old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; /* Tiling and DCC changes also require bandwidth updates. */ if (old_afb->tiling_flags != new_afb->tiling_flags || old_afb->base.modifier != new_afb->base.modifier) return true; } return false; } static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, struct drm_plane_state *new_plane_state, struct drm_framebuffer *fb) { struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); unsigned int pitch; bool linear; if (fb->width > new_acrtc->max_cursor_width || fb->height > new_acrtc->max_cursor_height) { DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", new_plane_state->fb->width, new_plane_state->fb->height); return -EINVAL; } if (new_plane_state->src_w != fb->width << 16 || new_plane_state->src_h != fb->height << 16) { DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); return -EINVAL; } /* Pitch in pixels */ pitch = fb->pitches[0] / fb->format->cpp[0]; if (fb->width != pitch) { DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", fb->width, pitch); return -EINVAL; } switch (pitch) { case 64: case 128: case 256: /* FB pitch is supported by cursor plane */ break; default: DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); return -EINVAL; } /* Core DRM takes care of checking FB modifiers, so we only need to * check tiling flags when the FB doesn't have a modifier. 
*/ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { if (adev->family < AMDGPU_FAMILY_AI) { linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; } else { linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; } if (!linear) { DRM_DEBUG_ATOMIC("Cursor FB not linear"); return -EINVAL; } } return 0; } static int dm_update_plane_state(struct dc *dc, struct drm_atomic_state *state, struct drm_plane *plane, struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state, bool enable, bool *lock_and_validation_needed, bool *is_top_most_overlay) { struct dm_atomic_state *dm_state = NULL; struct drm_crtc *new_plane_crtc, *old_plane_crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; struct amdgpu_crtc *new_acrtc; bool needs_reset; int ret = 0; new_plane_crtc = new_plane_state->crtc; old_plane_crtc = old_plane_state->crtc; dm_new_plane_state = to_dm_plane_state(new_plane_state); dm_old_plane_state = to_dm_plane_state(old_plane_state); if (plane->type == DRM_PLANE_TYPE_CURSOR) { if (!enable || !new_plane_crtc || drm_atomic_plane_disabling(plane->state, new_plane_state)) return 0; new_acrtc = to_amdgpu_crtc(new_plane_crtc); if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); return -EINVAL; } if (new_plane_state->fb) { ret = dm_check_cursor_fb(new_acrtc, new_plane_state, new_plane_state->fb); if (ret) return ret; } return 0; } needs_reset = should_reset_plane(state, plane, old_plane_state, new_plane_state); /* Remove any changed/removed planes */ if (!enable) { if (!needs_reset) return 0; if (!old_plane_crtc) return 0; old_crtc_state = drm_atomic_get_old_crtc_state( state, old_plane_crtc); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); if (!dm_old_crtc_state->stream) return 0; DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", plane->base.id, old_plane_crtc->base.id); ret = dm_atomic_get_state(state, &dm_state); if (ret) return ret; if (!dc_remove_plane_from_context( dc, dm_old_crtc_state->stream, dm_old_plane_state->dc_state, dm_state->context)) { return -EINVAL; } if (dm_old_plane_state->dc_state) dc_plane_state_release(dm_old_plane_state->dc_state); dm_new_plane_state->dc_state = NULL; *lock_and_validation_needed = true; } else { /* Add new planes */ struct dc_plane_state *dc_new_plane_state; if (drm_atomic_plane_disabling(plane->state, new_plane_state)) return 0; if (!new_plane_crtc) return 0; new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (!dm_new_crtc_state->stream) return 0; if (!needs_reset) return 0; ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); if (ret) return ret; WARN_ON(dm_new_plane_state->dc_state); dc_new_plane_state = dc_create_plane_state(dc); if (!dc_new_plane_state) return -ENOMEM; /* Block top most plane from being a video plane */ if (plane->type == DRM_PLANE_TYPE_OVERLAY) { if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) return -EINVAL; *is_top_most_overlay = false; } DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", plane->base.id, new_plane_crtc->base.id); ret = fill_dc_plane_attributes( 
drm_to_adev(new_plane_crtc->dev), dc_new_plane_state, new_plane_state, new_crtc_state); if (ret) { dc_plane_state_release(dc_new_plane_state); return ret; } ret = dm_atomic_get_state(state, &dm_state); if (ret) { dc_plane_state_release(dc_new_plane_state); return ret; } /* * Any atomic check errors that occur after this will * not need a release. The plane state will be attached * to the stream, and therefore part of the atomic * state. It'll be released when the atomic state is * cleaned. */ if (!dc_add_plane_to_context( dc, dm_new_crtc_state->stream, dc_new_plane_state, dm_state->context)) { dc_plane_state_release(dc_new_plane_state); return -EINVAL; } dm_new_plane_state->dc_state = dc_new_plane_state; dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); /* Tell DC to do a full surface update every time there * is a plane change. Inefficient, but works for now. */ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; *lock_and_validation_needed = true; } return ret; } static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, int *src_w, int *src_h) { switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { case DRM_MODE_ROTATE_90: case DRM_MODE_ROTATE_270: *src_w = plane_state->src_h >> 16; *src_h = plane_state->src_w >> 16; break; case DRM_MODE_ROTATE_0: case DRM_MODE_ROTATE_180: default: *src_w = plane_state->src_w >> 16; *src_h = plane_state->src_h >> 16; break; } } static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { struct drm_plane *cursor = crtc->cursor, *underlying; struct drm_plane_state *new_cursor_state, *new_underlying_state; int i; int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; int cursor_src_w, cursor_src_h; int underlying_src_w, underlying_src_h; /* On DCE and DCN there is no dedicated hardware cursor plane. We get a * cursor per pipe but it's going to inherit the scaling and * positioning from the underlying pipe. Check the cursor plane's * blending properties match the underlying planes'. 
*/ new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); if (!new_cursor_state || !new_cursor_state->fb) return 0; dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { /* Narrow down to non-cursor planes on the same CRTC as the cursor */ if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) continue; /* Ignore disabled planes */ if (!new_underlying_state->fb) continue; dm_get_oriented_plane_size(new_underlying_state, &underlying_src_w, &underlying_src_h); underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; if (cursor_scale_w != underlying_scale_w || cursor_scale_h != underlying_scale_h) { drm_dbg_atomic(crtc->dev, "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", cursor->base.id, cursor->name, underlying->base.id, underlying->name); return -EINVAL; } /* If this plane covers the whole CRTC, no need to check planes underneath */ if (new_underlying_state->crtc_x <= 0 && new_underlying_state->crtc_y <= 0 && new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) break; } return 0; } static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_connector *connector; struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { if (!conn_state->crtc) conn_state = old_conn_state; if (conn_state->crtc != crtc) continue; aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port || !aconnector->mst_root) aconnector = NULL; else break; } if (!aconnector) return 0; return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); } /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. * * @dev: The DRM device * @state: The atomic state to commit * * Validate that the given atomic state is programmable by DC into hardware. * This involves constructing a &struct dc_state reflecting the new hardware * state we wish to commit, then querying DC to see if it is programmable. It's * important not to modify the existing DC state. Otherwise, atomic_check * may unexpectedly commit hardware changes. * * When validating the DC state, it's important that the right locks are * acquired. For full updates case which removes/adds/updates streams on one * CRTC while flipping on another CRTC, acquiring global lock will guarantee * that any such full update commit will wait for completion of any outstanding * flip using DRMs synchronization events. * * Note that DM adds the affected connectors for all CRTCs in state, when that * might not seem necessary. This is because DC stream creation requires the * DC sink, which is tied to the DRM connector state. Cleaning this up should * be possible but non-trivial - a possible TODO item. * * Return: -Error code if validation failed. 
*/ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { struct amdgpu_device *adev = drm_to_adev(dev); struct dm_atomic_state *dm_state = NULL; struct dc *dc = adev->dm.dc; struct drm_connector *connector; struct drm_connector_state *old_con_state, *new_con_state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; enum dc_status status; int ret, i; bool lock_and_validation_needed = false; bool is_top_most_overlay = true; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct drm_dp_mst_topology_mgr *mgr; struct drm_dp_mst_topology_state *mst_state; struct dsc_mst_fairness_vars vars[MAX_PIPES]; trace_amdgpu_dm_atomic_check_begin(state); ret = drm_atomic_helper_check_modeset(dev, state); if (ret) { DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); goto fail; } /* Check connector changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); /* Skip connectors that are disabled or part of modeset already. */ if (!new_con_state->crtc) continue; new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); if (IS_ERR(new_crtc_state)) { DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); ret = PTR_ERR(new_crtc_state); goto fail; } if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || dm_old_con_state->scaling != dm_new_con_state->scaling) new_crtc_state->connectors_changed = true; } if (dc_resource_is_dsc_encoding_supported(dc)) { for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); if (ret) { DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); goto fail; } } } } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed && old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && dm_old_crtc_state->dsc_force_changed == false) continue; ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); if (ret) { DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); goto fail; } if (!new_crtc_state->enable) continue; ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) { DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); goto fail; } ret = drm_atomic_add_affected_planes(state, crtc); if (ret) { DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); goto fail; } if (dm_old_crtc_state->dsc_force_changed) new_crtc_state->mode_changed = true; } /* * Add all primary and overlay planes on the CRTC to the state * whenever a plane is enabled to maintain correct z-ordering * and to enable fast surface updates. 
*/ drm_for_each_crtc(crtc, dev) { bool modified = false; for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; if (new_plane_state->crtc == crtc || old_plane_state->crtc == crtc) { modified = true; break; } } if (!modified) continue; drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; new_plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(new_plane_state)) { ret = PTR_ERR(new_plane_state); DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); goto fail; } } } /* * DC consults the zpos (layer_index in DC terminology) to determine the * hw plane on which to enable the hw cursor (see * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in * atomic state, so call drm helper to normalize zpos. */ ret = drm_atomic_normalize_zpos(dev, state); if (ret) { drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); goto fail; } /* Remove exiting planes if they are modified */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { if (old_plane_state->fb && new_plane_state->fb && get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb)) lock_and_validation_needed = true; ret = dm_update_plane_state(dc, state, plane, old_plane_state, new_plane_state, false, &lock_and_validation_needed, &is_top_most_overlay); if (ret) { DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; } } /* Disable all crtcs which require disable */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = dm_update_crtc_state(&adev->dm, state, crtc, old_crtc_state, new_crtc_state, false, &lock_and_validation_needed); if (ret) { DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); goto fail; } } /* Enable all crtcs which require enable */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = dm_update_crtc_state(&adev->dm, state, crtc, old_crtc_state, new_crtc_state, true, &lock_and_validation_needed); if (ret) { DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); goto fail; } } /* Add new/modified planes */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { ret = dm_update_plane_state(dc, state, plane, old_plane_state, new_plane_state, true, &lock_and_validation_needed, &is_top_most_overlay); if (ret) { DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; } } if (dc_resource_is_dsc_encoding_supported(dc)) { ret = pre_validate_dsc(state, &dm_state, vars); if (ret != 0) goto fail; } /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); if (ret) { DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); goto fail; } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->mpo_requested) DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); } /* Check cursor planes scaling */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); if (ret) { DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); goto fail; } } if (state->legacy_cursor_update) { /* * This is a fast cursor update coming from the plane update * helper, check if it can be done asynchronously for better * performance. 
*/ state->async_update = !drm_atomic_helper_async_check(dev, state); /* * Skip the remaining global validation if this is an async * update. Cursor updates can be done without affecting * state or bandwidth calcs and this avoids the performance * penalty of locking the private state object and * allocating a new dc_state. */ if (state->async_update) return 0; } /* Check scaling and underscan changes*/ /* TODO Removed scaling changes validation due to inability to commit * new stream into context w\o causing full reset. Need to * decide how to handle. */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); /* Skip any modesets/resets */ if (!acrtc || drm_atomic_crtc_needs_modeset( drm_atomic_get_new_crtc_state(state, &acrtc->base))) continue; /* Skip any thing not scale or underscan changes */ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) continue; lock_and_validation_needed = true; } /* set the slot info for each mst_state based on the link encoding format */ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; u8 link_coding_cap; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { if (connector->index == mst_state->mgr->conn_base_id) { aconnector = to_amdgpu_dm_connector(connector); link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); drm_dp_mst_update_slots(mst_state, link_coding_cap); break; } } drm_connector_list_iter_end(&iter); } /** * Streams and planes are reset when there are changes that affect * bandwidth. Anything that affects bandwidth needs to go through * DC global validation to ensure that the configuration can be applied * to hardware. * * We have to currently stall out here in atomic_check for outstanding * commits to finish in this case because our IRQ handlers reference * DRM state directly - we can end up disabling interrupts too early * if we don't. * * TODO: Remove this stall and drop DM state private objects. */ if (lock_and_validation_needed) { ret = dm_atomic_get_state(state, &dm_state); if (ret) { DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); goto fail; } ret = do_aquire_global_lock(dev, state); if (ret) { DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); goto fail; } ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); if (ret) { DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); ret = -EINVAL; goto fail; } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); if (ret) { DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); goto fail; } /* * Perform validation of MST topology in the state: * We need to perform MST atomic check before calling * dc_validate_global_state(), or there is a chance * to get stuck in an infinite loop and hang eventually. */ ret = drm_dp_mst_atomic_check(state); if (ret) { DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); goto fail; } status = dc_validate_global_state(dc, dm_state->context, true); if (status != DC_OK) { DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; } } else { /* * The commit is a fast update. 
Fast updates shouldn't change * the DC context, affect global validation, and can have their * commit work done in parallel with other commits not touching * the same resource. If we have a new DC context as part of * the DM atomic state from validation we need to free it and * retain the existing one instead. * * Furthermore, since the DM atomic state only contains the DC * context and can safely be annulled, we can free the state * and clear the associated private object now to free * some memory and avoid a possible use-after-free later. */ for (i = 0; i < state->num_private_objs; i++) { struct drm_private_obj *obj = state->private_objs[i].ptr; if (obj->funcs == adev->dm.atomic_obj.funcs) { int j = state->num_private_objs-1; dm_atomic_destroy_state(obj, state->private_objs[i].state); /* If i is not at the end of the array then the * last element needs to be moved to where i was * before the array can safely be truncated. */ if (i != j) state->private_objs[i] = state->private_objs[j]; state->private_objs[j].ptr = NULL; state->private_objs[j].state = NULL; state->private_objs[j].old_state = NULL; state->private_objs[j].new_state = NULL; state->num_private_objs = j; break; } } } /* Store the overall update type for use later in atomic check. */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); /* * Only allow async flips for fast updates that don't change * the FB pitch, the DCC state, rotation, etc. */ if (new_crtc_state->async_flip && lock_and_validation_needed) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] async flips are only supported for fast updates\n", crtc->base.id, crtc->name); ret = -EINVAL; goto fail; } dm_new_crtc_state->update_type = lock_and_validation_needed ? UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; } /* Must be success */ WARN_ON(ret); trace_amdgpu_dm_atomic_check_finish(state, ret); return ret; fail: if (ret == -EDEADLK) DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); else DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret); trace_amdgpu_dm_atomic_check_finish(state, ret); return ret; } static bool is_dp_capable_without_timing_msa(struct dc *dc, struct amdgpu_dm_connector *amdgpu_dm_connector) { u8 dpcd_data; bool capable = false; if (amdgpu_dm_connector->dc_link && dm_helpers_dp_read_dpcd( NULL, amdgpu_dm_connector->dc_link, DP_DOWN_STREAM_PORT_COUNT, &dpcd_data, sizeof(dpcd_data))) { capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? 
true:false; } return capable; } static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, unsigned int offset, unsigned int total_length, u8 *data, unsigned int length, struct amdgpu_hdmi_vsdb_info *vsdb) { bool res; union dmub_rb_cmd cmd; struct dmub_cmd_send_edid_cea *input; struct dmub_cmd_edid_cea_output *output; if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) return false; memset(&cmd, 0, sizeof(cmd)); input = &cmd.edid_cea.data.input; cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; cmd.edid_cea.header.sub_type = 0; cmd.edid_cea.header.payload_bytes = sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); input->offset = offset; input->length = length; input->cea_total_length = total_length; memcpy(input->payload, data, length); res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { DRM_ERROR("EDID CEA parser failed\n"); return false; } output = &cmd.edid_cea.data.output; if (output->type == DMUB_CMD__EDID_CEA_ACK) { if (!output->ack.success) { DRM_ERROR("EDID CEA ack failed at offset %d\n", output->ack.offset); } } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { if (!output->amd_vsdb.vsdb_found) return false; vsdb->freesync_supported = output->amd_vsdb.freesync_supported; vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; } else { DRM_WARN("Unknown EDID CEA parser results\n"); return false; } return true; } static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { int i; /* send extension block to DMCU for parsing */ for (i = 0; i < len; i += 8) { bool res; int offset; /* send 8 bytes a time */ if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) return false; if (i+8 == len) { /* EDID block sent completed, expect result */ int version, min_rate, max_rate; res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); if (res) { /* amd vsdb found */ vsdb_info->freesync_supported = 1; vsdb_info->amd_vsdb_version = version; vsdb_info->min_refresh_rate_hz = min_rate; vsdb_info->max_refresh_rate_hz = max_rate; return true; } /* not amd vsdb */ return false; } /* check for ack*/ res = dc_edid_parser_recv_cea_ack(dm->dc, &offset); if (!res) return false; } return false; } static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { int i; /* send extension block to DMCU for parsing */ for (i = 0; i < len; i += 8) { /* send 8 bytes a time */ if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) return false; } return vsdb_info->freesync_supported; } static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); bool ret; mutex_lock(&adev->dm.dc_lock); if (adev->dm.dmub_srv) ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); else ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); mutex_unlock(&adev->dm.dc_lock); return ret; } static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; int j = 0; if (edid == NULL || edid->extensions == 0) return -ENODEV; /* Find DisplayID extension */ for (i = 0; i < edid->extensions; i++) { edid_ext = (void *)(edid + (i + 1)); if (edid_ext[0] == 
DISPLAYID_EXT) break; } while (j < EDID_LENGTH) { struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false; vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); return true; } j++; } return false; } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; bool valid_vsdb_found = false; /*----- drm_find_cea_extension() -----*/ /* No EDID or EDID extensions */ if (edid == NULL || edid->extensions == 0) return -ENODEV; /* Find CEA extension */ for (i = 0; i < edid->extensions; i++) { edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); if (edid_ext[0] == CEA_EXT) break; } if (i == edid->extensions) return -ENODEV; /*----- cea_db_offsets() -----*/ if (edid_ext[0] != CEA_EXT) return -ENODEV; valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); return valid_vsdb_found ? i : -ENODEV; } /** * amdgpu_dm_update_freesync_caps - Update Freesync capabilities * * @connector: Connector to query. * @edid: EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by * amdgpu_dm. This function checks which type of connector we need to set the * FreeSync parameters. */ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct edid *edid) { int i = 0; struct detailed_timing *timing; struct detailed_non_pixel *data; struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; if (!connector->state) { DRM_ERROR("%s - Connector has no state", __func__); goto update; } sink = amdgpu_dm_connector->dc_sink ? 
amdgpu_dm_connector->dc_sink : amdgpu_dm_connector->dc_em_sink; if (!edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; amdgpu_dm_connector->pixel_clock_mhz = 0; connector->display_info.monitor_range.min_vfreq = 0; connector->display_info.monitor_range.max_vfreq = 0; freesync_capable = false; goto update; } dm_con_state = to_dm_connector_state(connector->state); if (!adev->dm.freesync_module) goto update; if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP) { bool edid_check_required = false; if (edid) { edid_check_required = is_dp_capable_without_timing_msa( adev->dm.dc, amdgpu_dm_connector); } if (edid_check_required == true && (edid->version > 1 || (edid->version == 1 && edid->revision > 1))) { for (i = 0; i < 4; i++) { timing = &edid->detailed_timings[i]; data = &timing->data.other_data; range = &data->data.range; /* * Check if monitor has continuous frequency mode */ if (data->type != EDID_DETAIL_MONITOR_RANGE) continue; /* * Check for flag range limits only. If flag == 1 then * no additional timing information provided. * Default GTF, GTF Secondary curve and CVT are not * supported */ if (range->flags != 1) continue; amdgpu_dm_connector->min_vfreq = range->min_vfreq; amdgpu_dm_connector->max_vfreq = range->max_vfreq; amdgpu_dm_connector->pixel_clock_mhz = range->pixel_clock_mhz * 10; connector->display_info.monitor_range.min_vfreq = range->min_vfreq; connector->display_info.monitor_range.max_vfreq = range->max_vfreq; break; } if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) { freesync_capable = true; } } parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (vsdb_info.replay_mode) { amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; } } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { timing = &edid->detailed_timings[i]; data = &timing->data.other_data; amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) freesync_capable = true; connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; } } as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { amdgpu_dm_connector->pack_sdp_v1_3 = true; amdgpu_dm_connector->as_type = as_type; amdgpu_dm_connector->vsdb_info = vsdb_info; amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) freesync_capable = true; connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; } } update: if (dm_con_state) dm_con_state->freesync_capable = freesync_capable; if (connector->vrr_capable_property) 
drm_connector_set_vrr_capable_property(connector, freesync_capable); } void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) { struct amdgpu_device *adev = drm_to_adev(dev); struct dc *dc = adev->dm.dc; int i; mutex_lock(&adev->dm.dc_lock); if (dc->current_state) { for (i = 0; i < dc->current_state->stream_count; ++i) dc->current_state->streams[i] ->triggered_crtc_reset.enabled = adev->dm.force_timing_sync; dm_enable_per_frame_crtc_master_sync(dc->current_state); dc_trigger_sync(dc, dc->current_state); } mutex_unlock(&adev->dm.dc_lock); } void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, u32 value, const char *func_name) { #ifdef DM_CHECK_ADDR_0 if (address == 0) { DC_ERR("invalid register write. address = 0"); return; } #endif cgs_write_register(ctx->cgs_device, address, value); trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); } uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, const char *func_name) { u32 value; #ifdef DM_CHECK_ADDR_0 if (address == 0) { DC_ERR("invalid register read; address = 0\n"); return 0; } #endif if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress && !ctx->dmub_srv->reg_helper_offload.should_burst_write) { ASSERT(false); return 0; } value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); return value; } int amdgpu_dm_process_dmub_aux_transfer_sync( struct dc_context *ctx, unsigned int link_index, struct aux_payload *payload, enum aux_return_code_type *operation_result) { struct amdgpu_device *adev = ctx->driver_context; struct dmub_notification *p_notify = adev->dm.dmub_notify; int ret = -1; mutex_lock(&adev->dm.dpia_aux_lock); if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) { *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; goto out; } if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { DRM_ERROR("wait_for_completion_timeout timeout!"); *operation_result = AUX_RET_ERROR_TIMEOUT; goto out; } if (p_notify->result != AUX_RET_SUCCESS) { /* * Transient states before tunneling is enabled could * lead to this error. We can ignore this for now. 
*/ if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", payload->address, payload->length, p_notify->result); } *operation_result = AUX_RET_ERROR_INVALID_REPLY; goto out; } payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; if (!payload->write && p_notify->aux_reply.length && (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { if (payload->length != p_notify->aux_reply.length) { DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", p_notify->aux_reply.length, payload->address, payload->length); *operation_result = AUX_RET_ERROR_INVALID_REPLY; goto out; } memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); } /* success */ ret = p_notify->aux_reply.length; *operation_result = p_notify->result; out: reinit_completion(&adev->dm.dmub_aux_transfer_done); mutex_unlock(&adev->dm.dpia_aux_lock); return ret; } int amdgpu_dm_process_dmub_set_config_sync( struct dc_context *ctx, unsigned int link_index, struct set_config_cmd_payload *payload, enum set_config_status *operation_result) { struct amdgpu_device *adev = ctx->driver_context; bool is_cmd_complete; int ret; mutex_lock(&adev->dm.dpia_aux_lock); is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc, link_index, payload, adev->dm.dmub_notify); if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { ret = 0; *operation_result = adev->dm.dmub_notify->sc_status; } else { DRM_ERROR("wait_for_completion_timeout timeout!"); ret = -1; *operation_result = SET_CONFIG_UNKNOWN_ERROR; } if (!is_cmd_complete) reinit_completion(&adev->dm.dmub_aux_transfer_done); mutex_unlock(&adev->dm.dpia_aux_lock); return ret; } /* * Check whether seamless boot is supported. * * So far we only support seamless boot on CHIP_VANGOGH. * If everything goes well, we may consider expanding * seamless boot to other ASICs. */ bool check_seamless_boot_capability(struct amdgpu_device *adev) { switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(3, 0, 1): if (!adev->mman.keep_stolen_vga_memory) return true; break; default: break; } return false; } bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); } bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
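A minimal user-space sketch (not part of the kernel source) of the arithmetic set_freesync_fixed_config() performs above when deriving a fixed refresh rate in micro-Hz from a DRM mode; the timing values are assumed examples for a common 1920x1080@60 mode, and the plain 64-bit division stands in for the kernel's div_u64().

/* Illustrative only: example timings, not values read from the driver. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock_khz = 148500;	/* mode->clock is expressed in kHz */
	uint64_t htotal = 2200;		/* assumed example horizontal total */
	uint64_t vtotal = 1125;		/* assumed example vertical total */

	/* refresh_uhz = clock_khz * 1000 * 1000000 / (htotal * vtotal) */
	uint64_t num = clock_khz * 1000ULL * 1000000ULL;
	uint64_t den = htotal * vtotal;
	uint64_t refresh_uhz = num / den;	/* div_u64() equivalent in user space */

	printf("fixed refresh: %llu uHz (%.3f Hz)\n",
	       (unsigned long long)refresh_uhz, refresh_uhz / 1e6);
	return 0;
}

For the example timings this prints 60000000 uHz, i.e. 60 Hz stored in the micro-Hz units that the freesync configuration expects.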
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "amdgpu_dm_psr.h" #include "dc_dmub_srv.h" #include "dc.h" #include "dm_helpers.h" #include "amdgpu_dm.h" #include "modules/power/power_helpers.h" static bool link_supports_psrsu(struct dc_link *link) { struct dc *dc = link->ctx->dc; if (!dc->caps.dmcub_support) return false; if (dc->ctx->dce_version < DCN_VERSION_3_1) return false; if (!is_psr_su_specific_panel(link)) return false; if (!link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP || !link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) return false; if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.SU_GRANULARITY_REQUIRED && !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) return false; return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); } /* * amdgpu_dm_set_psr_caps() - set link psr capabilities * @link: link * */ void amdgpu_dm_set_psr_caps(struct dc_link *link) { if (!(link->connector_signal & SIGNAL_TYPE_EDP)) { link->psr_settings.psr_feature_enabled = false; return; } if (link->type == dc_connection_none) { link->psr_settings.psr_feature_enabled = false; return; } if (link->dpcd_caps.psr_info.psr_version == 0) { link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; link->psr_settings.psr_feature_enabled = false; } else { if (link_supports_psrsu(link)) link->psr_settings.psr_version = DC_PSR_VERSION_SU_1; else link->psr_settings.psr_version = DC_PSR_VERSION_1; link->psr_settings.psr_feature_enabled = true; } DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", link->psr_settings.psr_feature_enabled, link->psr_settings.psr_version, link->dpcd_caps.psr_info.psr_version, link->dpcd_caps.psr_info.psr_dpcd_caps.raw, link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); } /* * amdgpu_dm_link_setup_psr() - configure psr link * @stream: stream state * * Return: true if success */ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) { struct dc_link *link = NULL; struct psr_config psr_config = {0}; struct psr_context psr_context = {0}; struct dc *dc = NULL; bool ret = false; if (stream == NULL) return false; link = stream->link; dc = link->ctx->dc; if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { mod_power_calc_psr_configs(&psr_config, link, stream); /* linux DM specific updating for psr config fields */ psr_config.allow_smu_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_SMU_OPT) && 
mod_power_only_edp(dc->current_state, stream); psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) return false; ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); } DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled); return ret; } /* * amdgpu_dm_psr_enable() - enable psr f/w * @stream: stream state * * Return: true if success */ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) { struct dc_link *link = stream->link; unsigned int vsync_rate_hz = 0; struct dc_static_screen_params params = {0}; /* Calculate number of static frames before generating interrupt to * enter PSR. */ // Init fail safe of 2 frames static unsigned int num_frames_static = 2; unsigned int power_opt = 0; bool psr_enable = true; DRM_DEBUG_DRIVER("Enabling psr...\n"); vsync_rate_hz = div64_u64(div64_u64(( stream->timing.pix_clk_100hz * 100), stream->timing.v_total), stream->timing.h_total); /* Round up * Calculate number of frames such that at least 30 ms of time has * passed. */ if (vsync_rate_hz != 0) { unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; num_frames_static = (30000 / frame_time_microsec) + 1; } params.triggers.cursor_update = true; params.triggers.overlay_update = true; params.triggers.surface_update = true; params.num_frames = num_frames_static; dc_stream_set_static_screen_params(link->ctx->dc, &stream, 1, &params); /* * Only enable static-screen optimizations for PSR1. For PSR SU, this * causes vstartup interrupt issues, used by amdgpu_dm to send vblank * events. */ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) power_opt |= psr_power_opt_z10_static_screen; return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); } /* * amdgpu_dm_psr_disable() - disable psr f/w * @stream: stream state * * Return: true if success */ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) { unsigned int power_opt = 0; bool psr_enable = false; DRM_DEBUG_DRIVER("Disabling psr...\n"); return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt); } /* * amdgpu_dm_psr_disable() - disable psr f/w * if psr is enabled on any stream * * Return: true if success */ bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm) { DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n"); return dc_set_psr_allow_active(dm->dc, false); }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
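A small sketch, under assumed example refresh rates, of how amdgpu_dm_psr_enable() above sizes the static-frame count: the count is chosen so at least 30 ms of unchanged frames elapse before the firmware enters PSR, with a fail-safe of 2 frames when the vsync rate is unknown. This is illustrative user-space code, not driver code.

#include <stdio.h>

static unsigned int psr_static_frames(unsigned int vsync_rate_hz)
{
	unsigned int num_frames_static = 2;	/* fail-safe default, as in the driver */

	if (vsync_rate_hz != 0) {
		unsigned int frame_time_us = 1000000 / vsync_rate_hz;

		/* round up so num_frames * frame_time covers >= 30 ms */
		num_frames_static = (30000 / frame_time_us) + 1;
	}
	return num_frames_static;
}

int main(void)
{
	printf("60 Hz  -> %u frames\n", psr_static_frames(60));	/* 2 frames, ~33 ms */
	printf("120 Hz -> %u frames\n", psr_static_frames(120));	/* 4 frames, ~33 ms */
	return 0;
}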
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <drm/drm_crtc.h> #include <drm/drm_vblank.h> #include "amdgpu.h" #include "amdgpu_dm.h" #include "dc.h" #include "amdgpu_securedisplay.h" static const char *const pipe_crc_sources[] = { "none", "crtc", "crtc dither", "dprx", "dprx dither", "auto", }; static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source) { if (!source || !strcmp(source, "none")) return AMDGPU_DM_PIPE_CRC_SOURCE_NONE; if (!strcmp(source, "auto") || !strcmp(source, "crtc")) return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC; if (!strcmp(source, "dprx")) return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX; if (!strcmp(source, "crtc dither")) return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER; if (!strcmp(source, "dprx dither")) return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER; return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID; } static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src) { return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) || (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER); } static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src) { return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) || (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER); } static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src) { return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) || (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) || (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE); } const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count) { *count = ARRAY_SIZE(pipe_crc_sources); return pipe_crc_sources; } #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream) { struct drm_device *drm_dev = crtc->dev; struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); bool was_activated; spin_lock_irq(&drm_dev->event_lock); was_activated = acrtc->dm_irq_params.window_param.activated; acrtc->dm_irq_params.window_param.x_start = 0; acrtc->dm_irq_params.window_param.y_start = 0; acrtc->dm_irq_params.window_param.x_end = 0; acrtc->dm_irq_params.window_param.y_end = 0; acrtc->dm_irq_params.window_param.activated = false; acrtc->dm_irq_params.window_param.update_win = false; acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; spin_unlock_irq(&drm_dev->event_lock); /* Disable secure_display if it was enabled */ if (was_activated) { /* stop ROI update on this crtc */ 
flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work); flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work); dc_stream_forward_crc_window(stream, NULL, true); } } static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) { struct secure_display_context *secure_display_ctx; struct psp_context *psp; struct ta_securedisplay_cmd *securedisplay_cmd; struct drm_crtc *crtc; struct dc_stream_state *stream; uint8_t phy_inst; int ret; secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work); crtc = secure_display_ctx->crtc; if (!crtc) return; psp = &drm_to_adev(crtc->dev)->psp; if (!psp->securedisplay_context.context.initialized) { DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n"); return; } stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; phy_inst = stream->link->link_enc_hw_inst; /* need lock for multiple crtcs to use the command buffer */ mutex_lock(&psp->securedisplay_context.mutex); psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst; /* PSP TA is expected to finish data transmission over I2C within current frame, * even there are up to 4 crtcs request to send in this frame. */ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); if (!ret) { if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); } mutex_unlock(&psp->securedisplay_context.mutex); } static void amdgpu_dm_forward_crc_window(struct work_struct *work) { struct secure_display_context *secure_display_ctx; struct amdgpu_display_manager *dm; struct drm_crtc *crtc; struct dc_stream_state *stream; secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work); crtc = secure_display_ctx->crtc; if (!crtc) return; dm = &drm_to_adev(crtc->dev)->dm; stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; mutex_lock(&dm->dc_lock); dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false); mutex_unlock(&dm->dc_lock); } bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) { struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); bool ret = false; spin_lock_irq(&drm_dev->event_lock); ret = acrtc->dm_irq_params.window_param.activated; spin_unlock_irq(&drm_dev->event_lock); return ret; } #endif int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, size_t *values_cnt) { enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); if (source < 0) { DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n", src_name, crtc->index); return -EINVAL; } *values_cnt = 3; return 0; } int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state, enum amdgpu_dm_pipe_crc_source source) { struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc_stream_state *stream_state = dm_crtc_state->stream; bool enable = amdgpu_dm_is_valid_crc_source(source); int ret = 0; /* Configuration will be deferred to stream enable. 
*/ if (!stream_state) return -EINVAL; mutex_lock(&adev->dm.dc_lock); /* Enable or disable CRTC CRC generation */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, NULL, enable, enable)) { ret = -EINVAL; goto unlock; } } /* Configure dithering */ if (!dm_need_crc_dither(source)) { dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, DYN_EXPANSION_DISABLE); } else { dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT); dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, DYN_EXPANSION_AUTO); } unlock: mutex_unlock(&adev->dm.dc_lock); return ret; } int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) { enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); enum amdgpu_dm_pipe_crc_source cur_crc_src; struct drm_crtc_commit *commit; struct dm_crtc_state *crtc_state; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct drm_dp_aux *aux = NULL; bool enable = false; bool enabled = false; int ret = 0; if (source < 0) { DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n", src_name, crtc->index); return -EINVAL; } ret = drm_modeset_lock(&crtc->mutex, NULL); if (ret) return ret; spin_lock(&crtc->commit_lock); commit = list_first_entry_or_null(&crtc->commit_list, struct drm_crtc_commit, commit_entry); if (commit) drm_crtc_commit_get(commit); spin_unlock(&crtc->commit_lock); if (commit) { /* * Need to wait for all outstanding programming to complete * in commit tail since it can modify CRC related fields and * hardware state. Since we're holding the CRTC lock we're * guaranteed that no other commit work can be queued off * before we modify the state below. */ ret = wait_for_completion_interruptible_timeout( &commit->hw_done, 10 * HZ); if (ret) goto cleanup; } enable = amdgpu_dm_is_valid_crc_source(source); crtc_state = to_dm_crtc_state(crtc->state); spin_lock_irq(&drm_dev->event_lock); cur_crc_src = acrtc->dm_irq_params.crc_src; spin_unlock_irq(&drm_dev->event_lock); /* * USER REQ SRC | CURRENT SRC | BEHAVIOR * ----------------------------- * None | None | Do nothing * None | CRTC | Disable CRTC CRC, set default to dither * None | DPRX | Disable DPRX CRC, need 'aux', set default to dither * None | CRTC DITHER | Disable CRTC CRC * None | DPRX DITHER | Disable DPRX CRC, need 'aux' * CRTC | XXXX | Enable CRTC CRC, no dither * DPRX | XXXX | Enable DPRX CRC, need 'aux', no dither * CRTC DITHER | XXXX | Enable CRTC CRC, set dither * DPRX DITHER | XXXX | Enable DPRX CRC, need 'aux', set dither */ if (dm_is_crc_source_dprx(source) || (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE && dm_is_crc_source_dprx(cur_crc_src))) { struct amdgpu_dm_connector *aconn = NULL; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; drm_connector_list_iter_begin(crtc->dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (!connector->state || connector->state->crtc != crtc) continue; aconn = to_amdgpu_dm_connector(connector); break; } drm_connector_list_iter_end(&conn_iter); if (!aconn) { DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index); ret = -EINVAL; goto cleanup; } aux = (aconn->mst_output_port) ? 
&aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux; if (!aux) { DRM_DEBUG_DRIVER("No dp aux for amd connector\n"); ret = -EINVAL; goto cleanup; } if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) && (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) { DRM_DEBUG_DRIVER("No DP connector available for CRC source\n"); ret = -EINVAL; goto cleanup; } } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /* Reset secure_display when we change crc source from debugfs */ amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream); #endif if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { ret = -EINVAL; goto cleanup; } /* * Reading the CRC requires the vblank interrupt handler to be * enabled. Keep a reference until CRC capture stops. */ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src); if (!enabled && enable) { ret = drm_crtc_vblank_get(crtc); if (ret) goto cleanup; if (dm_is_crc_source_dprx(source)) { if (drm_dp_start_crc(aux, crtc)) { DRM_DEBUG_DRIVER("dp start crc failed\n"); ret = -EINVAL; goto cleanup; } } } else if (enabled && !enable) { drm_crtc_vblank_put(crtc); if (dm_is_crc_source_dprx(source)) { if (drm_dp_stop_crc(aux)) { DRM_DEBUG_DRIVER("dp stop crc failed\n"); ret = -EINVAL; goto cleanup; } } } spin_lock_irq(&drm_dev->event_lock); acrtc->dm_irq_params.crc_src = source; spin_unlock_irq(&drm_dev->event_lock); /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; cleanup: if (commit) drm_crtc_commit_put(commit); drm_modeset_unlock(&crtc->mutex); return ret; } /** * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC. * @crtc: DRM CRTC object. * * This function should be called at the end of a vblank, when the fb has been * fully processed through the pipe. */ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) { struct dm_crtc_state *crtc_state; struct dc_stream_state *stream_state; struct drm_device *drm_dev = NULL; enum amdgpu_dm_pipe_crc_source cur_crc_src; struct amdgpu_crtc *acrtc = NULL; uint32_t crcs[3]; unsigned long flags; if (crtc == NULL) return; crtc_state = to_dm_crtc_state(crtc->state); stream_state = crtc_state->stream; acrtc = to_amdgpu_crtc(crtc); drm_dev = crtc->dev; spin_lock_irqsave(&drm_dev->event_lock, flags); cur_crc_src = acrtc->dm_irq_params.crc_src; spin_unlock_irqrestore(&drm_dev->event_lock, flags); /* Early return if CRC capture is not enabled. */ if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) return; /* * Since flipping and crc enablement happen asynchronously, we - more * often than not - will be returning an 'uncooked' crc on first frame. * Probably because hw isn't ready yet. For added security, skip the * first two CRC values. 
*/ if (crtc_state->crc_skip_count < 2) { crtc_state->crc_skip_count += 1; return; } if (dm_is_crc_source_crtc(cur_crc_src)) { if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, &crcs[0], &crcs[1], &crcs[2])) return; drm_crtc_add_crc_entry(crtc, true, drm_crtc_accurate_vblank_count(crtc), crcs); } } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) { struct drm_device *drm_dev = NULL; enum amdgpu_dm_pipe_crc_source cur_crc_src; struct amdgpu_crtc *acrtc = NULL; struct amdgpu_device *adev = NULL; struct secure_display_context *secure_display_ctx = NULL; unsigned long flags1; if (crtc == NULL) return; acrtc = to_amdgpu_crtc(crtc); adev = drm_to_adev(crtc->dev); drm_dev = crtc->dev; spin_lock_irqsave(&drm_dev->event_lock, flags1); cur_crc_src = acrtc->dm_irq_params.crc_src; /* Early return if CRC capture is not enabled. */ if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) || !dm_is_crc_source_crtc(cur_crc_src)) goto cleanup; if (!acrtc->dm_irq_params.window_param.activated) goto cleanup; if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; goto cleanup; } secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id]; if (WARN_ON(secure_display_ctx->crtc != crtc)) { /* We have set the crtc when creating secure_display_context, * don't expect it to be changed here. */ secure_display_ctx->crtc = crtc; } if (acrtc->dm_irq_params.window_param.update_win) { /* prepare work for dmub to update ROI */ secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start; secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start; secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end - acrtc->dm_irq_params.window_param.x_start; secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end - acrtc->dm_irq_params.window_param.y_start; schedule_work(&secure_display_ctx->forward_roi_work); acrtc->dm_irq_params.window_param.update_win = false; /* Statically skip 1 frame, because we may need to wait below things * before sending ROI to dmub: * 1. We defer the work by using system workqueue. * 2. We may need to wait for dc_lock before accessing dmub. */ acrtc->dm_irq_params.window_param.skip_frame_cnt = 1; } else { /* prepare work for psp to read ROI/CRC and send to I2C */ schedule_work(&secure_display_ctx->notify_ta_work); } cleanup: spin_unlock_irqrestore(&drm_dev->event_lock, flags1); } struct secure_display_context * amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev) { struct secure_display_context *secure_display_ctxs = NULL; int i; secure_display_ctxs = kcalloc(adev->mode_info.num_crtc, sizeof(struct secure_display_context), GFP_KERNEL); if (!secure_display_ctxs) return NULL; for (i = 0; i < adev->mode_info.num_crtc; i++) { INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window); INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base; } return secure_display_ctxs; } #endif
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
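The CRC hooks above plug into DRM's generic debugfs CRC ABI: userspace selects one of the pipe_crc_sources strings by writing it to the per-CRTC control node, and amdgpu_dm_crtc_handle_crc_irq() then reports three CRC words per frame through the data node. A minimal user-space sketch, assuming DRI minor 0 and CRTC index 0 (both vary per system) and that debugfs is mounted at the usual location:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative paths; adjust the DRI minor and CRTC index as needed. */
	const char *ctl = "/sys/kernel/debug/dri/0/crtc-0/crc/control";
	const char *data = "/sys/kernel/debug/dri/0/crtc-0/crc/data";
	char line[256];
	FILE *f;
	int fd;

	fd = open(ctl, O_WRONLY);
	if (fd < 0)
		return 1;
	/* "crtc" maps to AMDGPU_DM_PIPE_CRC_SOURCE_CRTC in dm_parse_crc_source(). */
	if (write(fd, "crtc", strlen("crtc")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	f = fopen(data, "r");
	if (!f)
		return 1;
	/* Each entry carries a frame counter followed by the CRC words. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}

Writing "none" back to the control node stops capture and drops the vblank reference taken in amdgpu_dm_crtc_set_crc_source().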
// SPDX-License-Identifier: MIT /* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_fourcc.h> #include "amdgpu.h" #include "dal_asic_id.h" #include "amdgpu_display.h" #include "amdgpu_dm_trace.h" #include "amdgpu_dm_plane.h" #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" /* * TODO: these are currently initialized to rgb formats only. * For future use cases we should either initialize them dynamically based on * plane capabilities, or initialize this array to all formats, so internal drm * check will succeed, and let DC implement proper check */ static const uint32_t rgb_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_XRGB16161616, DRM_FORMAT_XBGR16161616, DRM_FORMAT_ARGB16161616, DRM_FORMAT_ABGR16161616, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_RGB565, }; static const uint32_t overlay_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_RGB565, DRM_FORMAT_NV21, DRM_FORMAT_NV12, DRM_FORMAT_P010 }; static const uint32_t video_formats[] = { DRM_FORMAT_NV21, DRM_FORMAT_NV12, DRM_FORMAT_P010 }; static const u32 cursor_formats[] = { DRM_FORMAT_ARGB8888 }; enum dm_micro_swizzle { MICRO_SWIZZLE_Z = 0, MICRO_SWIZZLE_S = 1, MICRO_SWIZZLE_D = 2, MICRO_SWIZZLE_R = 3 }; const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) { return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); } void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, bool *per_pixel_alpha, bool *pre_multiplied_alpha, bool *global_alpha, int *global_alpha_value) { *per_pixel_alpha = false; *pre_multiplied_alpha = true; *global_alpha = false; *global_alpha_value = 0xff; if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) return; if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { static const uint32_t alpha_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_ARGB16161616, DRM_FORMAT_ABGR16161616, DRM_FORMAT_ARGB16161616F, }; 
uint32_t format = plane_state->fb->format->format; unsigned int i; for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { if (format == alpha_formats[i]) { *per_pixel_alpha = true; break; } } if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) *pre_multiplied_alpha = false; } if (plane_state->alpha < 0xffff) { *global_alpha = true; *global_alpha_value = plane_state->alpha >> 8; } } static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) { if (!*mods) return; if (*cap - *size < 1) { uint64_t new_cap = *cap * 2; uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); if (!new_mods) { kfree(*mods); *mods = NULL; return; } memcpy(new_mods, *mods, sizeof(uint64_t) * *size); kfree(*mods); *mods = new_mods; *cap = new_cap; } (*mods)[*size] = mod; *size += 1; } static bool modifier_has_dcc(uint64_t modifier) { return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; return AMD_FMT_MOD_GET(TILE, modifier); } static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, uint64_t tiling_flags) { /* Fill GFX8 params */ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { unsigned int bankw, bankh, mtaspect, tile_split, num_banks; bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); /* XXX fix me for VI */ tiling_info->gfx8.num_banks = num_banks; tiling_info->gfx8.array_mode = DC_ARRAY_2D_TILED_THIN1; tiling_info->gfx8.tile_split = tile_split; tiling_info->gfx8.bank_width = bankw; tiling_info->gfx8.bank_height = bankh; tiling_info->gfx8.tile_aspect = mtaspect; tiling_info->gfx8.tile_mode = DC_ADDR_SURF_MICRO_TILING_DISPLAY; } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_1D_TILED_THIN1) { tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; } tiling_info->gfx8.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); } static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, union dc_tiling_info *tiling_info) { /* Fill GFX9 params */ tiling_info->gfx9.num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes; tiling_info->gfx9.num_banks = adev->gfx.config.gb_addr_config_fields.num_banks; tiling_info->gfx9.pipe_interleave = adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; tiling_info->gfx9.num_shader_engines = adev->gfx.config.gb_addr_config_fields.num_se; tiling_info->gfx9.max_compressed_frags = adev->gfx.config.gb_addr_config_fields.max_compress_frags; tiling_info->gfx9.num_rb_per_se = adev->gfx.config.gb_addr_config_fields.num_rb_per_se; tiling_info->gfx9.shaderEnable = 1; if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; } static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, union dc_tiling_info *tiling_info, uint64_t modifier) { unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); unsigned int pipes_log2; pipes_log2 = min(5u, mod_pipe_xor_bits); fill_gfx9_tiling_info_from_device(adev, tiling_info); if 
(!IS_AMD_FMT_MOD(modifier)) return; tiling_info->gfx9.num_pipes = 1u << pipes_log2; tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); if (adev->family >= AMDGPU_FAMILY_NV) { tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; } else { tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ } } static int validate_dcc(struct amdgpu_device *adev, const enum surface_pixel_format format, const enum dc_rotation_angle rotation, const union dc_tiling_info *tiling_info, const struct dc_plane_dcc_param *dcc, const struct dc_plane_address *address, const struct plane_size *plane_size) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; struct dc_surface_dcc_cap output; memset(&input, 0, sizeof(input)); memset(&output, 0, sizeof(output)); if (!dcc->enable) return 0; if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || !dc->cap_funcs.get_dcc_compression_cap) return -EINVAL; input.format = format; input.surface_size.width = plane_size->surface_size.width; input.surface_size.height = plane_size->surface_size.height; input.swizzle_mode = tiling_info->gfx9.swizzle; if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) input.scan = SCAN_DIRECTION_HORIZONTAL; else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) input.scan = SCAN_DIRECTION_VERTICAL; if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) return -EINVAL; if (!output.capable) return -EINVAL; if (dcc->independent_64b_blks == 0 && output.grph.rgb.independent_64b_blks != 0) return -EINVAL; return 0; } static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, const struct amdgpu_framebuffer *afb, const enum surface_pixel_format format, const enum dc_rotation_angle rotation, const struct plane_size *plane_size, union dc_tiling_info *tiling_info, struct dc_plane_dcc_param *dcc, struct dc_plane_address *address, const bool force_disable_dcc) { const uint64_t modifier = afb->base.modifier; int ret = 0; fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); if (modifier_has_dcc(modifier) && !force_disable_dcc) { uint64_t dcc_address = afb->address + afb->base.offsets[1]; bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); dcc->enable = 1; dcc->meta_pitch = afb->base.pitches[1]; dcc->independent_64b_blks = independent_64b_blks; if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { if (independent_64b_blks && independent_128b_blks) dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; else if (independent_128b_blks) dcc->dcc_ind_blk = hubp_ind_block_128b; else if (independent_64b_blks && !independent_128b_blks) dcc->dcc_ind_blk = hubp_ind_block_64b; else dcc->dcc_ind_blk = hubp_ind_block_unconstrained; } else { if (independent_64b_blks) dcc->dcc_ind_blk = hubp_ind_block_64b; else dcc->dcc_ind_blk = hubp_ind_block_unconstrained; } address->grph.meta_addr.low_part = lower_32_bits(dcc_address); address->grph.meta_addr.high_part = upper_32_bits(dcc_address); } ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); if (ret) drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); return ret; } static void add_gfx10_1_modifiers(const struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int pipe_xor_bits = 
ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_RETILE, 1) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } static void add_gfx9_modifiers(const struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pipe_xor_bits = min(8, pipes + ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); int bank_xor_bits = min(8 - pipe_xor_bits, ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); if (adev->family == AMDGPU_FAMILY_RV) { /* Raven2 and later */ bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; /* * No _D DCC swizzles yet because we only allow 32bpp, which * doesn't support _D on DCN */ if (has_constant_encode) { add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); } add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); if (has_constant_encode) { add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) 
| AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_RETILE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(RB, rb) | AMD_FMT_MOD_SET(PIPE, pipes)); } add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_RETILE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | AMD_FMT_MOD_SET(RB, rb) | AMD_FMT_MOD_SET(PIPE, pipes)); } /* * Only supported for 64bpp on Raven, will be filtered on format in * dm_plane_format_mod_supported. */ add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); if (adev->family == AMDGPU_FAMILY_RV) { add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); } /* * Only supported for 64bpp on Raven, will be filtered on format in * dm_plane_format_mod_supported. */ add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); if (adev->family == AMDGPU_FAMILY_RV) { add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } } static void add_gfx10_3_modifiers(const struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_RETILE, 1) | 
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs) | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_RETILE, 1) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs)); /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } static void add_gfx11_modifiers(struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int num_pipes = 0; int pipe_xor_bits = 0; int num_pkrs = 0; int pkrs = 0; u32 gb_addr_config; u8 i = 0; unsigned int swizzle_r_x; uint64_t modifier_r_x; uint64_t modifier_dcc_best; uint64_t modifier_dcc_4k; /* TODO: GFX11 IP HW init hasnt finish and we get zero if we read from * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */ gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); ASSERT(gb_addr_config != 0); num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); pkrs = ilog2(num_pkrs); num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES); pipe_xor_bits = ilog2(num_pipes); for (i = 0; i < 2; i++) { /* Insert the best one first. */ /* R_X swizzle modes are the best for rendering and DCC requires them. */ if (num_pipes > 16) swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X; else swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X; modifier_r_x = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(TILE, swizzle_r_x) | AMD_FMT_MOD_SET(PACKERS, pkrs); /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */ modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B); /* DCC settings for 4K and greater resolutions. 
(required by display hw) */ modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); add_modifier(mods, size, capacity, modifier_dcc_best); add_modifier(mods, size, capacity, modifier_dcc_4k); add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); add_modifier(mods, size, capacity, modifier_r_x); } add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); } static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) { uint64_t size = 0, capacity = 128; *mods = NULL; /* We have not hooked up any pre-GFX9 modifiers. */ if (adev->family < AMDGPU_FAMILY_AI) return 0; *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); if (plane_type == DRM_PLANE_TYPE_CURSOR) { add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); return *mods ? 0 : -ENOMEM; } switch (adev->family) { case AMDGPU_FAMILY_AI: case AMDGPU_FAMILY_RV: add_gfx9_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_NV: case AMDGPU_FAMILY_VGH: case AMDGPU_FAMILY_YC: case AMDGPU_FAMILY_GC_10_3_6: case AMDGPU_FAMILY_GC_10_3_7: if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) add_gfx10_3_modifiers(adev, mods, &size, &capacity); else add_gfx10_1_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_GC_11_0_0: case AMDGPU_FAMILY_GC_11_0_1: add_gfx11_modifiers(adev, mods, &size, &capacity); break; } add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); /* INVALID marks the end of the list. */ add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); if (!*mods) return -ENOMEM; return 0; } static int get_plane_formats(const struct drm_plane *plane, const struct dc_plane_cap *plane_cap, uint32_t *formats, int max_formats) { int i, num_formats = 0; /* * TODO: Query support for each group of formats directly from * DC plane caps. This will require adding more formats to the * caps list. 
*/ if (plane->type == DRM_PLANE_TYPE_PRIMARY || (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { if (num_formats >= max_formats) break; formats[num_formats++] = rgb_formats[i]; } if (plane_cap && plane_cap->pixel_format_support.nv12) formats[num_formats++] = DRM_FORMAT_NV12; if (plane_cap && plane_cap->pixel_format_support.p010) formats[num_formats++] = DRM_FORMAT_P010; if (plane_cap && plane_cap->pixel_format_support.fp16) { formats[num_formats++] = DRM_FORMAT_XRGB16161616F; formats[num_formats++] = DRM_FORMAT_ARGB16161616F; formats[num_formats++] = DRM_FORMAT_XBGR16161616F; formats[num_formats++] = DRM_FORMAT_ABGR16161616F; } } else { switch (plane->type) { case DRM_PLANE_TYPE_OVERLAY: for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { if (num_formats >= max_formats) break; formats[num_formats++] = overlay_formats[i]; } break; case DRM_PLANE_TYPE_CURSOR: for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { if (num_formats >= max_formats) break; formats[num_formats++] = cursor_formats[i]; } break; default: break; } } return num_formats; } int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, const struct amdgpu_framebuffer *afb, const enum surface_pixel_format format, const enum dc_rotation_angle rotation, const uint64_t tiling_flags, union dc_tiling_info *tiling_info, struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, struct dc_plane_address *address, bool tmz_surface, bool force_disable_dcc) { const struct drm_framebuffer *fb = &afb->base; int ret; memset(tiling_info, 0, sizeof(*tiling_info)); memset(plane_size, 0, sizeof(*plane_size)); memset(dcc, 0, sizeof(*dcc)); memset(address, 0, sizeof(*address)); address->tmz_surface = tmz_surface; if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { uint64_t addr = afb->address + fb->offsets[0]; plane_size->surface_size.x = 0; plane_size->surface_size.y = 0; plane_size->surface_size.width = fb->width; plane_size->surface_size.height = fb->height; plane_size->surface_pitch = fb->pitches[0] / fb->format->cpp[0]; address->type = PLN_ADDR_TYPE_GRAPHICS; address->grph.addr.low_part = lower_32_bits(addr); address->grph.addr.high_part = upper_32_bits(addr); } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { uint64_t luma_addr = afb->address + fb->offsets[0]; uint64_t chroma_addr = afb->address + fb->offsets[1]; plane_size->surface_size.x = 0; plane_size->surface_size.y = 0; plane_size->surface_size.width = fb->width; plane_size->surface_size.height = fb->height; plane_size->surface_pitch = fb->pitches[0] / fb->format->cpp[0]; plane_size->chroma_size.x = 0; plane_size->chroma_size.y = 0; /* TODO: set these based on surface format */ plane_size->chroma_size.width = fb->width / 2; plane_size->chroma_size.height = fb->height / 2; plane_size->chroma_pitch = fb->pitches[1] / fb->format->cpp[1]; address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; address->video_progressive.luma_addr.low_part = lower_32_bits(luma_addr); address->video_progressive.luma_addr.high_part = upper_32_bits(luma_addr); address->video_progressive.chroma_addr.low_part = lower_32_bits(chroma_addr); address->video_progressive.chroma_addr.high_part = upper_32_bits(chroma_addr); } if (adev->family >= AMDGPU_FAMILY_AI) { ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, rotation, plane_size, tiling_info, dcc, address, force_disable_dcc); if (ret) return ret; } else { fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); } return 0; } 
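/*
 * Worked example for amdgpu_dm_plane_fill_plane_buffer_attributes() above
 * (numbers are illustrative, not taken from the driver): a 1920x1080 NV12
 * framebuffer typically has pitches[0] = 1920 with cpp[0] = 1, so the luma
 * surface_pitch is 1920 elements, while pitches[1] = 1920 with cpp[1] = 2
 * (interleaved CbCr) gives a chroma_pitch of 960 elements for the 960x540
 * chroma plane; the chroma base address is afb->address + fb->offsets[1],
 * i.e. the CbCr plane starts right after the luma plane when the planes
 * are packed back to back.
 */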
static int dm_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct amdgpu_framebuffer *afb; struct drm_gem_object *obj; struct amdgpu_device *adev; struct amdgpu_bo *rbo; struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; uint32_t domain; int r; if (!new_state->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } afb = to_amdgpu_framebuffer(new_state->fb); obj = new_state->fb->obj[0]; rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); r = amdgpu_bo_reserve(rbo, true); if (r) { dev_err(adev->dev, "fail to reserve bo (%d)\n", r); return r; } r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); if (r) { dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); goto error_unlock; } if (plane->type != DRM_PLANE_TYPE_CURSOR) domain = amdgpu_display_supported_domains(adev, rbo->flags); else domain = AMDGPU_GEM_DOMAIN_VRAM; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) DRM_ERROR("Failed to pin framebuffer with error %d\n", r); goto error_unlock; } r = amdgpu_ttm_alloc_gart(&rbo->tbo); if (unlikely(r != 0)) { DRM_ERROR("%p bind failed\n", rbo); goto error_unpin; } r = drm_gem_plane_helper_prepare_fb(plane, new_state); if (unlikely(r != 0)) goto error_unpin; amdgpu_bo_unreserve(rbo); afb->address = amdgpu_bo_gpu_offset(rbo); amdgpu_bo_ref(rbo); /** * We don't do surface updates on planes that have been newly created, * but we also don't have the afb->address during atomic check. * * Fill in buffer attributes depending on the address here, but only on * newly created planes since they're not being used by DC yet and this * won't modify global state. */ dm_plane_state_old = to_dm_plane_state(plane->state); dm_plane_state_new = to_dm_plane_state(new_state); if (dm_plane_state_new->dc_state && dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; bool force_disable_dcc = !plane_state->dcc.enable; amdgpu_dm_plane_fill_plane_buffer_attributes( adev, afb, plane_state->format, plane_state->rotation, afb->tiling_flags, &plane_state->tiling_info, &plane_state->plane_size, &plane_state->dcc, &plane_state->address, afb->tmz_surface, force_disable_dcc); } return 0; error_unpin: amdgpu_bo_unpin(rbo); error_unlock: amdgpu_bo_unreserve(rbo); return r; } static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; int r; if (!old_state->fb) return; rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { DRM_ERROR("failed to reserve rbo before unpin\n"); return; } amdgpu_bo_unpin(rbo); amdgpu_bo_unreserve(rbo); amdgpu_bo_unref(&rbo); } static void get_min_max_dc_plane_scaling(struct drm_device *dev, struct drm_framebuffer *fb, int *min_downscale, int *max_upscale) { struct amdgpu_device *adev = drm_to_adev(dev); struct dc *dc = adev->dm.dc; /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; switch (fb->format->format) { case DRM_FORMAT_P010: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: *max_upscale = plane_cap->max_upscale_factor.nv12; *min_downscale = plane_cap->max_downscale_factor.nv12; break; case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: *max_upscale = plane_cap->max_upscale_factor.fp16; *min_downscale = plane_cap->max_downscale_factor.fp16; break; default: *max_upscale = 
plane_cap->max_upscale_factor.argb8888; *min_downscale = plane_cap->max_downscale_factor.argb8888; break; } /* * A factor of 1 in the plane_cap means to not allow scaling, ie. use a * scaling factor of 1.0 == 1000 units. */ if (*max_upscale == 1) *max_upscale = 1000; if (*min_downscale == 1) *min_downscale = 1000; } int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, struct drm_crtc_state *new_crtc_state) { struct drm_framebuffer *fb = state->fb; int min_downscale, max_upscale; int min_scale = 0; int max_scale = INT_MAX; /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ if (fb && state->crtc) { /* Validate viewport to cover the case when only the position changes */ if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { int viewport_width = state->crtc_w; int viewport_height = state->crtc_h; if (state->crtc_x < 0) viewport_width += state->crtc_x; else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; if (state->crtc_y < 0) viewport_height += state->crtc_y; else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; if (viewport_width < 0 || viewport_height < 0) { DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); return -EINVAL; } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); return -EINVAL; } else if (viewport_height < MIN_VIEWPORT_SIZE) { DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); return -EINVAL; } } /* Get min/max allowed scaling factors from plane caps. */ get_min_max_dc_plane_scaling(state->crtc->dev, fb, &min_downscale, &max_upscale); /* * Convert to drm convention: 16.16 fixed point, instead of dc's * 1.0 == 1000. Also drm scaling is src/dst instead of dc's * dst/src, so min_scale = 1.0 / max_upscale, etc. */ min_scale = (1000 << 16) / max_upscale; max_scale = (1000 << 16) / min_downscale; } return drm_atomic_helper_check_plane_state( state, new_crtc_state, min_scale, max_scale, true, true); } int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, const struct drm_plane_state *state, struct dc_scaling_info *scaling_info) { int scale_w, scale_h, min_downscale, max_upscale; memset(scaling_info, 0, sizeof(*scaling_info)); /* Source is fixed 16.16 but we ignore mantissa for now... */ scaling_info->src_rect.x = state->src_x >> 16; scaling_info->src_rect.y = state->src_y >> 16; /* * For reasons we don't (yet) fully understand a non-zero * src_y coordinate into an NV12 buffer can cause a * system hang on DCN1x. * To avoid hangs (and maybe be overly cautious) * let's reject both non-zero src_x and src_y. * * We currently know of only one use-case to reproduce a * scenario with non-zero src_x and src_y for NV12, which * is to gesture the YouTube Android app into full screen * on ChromeOS. 
*/ if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; scaling_info->src_rect.width = state->src_w >> 16; if (scaling_info->src_rect.width == 0) return -EINVAL; scaling_info->src_rect.height = state->src_h >> 16; if (scaling_info->src_rect.height == 0) return -EINVAL; scaling_info->dst_rect.x = state->crtc_x; scaling_info->dst_rect.y = state->crtc_y; if (state->crtc_w == 0) return -EINVAL; scaling_info->dst_rect.width = state->crtc_w; if (state->crtc_h == 0) return -EINVAL; scaling_info->dst_rect.height = state->crtc_h; /* DRM doesn't specify clipping on destination output. */ scaling_info->clip_rect = scaling_info->dst_rect; /* Validate scaling per-format with DC plane caps */ if (state->plane && state->plane->dev && state->fb) { get_min_max_dc_plane_scaling(state->plane->dev, state->fb, &min_downscale, &max_upscale); } else { min_downscale = 250; max_upscale = 16000; } scale_w = scaling_info->dst_rect.width * 1000 / scaling_info->src_rect.width; if (scale_w < min_downscale || scale_w > max_upscale) return -EINVAL; scale_h = scaling_info->dst_rect.height * 1000 / scaling_info->src_rect.height; if (scale_h < min_downscale || scale_h > max_upscale) return -EINVAL; /* * The "scaling_quality" can be ignored for now, quality = 0 has DC * assume reasonable defaults based on the format. */ return 0; } static int dm_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); struct amdgpu_device *adev = drm_to_adev(plane->dev); struct dc *dc = adev->dm.dc; struct dm_plane_state *dm_plane_state; struct dc_scaling_info scaling_info; struct drm_crtc_state *new_crtc_state; int ret; trace_amdgpu_dm_plane_atomic_check(new_plane_state); dm_plane_state = to_dm_plane_state(new_plane_state); if (!dm_plane_state->dc_state) return 0; new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); if (!new_crtc_state) return -EINVAL; ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); if (ret) return ret; ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info); if (ret) return ret; if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) return 0; return -EINVAL; } static int dm_plane_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state) { /* Only support async updates on cursor planes. 
*/ if (plane->type != DRM_PLANE_TYPE_CURSOR) return -EINVAL; return 0; } static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, struct dc_cursor_position *position) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int x, y; int xorigin = 0, yorigin = 0; if (!crtc || !plane->state->fb) return 0; if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { DRM_ERROR("%s: bad cursor width or height %d x %d\n", __func__, plane->state->crtc_w, plane->state->crtc_h); return -EINVAL; } x = plane->state->crtc_x; y = plane->state->crtc_y; if (x <= -amdgpu_crtc->max_cursor_width || y <= -amdgpu_crtc->max_cursor_height) return 0; if (x < 0) { xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); x = 0; } if (y < 0) { yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); y = 0; } position->enable = true; position->translate_by_source = true; position->x = x; position->y = y; position->x_hotspot = xorigin; position->y_hotspot = yorigin; return 0; } void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state) { struct amdgpu_device *adev = drm_to_adev(plane->dev); struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint64_t address = afb ? afb->address : 0; struct dc_cursor_position position = {0}; struct dc_cursor_attributes attributes; int ret; if (!plane->state->fb && !old_plane_state->fb) return; DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", __func__, amdgpu_crtc->crtc_id, plane->state->crtc_w, plane->state->crtc_h); ret = get_cursor_position(plane, crtc, &position); if (ret) return; if (!position.enable) { /* turn off cursor */ if (crtc_state && crtc_state->stream) { mutex_lock(&adev->dm.dc_lock); dc_stream_set_cursor_position(crtc_state->stream, &position); mutex_unlock(&adev->dm.dc_lock); } return; } amdgpu_crtc->cursor_width = plane->state->crtc_w; amdgpu_crtc->cursor_height = plane->state->crtc_h; memset(&attributes, 0, sizeof(attributes)); attributes.address.high_part = upper_32_bits(address); attributes.address.low_part = lower_32_bits(address); attributes.width = plane->state->crtc_w; attributes.height = plane->state->crtc_h; attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; attributes.rotation_angle = 0; attributes.attribute_flags.value = 0; /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM * legacy gamma setup. 
*/ if (crtc_state->cm_is_degamma_srgb && adev->dm.dc->caps.color.dpp.gamma_corr) attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; if (crtc_state->stream) { mutex_lock(&adev->dm.dc_lock); if (!dc_stream_set_cursor_attributes(crtc_state->stream, &attributes)) DRM_ERROR("DC failed to set cursor attributes\n"); if (!dc_stream_set_cursor_position(crtc_state->stream, &position)) DRM_ERROR("DC failed to set cursor position\n"); mutex_unlock(&adev->dm.dc_lock); } } static void dm_plane_atomic_async_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); trace_amdgpu_dm_atomic_update_cursor(new_state); swap(plane->state->fb, new_state->fb); plane->state->src_x = new_state->src_x; plane->state->src_y = new_state->src_y; plane->state->src_w = new_state->src_w; plane->state->src_h = new_state->src_h; plane->state->crtc_x = new_state->crtc_x; plane->state->crtc_y = new_state->crtc_y; plane->state->crtc_w = new_state->crtc_w; plane->state->crtc_h = new_state->crtc_h; amdgpu_dm_plane_handle_cursor_update(plane, old_state); } static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { .prepare_fb = dm_plane_helper_prepare_fb, .cleanup_fb = dm_plane_helper_cleanup_fb, .atomic_check = dm_plane_atomic_check, .atomic_async_check = dm_plane_atomic_async_check, .atomic_async_update = dm_plane_atomic_async_update }; static void dm_drm_plane_reset(struct drm_plane *plane) { struct dm_plane_state *amdgpu_state = NULL; if (plane->state) plane->funcs->atomic_destroy_state(plane, plane->state); amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); WARN_ON(amdgpu_state == NULL); if (amdgpu_state) __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); } static struct drm_plane_state * dm_drm_plane_duplicate_state(struct drm_plane *plane) { struct dm_plane_state *dm_plane_state, *old_dm_plane_state; old_dm_plane_state = to_dm_plane_state(plane->state); dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); if (!dm_plane_state) return NULL; __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); if (old_dm_plane_state->dc_state) { dm_plane_state->dc_state = old_dm_plane_state->dc_state; dc_plane_state_retain(dm_plane_state->dc_state); } return &dm_plane_state->base; } static bool dm_plane_format_mod_supported(struct drm_plane *plane, uint32_t format, uint64_t modifier) { struct amdgpu_device *adev = drm_to_adev(plane->dev); const struct drm_format_info *info = drm_format_info(format); int i; enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; if (!info) return false; /* * We always have to allow these modifiers: * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. * 2. Not passing any modifiers is the same as explicitly passing INVALID. */ if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == DRM_FORMAT_MOD_INVALID) { return true; } /* Check that the modifier is on the list of the plane's supported modifiers. */ for (i = 0; i < plane->modifier_count; i++) { if (modifier == plane->modifiers[i]) break; } if (i == plane->modifier_count) return false; /* * For D swizzle the canonical modifier depends on the bpp, so check * it here. 
*/ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && adev->family >= AMDGPU_FAMILY_NV) { if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) return false; } if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && info->cpp[0] < 8) return false; if (modifier_has_dcc(modifier)) { /* Per radeonsi comments 16/64 bpp are more complicated. */ if (info->cpp[0] != 4) return false; /* We support multi-planar formats, but not when combined with * additional DCC metadata planes. */ if (info->num_planes > 1) return false; } return true; } static void dm_drm_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); if (dm_plane_state->dc_state) dc_plane_state_release(dm_plane_state->dc_state); drm_atomic_helper_plane_destroy_state(plane, state); } static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_helper_destroy, .reset = dm_drm_plane_reset, .atomic_duplicate_state = dm_drm_plane_duplicate_state, .atomic_destroy_state = dm_drm_plane_destroy_state, .format_mod_supported = dm_plane_format_mod_supported, }; int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, unsigned long possible_crtcs, const struct dc_plane_cap *plane_cap) { uint32_t formats[32]; int num_formats; int res = -EPERM; unsigned int supported_rotations; uint64_t *modifiers = NULL; num_formats = get_plane_formats(plane, plane_cap, formats, ARRAY_SIZE(formats)); res = get_plane_modifiers(dm->adev, plane->type, &modifiers); if (res) return res; if (modifiers == NULL) adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, &dm_plane_funcs, formats, num_formats, modifiers, plane->type, NULL); kfree(modifiers); if (res) return res; if (plane->type == DRM_PLANE_TYPE_OVERLAY && plane_cap && plane_cap->per_pixel_alpha) { unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE); drm_plane_create_alpha_property(plane); drm_plane_create_blend_mode_property(plane, blend_caps); } if (plane->type == DRM_PLANE_TYPE_PRIMARY) { drm_plane_create_zpos_immutable_property(plane, 0); } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { unsigned int zpos = 1 + drm_plane_index(plane); drm_plane_create_zpos_property(plane, zpos, 1, 254); } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { drm_plane_create_zpos_immutable_property(plane, 255); } if (plane->type == DRM_PLANE_TYPE_PRIMARY && plane_cap && (plane_cap->pixel_format_support.nv12 || plane_cap->pixel_format_support.p010)) { /* This only affects YUV formats. 
*/ drm_plane_create_color_properties( plane, BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709) | BIT(DRM_COLOR_YCBCR_BT2020), BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | BIT(DRM_COLOR_YCBCR_FULL_RANGE), DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); } supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; if (dm->adev->asic_type >= CHIP_BONAIRE && plane->type != DRM_PLANE_TYPE_CURSOR) drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, supported_rotations); if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) && plane->type != DRM_PLANE_TYPE_CURSOR) drm_plane_enable_fb_damage_clips(plane); drm_plane_helper_add(plane, &dm_plane_helper_funcs); /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); return 0; } bool is_video_format(uint32_t format) { int i; for (i = 0; i < ARRAY_SIZE(video_formats); i++) if (format == video_formats[i]) return true; return false; }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
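The modifier lists built by add_gfx9_modifiers() and friends use the AMD_FMT_MOD_* encoding from the drm_fourcc uapi, so userspace can compose and decode the same 64-bit values when negotiating formats. A small sketch, assuming the uapi header is reachable as <drm/drm_fourcc.h> (the exact include path depends on whether kernel or libdrm headers are installed) and using made-up XOR-bit counts:

#include <drm/drm_fourcc.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Compose a GFX9 64K_S_X modifier the same way add_gfx9_modifiers()
	 * does; the pipe/bank XOR values here are invented for illustration. */
	uint64_t mod = AMD_FMT_MOD |
		       AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		       AMD_FMT_MOD_SET(PIPE_XOR_BITS, 3) |
		       AMD_FMT_MOD_SET(BANK_XOR_BITS, 1);

	/* Fields come back out with the matching GET accessor, exactly as
	 * modifier_gfx9_swizzle_mode() and modifier_has_dcc() use it. */
	printf("modifier 0x%llx: tile %llu, dcc %llu\n",
	       (unsigned long long)mod,
	       (unsigned long long)AMD_FMT_MOD_GET(TILE, mod),
	       (unsigned long long)AMD_FMT_MOD_GET(DCC, mod));
	return 0;
}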
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/string.h> #include <linux/acpi.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" #include "amdgpu_pm.h" #include "amdgpu_dm_trace.h" unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx, unsigned long long current_time_stamp, unsigned long long last_time_stamp) { return current_time_stamp - last_time_stamp; } void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc_context *ctx) { trace_amdgpu_dc_performance(ctx->perf_trace->read_count, ctx->perf_trace->write_count, &ctx->perf_trace->last_entry_read, &ctx->perf_trace->last_entry_write, func_name, line); } /**** power component interfaces ****/
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "amdgpu.h" #include "amdgpu_mode.h" #include "amdgpu_dm.h" #include "dc.h" #include "modules/color/color_gamma.h" #include "basics/conversion.h" /** * DOC: overview * * The DC interface to HW gives us the following color management blocks * per pipe (surface): * * - Input gamma LUT (de-normalized) * - Input CSC (normalized) * - Surface degamma LUT (normalized) * - Surface CSC (normalized) * - Surface regamma LUT (normalized) * - Output CSC (normalized) * * But these aren't a direct mapping to DRM color properties. The current DRM * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware * is essentially giving: * * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM * * The input gamma LUT block isn't really applicable here since it operates * on the actual input data itself rather than the HW fp representation. The * input and output CSC blocks are technically available to use as part of * the DC interface but are typically used internally by DC for conversions * between color spaces. These could be blended together with user * adjustments in the future but for now these should remain untouched. * * The pipe blending also happens after these blocks so we don't actually * support any CRTC props with correct blending with multiple planes - but we * can still support CRTC color management properties in DM in most single * plane cases correctly with clever management of the DC interface in DM. * * As per DRM documentation, blocks should be in hardware bypass when their * respective property is set to NULL. A linear DGM/RGM LUT should also * considered as putting the respective block into bypass mode. * * This means that the following * configuration is assumed to be the default: * * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass */ #define MAX_DRM_LUT_VALUE 0xFFFF /** * amdgpu_dm_init_color_mod - Initialize the color module. * * We're not using the full color module, only certain components. * Only call setup functions for components that we need. */ void amdgpu_dm_init_color_mod(void) { setup_x_points_distribution(); } /** * __extract_blob_lut - Extracts the DRM lut and lut size from a blob. 
* @blob: DRM color mgmt property blob * @size: lut size * * Returns: * DRM LUT or NULL */ static const struct drm_color_lut * __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size) { *size = blob ? drm_color_lut_size(blob) : 0; return blob ? (struct drm_color_lut *)blob->data : NULL; } /** * __is_lut_linear - check if the given lut is a linear mapping of values * @lut: given lut to check values * @size: lut size * * It is considered linear if the lut represents: * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in [0, * MAX_COLOR_LUT_ENTRIES) * * Returns: * True if the given lut is a linear mapping of values, i.e. it acts like a * bypass LUT. Otherwise, false. */ static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size) { int i; uint32_t expected; int delta; for (i = 0; i < size; i++) { /* All color values should equal */ if ((lut[i].red != lut[i].green) || (lut[i].green != lut[i].blue)) return false; expected = i * MAX_DRM_LUT_VALUE / (size-1); /* Allow a +/-1 error. */ delta = lut[i].red - expected; if (delta < -1 || 1 < delta) return false; } return true; } /** * __drm_lut_to_dc_gamma - convert the drm_color_lut to dc_gamma. * @lut: DRM lookup table for color conversion * @gamma: DC gamma to set entries * @is_legacy: legacy or atomic gamma * * The conversion depends on the size of the lut - whether or not it's legacy. */ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, struct dc_gamma *gamma, bool is_legacy) { uint32_t r, g, b; int i; if (is_legacy) { for (i = 0; i < MAX_COLOR_LEGACY_LUT_ENTRIES; i++) { r = drm_color_lut_extract(lut[i].red, 16); g = drm_color_lut_extract(lut[i].green, 16); b = drm_color_lut_extract(lut[i].blue, 16); gamma->entries.red[i] = dc_fixpt_from_int(r); gamma->entries.green[i] = dc_fixpt_from_int(g); gamma->entries.blue[i] = dc_fixpt_from_int(b); } return; } /* else */ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) { r = drm_color_lut_extract(lut[i].red, 16); g = drm_color_lut_extract(lut[i].green, 16); b = drm_color_lut_extract(lut[i].blue, 16); gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE); gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE); gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE); } } /** * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix * @ctm: DRM color transformation matrix * @matrix: DC CSC float matrix * * The matrix needs to be a 3x4 (12 entry) matrix. */ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, struct fixed31_32 *matrix) { int64_t val; int i; /* * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating * with homogeneous coordinates, augment the matrix with 0's. * * The format provided is S31.32, using signed-magnitude representation. * Our fixed31_32 is also S31.32, but is using 2's complement. We have * to convert from signed-magnitude to 2's complement. */ for (i = 0; i < 12; i++) { /* Skip 4th element */ if (i % 4 == 3) { matrix[i] = dc_fixpt_zero; continue; } /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ val = ctm->matrix[i - (i / 4)]; /* If negative, convert to 2's complement. 
*/ if (val & (1ULL << 63)) val = -(val & ~(1ULL << 63)); matrix[i].value = val; } } /** * __set_legacy_tf - Calculates the legacy transfer function * @func: transfer function * @lut: lookup table that defines the color space * @lut_size: size of respective lut * @has_rom: if ROM can be used for hardcoded curve * * Only for sRGB input space * * Returns: * 0 in case of success, -ENOMEM if fails */ static int __set_legacy_tf(struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size, bool has_rom) { struct dc_gamma *gamma = NULL; struct calculate_buffer cal_buffer = {0}; bool res; ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES); cal_buffer.buffer_index = -1; gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; gamma->type = GAMMA_RGB_256; gamma->num_entries = lut_size; __drm_lut_to_dc_gamma(lut, gamma, true); res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL, &cal_buffer); dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } /** * __set_output_tf - calculates the output transfer function based on expected input space. * @func: transfer function * @lut: lookup table that defines the color space * @lut_size: size of respective lut * @has_rom: if ROM can be used for hardcoded curve * * Returns: * 0 in case of success. -ENOMEM if fails. */ static int __set_output_tf(struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size, bool has_rom) { struct dc_gamma *gamma = NULL; struct calculate_buffer cal_buffer = {0}; bool res; ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); cal_buffer.buffer_index = -1; gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; gamma->num_entries = lut_size; __drm_lut_to_dc_gamma(lut, gamma, false); if (func->tf == TRANSFER_FUNCTION_LINEAR) { /* * Color module doesn't like calculating regamma params * on top of a linear input. But degamma params can be used * instead to simulate this. */ gamma->type = GAMMA_CUSTOM; res = mod_color_calculate_degamma_params(NULL, func, gamma, true); } else { /* * Assume sRGB. The actual mapping will depend on whether the * input was legacy or not. */ gamma->type = GAMMA_CS_TFM_1D; res = mod_color_calculate_regamma_params(func, gamma, false, has_rom, NULL, &cal_buffer); } dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } /** * __set_input_tf - calculates the input transfer function based on expected * input space. * @func: transfer function * @lut: lookup table that defines the color space * @lut_size: size of respective lut. * * Returns: * 0 in case of success. -ENOMEM if fails. */ static int __set_input_tf(struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size) { struct dc_gamma *gamma = NULL; bool res; gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; gamma->type = GAMMA_CUSTOM; gamma->num_entries = lut_size; __drm_lut_to_dc_gamma(lut, gamma, false); res = mod_color_calculate_degamma_params(NULL, func, gamma, true); dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } /** * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes * @crtc_state: the DRM CRTC state * * Verifies that the Degamma and Gamma LUTs attached to the &crtc_state * are of the expected size. * * Returns: * 0 on success. -EINVAL if any lut sizes are invalid. 
*/ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) { const struct drm_color_lut *lut = NULL; uint32_t size = 0; lut = __extract_blob_lut(crtc_state->degamma_lut, &size); if (lut && size != MAX_COLOR_LUT_ENTRIES) { DRM_DEBUG_DRIVER( "Invalid Degamma LUT size. Should be %u but got %u.\n", MAX_COLOR_LUT_ENTRIES, size); return -EINVAL; } lut = __extract_blob_lut(crtc_state->gamma_lut, &size); if (lut && size != MAX_COLOR_LUT_ENTRIES && size != MAX_COLOR_LEGACY_LUT_ENTRIES) { DRM_DEBUG_DRIVER( "Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n", MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES, size); return -EINVAL; } return 0; } /** * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream. * @crtc: amdgpu_dm crtc state * * With no plane level color management properties we're free to use any * of the HW blocks as long as the CRTC CTM always comes before the * CRTC RGM and after the CRTC DGM. * * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. * - The CRTC CTM will be placed in the gamut remap block if it is non-linear. * * The RGM block is typically more fully featured and accurate across * all ASICs - DCE can't support a custom non-linear CRTC DGM. * * For supporting both plane level color management and CRTC level color * management at once we have to either restrict the usage of CRTC properties * or blend adjustments together. * * Returns: * 0 on success. Error code if setup fails. */ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) { struct dc_stream_state *stream = crtc->stream; struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev); bool has_rom = adev->asic_type <= CHIP_RAVEN; struct drm_color_ctm *ctm = NULL; const struct drm_color_lut *degamma_lut, *regamma_lut; uint32_t degamma_size, regamma_size; bool has_regamma, has_degamma; bool is_legacy; int r; r = amdgpu_dm_verify_lut_sizes(&crtc->base); if (r) return r; degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size); regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size); has_degamma = degamma_lut && !__is_lut_linear(degamma_lut, degamma_size); has_regamma = regamma_lut && !__is_lut_linear(regamma_lut, regamma_size); is_legacy = regamma_size == MAX_COLOR_LEGACY_LUT_ENTRIES; /* Reset all adjustments. */ crtc->cm_has_degamma = false; crtc->cm_is_degamma_srgb = false; /* Setup regamma and degamma. */ if (is_legacy) { /* * Legacy regamma forces us to use the sRGB RGM as a base. * This also means we can't use linear DGM since DGM needs * to use sRGB as a base as well, resulting in incorrect CRTC * DGM and CRTC CTM. * * TODO: Just map this to the standard regamma interface * instead since this isn't really right. One of the cases * where this setup currently fails is trying to do an * inverse color ramp in legacy userspace. */ crtc->cm_is_degamma_srgb = true; stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; r = __set_legacy_tf(stream->out_transfer_func, regamma_lut, regamma_size, has_rom); if (r) return r; } else if (has_regamma) { /* If atomic regamma, CRTC RGM goes into RGM LUT. 
*/ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; r = __set_output_tf(stream->out_transfer_func, regamma_lut, regamma_size, has_rom); if (r) return r; } else { /* * No CRTC RGM means we can just put the block into bypass * since we don't have any plane level adjustments using it. */ stream->out_transfer_func->type = TF_TYPE_BYPASS; stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; } /* * CRTC DGM goes into DGM LUT. It would be nice to place it * into the RGM since it's a more featured block but we'd * have to place the CTM in the OCSC in that case. */ crtc->cm_has_degamma = has_degamma; /* Setup CRTC CTM. */ if (crtc->base.ctm) { ctm = (struct drm_color_ctm *)crtc->base.ctm->data; /* * Gamut remapping must be used for gamma correction * since it comes before the regamma correction. * * OCSC could be used for gamma correction, but we'd need to * blend the adjustments together with the required output * conversion matrix - so just use the gamut remap block * for now. */ __drm_ctm_to_dc_matrix(ctm, stream->gamut_remap_matrix.matrix); stream->gamut_remap_matrix.enable_remap = true; stream->csc_color_matrix.enable_adjustment = false; } else { /* Bypass CTM. */ stream->gamut_remap_matrix.enable_remap = false; stream->csc_color_matrix.enable_adjustment = false; } return 0; } /** * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. * @crtc: amdgpu_dm crtc state * @dc_plane_state: target DC surface * * Update the underlying dc_stream_state's input transfer function (ITF) in * preparation for hardware commit. The transfer function used depends on * the preparation done on the stream for color management. * * Returns: * 0 on success. -ENOMEM if mem allocation fails. */ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, struct dc_plane_state *dc_plane_state) { const struct drm_color_lut *degamma_lut; enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; uint32_t degamma_size; int r; /* Get the correct base transfer function for implicit degamma. */ switch (dc_plane_state->format) { case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: /* DC doesn't have a transfer function for BT601 specifically. */ tf = TRANSFER_FUNCTION_BT709; break; default: break; } if (crtc->cm_has_degamma) { degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size); ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; /* * This case isn't fully correct, but also fairly * uncommon. This is userspace trying to use a * legacy gamma LUT + atomic degamma LUT * at the same time. * * Legacy gamma requires the input to be in linear * space, so that means we need to apply an sRGB * degamma. But color module also doesn't support * a user ramp in this case so the degamma will * be lost. * * Even if we did support it, it's still not right: * * Input -> CRTC DGM -> sRGB DGM -> CRTC CTM -> * sRGB RGM -> CRTC RGM -> Output * * The CSC will be done in the wrong space since * we're applying an sRGB DGM on top of the CRTC * DGM. * * TODO: Don't use the legacy gamma interface and just * map these to the atomic one instead. 
*/ if (crtc->cm_is_degamma_srgb) dc_plane_state->in_transfer_func->tf = tf; else dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; r = __set_input_tf(dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (r) return r; } else if (crtc->cm_is_degamma_srgb) { /* * For legacy gamma support we need the regamma input * in linear space. Assume that the input is sRGB. */ dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED; dc_plane_state->in_transfer_func->tf = tf; if (tf != TRANSFER_FUNCTION_SRGB && !mod_color_calculate_degamma_params(NULL, dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; } else { /* ...Otherwise we can just bypass the DGM block. */ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; } return 0; }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
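A standalone sketch (not driver code; the helper name ctm_s31_32_to_twos_complement is invented for illustration) of the fixed-point handling described in __drm_ctm_to_dc_matrix() above: DRM hands a 3x3 CTM in S31.32 signed-magnitude format, DC wants a 3x4 matrix in S31.32 two's complement, so entries with bit 63 set are converted to a negated magnitude and every fourth (translation) entry is left at zero.

/* Sketch of the S31.32 signed-magnitude to two's complement conversion. */
#include <stdint.h>
#include <stdio.h>

static int64_t ctm_s31_32_to_twos_complement(uint64_t val)
{
	if (val & (1ULL << 63))
		return -(int64_t)(val & ~(1ULL << 63));
	return (int64_t)val;
}

int main(void)
{
	/* -1.0 in signed-magnitude S31.32: sign bit set, magnitude 1 << 32. */
	uint64_t minus_one_sm = (1ULL << 63) | (1ULL << 32);
	int i;

	printf("two's complement value: %lld\n",
	       (long long)ctm_s31_32_to_twos_complement(minus_one_sm));

	/* 3x3 -> 3x4 index mapping: matrix[i] = ctm[i - i / 4], 4th column 0. */
	for (i = 0; i < 12; i++) {
		if (i % 4 == 3)
			printf("matrix[%2d] = 0\n", i);
		else
			printf("matrix[%2d] = ctm[%d]\n", i, i - i / 4);
	}
	return 0;
}

The same conversion appears in the driver as the val & (1ULL << 63) test followed by negating the masked magnitude; the sketch only makes the arithmetic visible outside the fixed31_32 type.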
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services_types.h" #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" /** * DOC: overview * * DM provides another layer of IRQ management on top of what the base driver * already provides. This is something that could be cleaned up, and is a * future TODO item. * * The base driver provides IRQ source registration with DRM, handler * registration into the base driver's IRQ table, and a handler callback * amdgpu_irq_handler(), with which DRM calls on interrupts. This generic * handler looks up the IRQ table, and calls the respective * &amdgpu_irq_src_funcs.process hookups. * * What DM provides on top are two IRQ tables specifically for top-half and * bottom-half IRQ handling, with the bottom-half implementing workqueues: * * - &amdgpu_display_manager.irq_handler_list_high_tab * - &amdgpu_display_manager.irq_handler_list_low_tab * * They override the base driver's IRQ table, and the effect can be seen * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up * DM's IRQ tables. However, in order for base driver to recognize this hook, DM * still needs to register the IRQ with the base driver. See * dce110_register_irq_handlers() and dcn10_register_irq_handlers(). * * To expose DC's hardware interrupt toggle to the base driver, DM implements * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through * amdgpu_irq_update() to enable or disable the interrupt. */ /****************************************************************************** * Private declarations. *****************************************************************************/ /** * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers. * * @list: Linked list entry referencing the next/previous handler * @handler: Handler function * @handler_arg: Argument passed to the handler when triggered * @dm: DM which this handler belongs to * @irq_source: DC interrupt source that this handler is registered for * @work: work struct */ struct amdgpu_dm_irq_handler_data { struct list_head list; interrupt_handler handler; void *handler_arg; struct amdgpu_display_manager *dm; /* DAL irq source which registered for this interrupt. 
*/ enum dc_irq_source irq_source; struct work_struct work; }; #define DM_IRQ_TABLE_LOCK(adev, flags) \ spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags) #define DM_IRQ_TABLE_UNLOCK(adev, flags) \ spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags) /****************************************************************************** * Private functions. *****************************************************************************/ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, void (*ih)(void *), void *args, struct amdgpu_display_manager *dm) { hcd->handler = ih; hcd->handler_arg = args; hcd->dm = dm; } /** * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. * * @work: work struct */ static void dm_irq_work_func(struct work_struct *work) { struct amdgpu_dm_irq_handler_data *handler_data = container_of(work, struct amdgpu_dm_irq_handler_data, work); handler_data->handler(handler_data->handler_arg); /* Call a DAL subcomponent which registered for interrupt notification * at INTERRUPT_LOW_IRQ_CONTEXT. * (The most common use is HPD interrupt) */ } /* * Remove a handler and return a pointer to handler list from which the * handler was removed. */ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, void *ih, const struct dc_interrupt_params *int_params) { struct list_head *hnd_list; struct list_head *entry, *tmp; struct amdgpu_dm_irq_handler_data *handler; unsigned long irq_table_flags; bool handler_removed = false; enum dc_irq_source irq_source; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); irq_source = int_params->irq_source; switch (int_params->int_context) { case INTERRUPT_HIGH_IRQ_CONTEXT: hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; break; case INTERRUPT_LOW_IRQ_CONTEXT: default: hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; break; } list_for_each_safe(entry, tmp, hnd_list) { handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, list); if (handler == NULL) continue; if (ih == handler->handler) { /* Found our handler. Remove it from the list. */ list_del(&handler->list); handler_removed = true; break; } } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); if (handler_removed == false) { /* Not necessarily an error - caller may not * know the context. */ return NULL; } kfree(handler); DRM_DEBUG_KMS( "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n", ih, int_params->irq_source, int_params->int_context); return hnd_list; } /** * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table * @adev: The base driver device containing the DM device * * Go through low and high context IRQ tables and deallocate handlers. 
*/ static void unregister_all_irq_handlers(struct amdgpu_device *adev) { struct list_head *hnd_list_low; struct list_head *hnd_list_high; struct list_head *entry, *tmp; struct amdgpu_dm_irq_handler_data *handler; unsigned long irq_table_flags; int i; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) { hnd_list_low = &adev->dm.irq_handler_list_low_tab[i]; hnd_list_high = &adev->dm.irq_handler_list_high_tab[i]; list_for_each_safe(entry, tmp, hnd_list_low) { handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, list); if (handler == NULL || handler->handler == NULL) continue; list_del(&handler->list); kfree(handler); } list_for_each_safe(entry, tmp, hnd_list_high) { handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, list); if (handler == NULL || handler->handler == NULL) continue; list_del(&handler->list); kfree(handler); } } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); } static bool validate_irq_registration_params(struct dc_interrupt_params *int_params, void (*ih)(void *)) { if (NULL == int_params || NULL == ih) { DRM_ERROR("DM_IRQ: invalid input!\n"); return false; } if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) { DRM_ERROR("DM_IRQ: invalid context: %d!\n", int_params->int_context); return false; } if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) { DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", int_params->irq_source); return false; } return true; } static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, irq_handler_idx handler_idx) { if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) { DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); return false; } if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) { DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source); return false; } return true; } /****************************************************************************** * Public functions. * * Note: caller is responsible for input validation. *****************************************************************************/ /** * amdgpu_dm_irq_register_interrupt() - Register a handler within DM. * @adev: The base driver device containing the DM device. * @int_params: Interrupt parameters containing the source, and handler context * @ih: Function pointer to the interrupt handler to register * @handler_args: Arguments passed to the handler when the interrupt occurs * * Register an interrupt handler for the given IRQ source, under the given * context. The context can either be high or low. High context handlers are * executed directly within ISR context, while low context is executed within a * workqueue, thereby allowing operations that sleep. * * Registered handlers are called in a FIFO manner, i.e. the most recently * registered handler will be called first. 
* * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ * source, handler function, and args */ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, struct dc_interrupt_params *int_params, void (*ih)(void *), void *handler_args) { struct list_head *hnd_list; struct amdgpu_dm_irq_handler_data *handler_data; unsigned long irq_table_flags; enum dc_irq_source irq_source; if (false == validate_irq_registration_params(int_params, ih)) return DAL_INVALID_IRQ_HANDLER_IDX; handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); if (!handler_data) { DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); return DAL_INVALID_IRQ_HANDLER_IDX; } init_handler_common_data(handler_data, ih, handler_args, &adev->dm); irq_source = int_params->irq_source; handler_data->irq_source = irq_source; /* Lock the list, add the handler. */ DM_IRQ_TABLE_LOCK(adev, irq_table_flags); switch (int_params->int_context) { case INTERRUPT_HIGH_IRQ_CONTEXT: hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; break; case INTERRUPT_LOW_IRQ_CONTEXT: default: hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; INIT_WORK(&handler_data->work, dm_irq_work_func); break; } list_add_tail(&handler_data->list, hnd_list); DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); /* This pointer will be stored by code which requested interrupt * registration. * The same pointer will be needed in order to unregister the * interrupt. */ DRM_DEBUG_KMS( "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", handler_data, irq_source, int_params->int_context); return handler_data; } /** * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table * @adev: The base driver device containing the DM device * @irq_source: IRQ source to remove the given handler from * @ih: Function pointer to the interrupt handler to unregister * * Go through both low and high context IRQ tables, and find the given handler * for the given irq source. If found, remove it. Otherwise, do nothing. */ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, enum dc_irq_source irq_source, void *ih) { struct list_head *handler_list; struct dc_interrupt_params int_params; int i; if (false == validate_irq_unregistration_params(irq_source, ih)) return; memset(&int_params, 0, sizeof(int_params)); int_params.irq_source = irq_source; for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) { int_params.int_context = i; handler_list = remove_irq_handler(adev, ih, &int_params); if (handler_list != NULL) break; } if (handler_list == NULL) { /* If we got here, it means we searched all irq contexts * for this irq source, but the handler was not found. */ DRM_ERROR( "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", ih, irq_source); } } /** * amdgpu_dm_irq_init() - Initialize DM IRQ management * @adev: The base driver device containing the DM device * * Initialize DM's high and low context IRQ tables. * * The N by M table contains N IRQ sources, with M * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The * list_heads are initialized here. When an interrupt n is triggered, all m * handlers are called in sequence, FIFO according to registration order. * * The low context table requires special steps to initialize, since handlers * will be deferred to a workqueue. See &struct irq_list_head. 
*/ int amdgpu_dm_irq_init(struct amdgpu_device *adev) { int src; struct list_head *lh; DRM_DEBUG_KMS("DM_IRQ\n"); spin_lock_init(&adev->dm.irq_handler_list_table_lock); for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { /* low context handler list init */ lh = &adev->dm.irq_handler_list_low_tab[src]; INIT_LIST_HEAD(lh); /* high context handler init */ INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); } return 0; } /** * amdgpu_dm_irq_fini() - Tear down DM IRQ management * @adev: The base driver device containing the DM device * * Flush all work within the low context IRQ table. */ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) { int src; struct list_head *lh; struct list_head *entry, *tmp; struct amdgpu_dm_irq_handler_data *handler; unsigned long irq_table_flags; DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' * (because no code can schedule a new one). */ lh = &adev->dm.irq_handler_list_low_tab[src]; DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); if (!list_empty(lh)) { list_for_each_safe(entry, tmp, lh) { handler = list_entry( entry, struct amdgpu_dm_irq_handler_data, list); flush_work(&handler->work); } } } /* Deallocate handlers from the table. */ unregister_all_irq_handlers(adev); } int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) { int src; struct list_head *hnd_list_h; struct list_head *hnd_list_l; unsigned long irq_table_flags; struct list_head *entry, *tmp; struct amdgpu_dm_irq_handler_data *handler; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); DRM_DEBUG_KMS("DM_IRQ: suspend\n"); /** * Disable HW interrupt for HPD and HPDRX only since FLIP and VBLANK * will be disabled from manage_dm_interrupts on disable CRTC. */ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) { hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) dc_interrupt_set(adev->dm.dc, src, false); DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); if (!list_empty(hnd_list_l)) { list_for_each_safe(entry, tmp, hnd_list_l) { handler = list_entry( entry, struct amdgpu_dm_irq_handler_data, list); flush_work(&handler->work); } } DM_IRQ_TABLE_LOCK(adev, irq_table_flags); } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); return 0; } int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) { int src; struct list_head *hnd_list_h, *hnd_list_l; unsigned long irq_table_flags; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); DRM_DEBUG_KMS("DM_IRQ: early resume\n"); /* re-enable short pulse interrupts HW interrupt */ for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) { hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) dc_interrupt_set(adev->dm.dc, src, true); } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); return 0; } int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) { int src; struct list_head *hnd_list_h, *hnd_list_l; unsigned long irq_table_flags; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); DRM_DEBUG_KMS("DM_IRQ: resume\n"); /** * Renable HW interrupt for HPD and only since FLIP and VBLANK * will be enabled from manage_dm_interrupts on enable CRTC. 
*/ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) { hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) dc_interrupt_set(adev->dm.dc, src, true); } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); return 0; } /* * amdgpu_dm_irq_schedule_work - schedule all work items registered for the * "irq_source". */ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, enum dc_irq_source irq_source) { struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source]; struct amdgpu_dm_irq_handler_data *handler_data; bool work_queued = false; if (list_empty(handler_list)) return; list_for_each_entry(handler_data, handler_list, list) { if (queue_work(system_highpri_wq, &handler_data->work)) { work_queued = true; break; } } if (!work_queued) { struct amdgpu_dm_irq_handler_data *handler_data_add; /*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/ handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list); /*allocate a new amdgpu_dm_irq_handler_data*/ handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC); if (!handler_data_add) { DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); return; } /*copy new amdgpu_dm_irq_handler_data members from handler_data*/ handler_data_add->handler = handler_data->handler; handler_data_add->handler_arg = handler_data->handler_arg; handler_data_add->dm = handler_data->dm; handler_data_add->irq_source = irq_source; list_add_tail(&handler_data_add->list, handler_list); INIT_WORK(&handler_data_add->work, dm_irq_work_func); if (queue_work(system_highpri_wq, &handler_data_add->work)) DRM_DEBUG("Queued work for handling interrupt from " "display for IRQ source %d\n", irq_source); else DRM_ERROR("Failed to queue work for handling interrupt " "from display for IRQ source %d\n", irq_source); } } /* * amdgpu_dm_irq_immediate_work * Callback high irq work immediately, don't send to work queue */ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, enum dc_irq_source irq_source) { struct amdgpu_dm_irq_handler_data *handler_data; unsigned long irq_table_flags; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); list_for_each_entry(handler_data, &adev->dm.irq_handler_list_high_tab[irq_source], list) { /* Call a subcomponent which registered for immediate * interrupt notification */ handler_data->handler(handler_data->handler_arg); } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); } /** * amdgpu_dm_irq_handler - Generic DM IRQ handler * @adev: amdgpu base driver device containing the DM device * @source: Unused * @entry: Data about the triggered interrupt * * Calls all registered high irq work immediately, and schedules work for low * irq. The DM IRQ table is used to find the corresponding handlers. 
*/ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { enum dc_irq_source src = dc_interrupt_to_irq_source( adev->dm.dc, entry->src_id, entry->src_data[0]); dc_interrupt_ack(adev->dm.dc, src); /* Call high irq work immediately */ amdgpu_dm_irq_immediate_work(adev, src); /*Schedule low_irq work */ amdgpu_dm_irq_schedule_work(adev, src); return 0; } static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) { switch (type) { case AMDGPU_HPD_1: return DC_IRQ_SOURCE_HPD1; case AMDGPU_HPD_2: return DC_IRQ_SOURCE_HPD2; case AMDGPU_HPD_3: return DC_IRQ_SOURCE_HPD3; case AMDGPU_HPD_4: return DC_IRQ_SOURCE_HPD4; case AMDGPU_HPD_5: return DC_IRQ_SOURCE_HPD5; case AMDGPU_HPD_6: return DC_IRQ_SOURCE_HPD6; default: return DC_IRQ_SOURCE_INVALID; } } static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, enum amdgpu_interrupt_state state) { enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); bool st = (state == AMDGPU_IRQ_STATE_ENABLE); dc_interrupt_set(adev->dm.dc, src, st); return 0; } static inline int dm_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int crtc_id, enum amdgpu_interrupt_state state, const enum irq_type dal_irq_type, const char *func) { bool st; enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; if (!acrtc) { DRM_ERROR( "%s: crtc is NULL at id :%d\n", func, crtc_id); return 0; } if (acrtc->otg_inst == -1) return 0; irq_source = dal_irq_type + acrtc->otg_inst; st = (state == AMDGPU_IRQ_STATE_ENABLE); dc_interrupt_set(adev->dm.dc, irq_source, st); return 0; } static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( adev, source, crtc_id, state, IRQ_TYPE_PFLIP, __func__); } static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( adev, source, crtc_id, state, IRQ_TYPE_VBLANK, __func__); } static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( adev, source, crtc_id, state, IRQ_TYPE_VLINE0, __func__); } static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int crtc_id, enum amdgpu_interrupt_state state) { enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; bool st = (state == AMDGPU_IRQ_STATE_ENABLE); dc_interrupt_set(adev->dm.dc, irq_source, st); return 0; } static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( adev, source, crtc_id, state, IRQ_TYPE_VUPDATE, __func__); } static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, enum amdgpu_interrupt_state state) { enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0; bool st = (state == AMDGPU_IRQ_STATE_ENABLE); dc_interrupt_set(adev->dm.dc, irq_source, st); return 0; } static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = { .set = amdgpu_dm_set_crtc_irq_state, .process = amdgpu_dm_irq_handler, }; static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = { .set = 
amdgpu_dm_set_vline0_irq_state, .process = amdgpu_dm_irq_handler, }; static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = { .set = amdgpu_dm_set_dmub_outbox_irq_state, .process = amdgpu_dm_irq_handler, }; static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = { .set = amdgpu_dm_set_vupdate_irq_state, .process = amdgpu_dm_irq_handler, }; static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = { .set = amdgpu_dm_set_dmub_trace_irq_state, .process = amdgpu_dm_irq_handler, }; static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = { .set = amdgpu_dm_set_pflip_irq_state, .process = amdgpu_dm_irq_handler, }; static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = { .set = amdgpu_dm_set_hpd_irq_state, .process = amdgpu_dm_irq_handler, }; void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) { adev->crtc_irq.num_types = adev->mode_info.num_crtc; adev->crtc_irq.funcs = &dm_crtc_irq_funcs; adev->vline0_irq.num_types = adev->mode_info.num_crtc; adev->vline0_irq.funcs = &dm_vline0_irq_funcs; adev->dmub_outbox_irq.num_types = 1; adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs; adev->vupdate_irq.num_types = adev->mode_info.num_crtc; adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs; adev->dmub_trace_irq.num_types = 1; adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs; adev->pageflip_irq.num_types = adev->mode_info.num_crtc; adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; adev->hpd_irq.num_types = adev->mode_info.num_hpd; adev->hpd_irq.funcs = &dm_hpd_irq_funcs; } void amdgpu_dm_outbox_init(struct amdgpu_device *adev) { dc_interrupt_set(adev->dm.dc, DC_IRQ_SOURCE_DMCUB_OUTBOX, true); } /** * amdgpu_dm_hpd_init - hpd setup callback. * * @adev: amdgpu_device pointer * * Setup the hpd pins used by the card (evergreen+). * Enable the pin, set the polarity, and enable the hpd interrupts. */ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, true); } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, true); } } drm_connector_list_iter_end(&iter); } /** * amdgpu_dm_hpd_fini - hpd tear down callback. * * @adev: amdgpu_device pointer * * Tear down the hpd pins used by the card (evergreen+). * Disable the hpd interrupts. */ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false); } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, false); } } drm_connector_list_iter_end(&iter); }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
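A user-space analogy (not the kernel implementation; sketch_register, sketch_dispatch and the node struct are invented names) of the per-source handler lists described in the DOC comment above: handlers are appended at the tail (as with list_add_tail()) and the list is walked front to back, so they run in registration order; high context handlers are invoked directly while low context ones are queued to a workqueue, which this sketch only simulates with a flag.

/* Standalone analogy of DM's per-IRQ-source handler list dispatch. */
#include <stdio.h>
#include <stdlib.h>

typedef void (*sketch_handler_t)(void *arg);

struct sketch_handler_node {
	sketch_handler_t handler;
	void *arg;
	int deferred;			/* 1 = "low context", run from a workqueue */
	struct sketch_handler_node *next;
};

static struct sketch_handler_node *head, **tail = &head;

static void sketch_register(sketch_handler_t h, void *arg, int deferred)
{
	struct sketch_handler_node *n = calloc(1, sizeof(*n));

	if (!n)
		return;
	n->handler = h;
	n->arg = arg;
	n->deferred = deferred;
	*tail = n;			/* append at tail, like list_add_tail() */
	tail = &n->next;
}

static void sketch_dispatch(void)
{
	struct sketch_handler_node *n;

	for (n = head; n; n = n->next) {
		if (n->deferred)
			printf("queue work for %s\n", (const char *)n->arg);
		else
			n->handler(n->arg);	/* immediate, "high context" */
	}
}

static void say(void *arg) { printf("handler: %s\n", (const char *)arg); }

int main(void)
{
	sketch_register(say, "vblank (high)", 0);
	sketch_register(say, "hpd (low)", 1);
	sketch_dispatch();
	return 0;
}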
/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "amdgpu_dm_replay.h" #include "dc.h" #include "dm_helpers.h" #include "amdgpu_dm.h" #include "modules/power/power_helpers.h" #include "dmub/inc/dmub_cmd.h" #include "dc/inc/link.h" /* * link_supports_replay() - check if the link supports replay * @link: link * @aconnector: aconnector * */ static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) { struct dm_connector_state *state = to_dm_connector_state(aconnector->base.state); struct dpcd_caps *dpcd_caps = &link->dpcd_caps; struct adaptive_sync_caps *as_caps = &link->dpcd_caps.adaptive_sync_caps; if (!state->freesync_capable) return false; if (!aconnector->vsdb_info.replay_mode) return false; // Check the eDP version if (dpcd_caps->edp_rev < EDP_REVISION_13) return false; if (!dpcd_caps->alpm_caps.bits.AUX_WAKE_ALPM_CAP) return false; // Check adaptive sync support cap if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT) return false; return true; } /* * amdgpu_dm_setup_replay() - setup replay configuration * @link: link * @aconnector: aconnector * */ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) { struct replay_config pr_config; union replay_debug_flags *debug_flags = NULL; // For eDP, if Replay is supported, return true to skip checks if (link->replay_settings.config.replay_supported) return true; if (!dc_is_embedded_signal(link->connector_signal)) return false; if (link->panel_config.psr.disallow_replay) return false; if (!link_supports_replay(link, aconnector)) return false; // Mark Replay is supported in link and update related attributes pr_config.replay_supported = true; pr_config.replay_power_opt_supported = 0; pr_config.replay_enable_option |= pr_enable_option_static_screen; pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq ? true : false; if (!pr_config.replay_timing_sync_supported) pr_config.replay_enable_option &= ~pr_enable_option_general_ui; debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; debug_flags->u32All = 0; debug_flags->bitfields.visual_confirm = link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY ? 
true : false; link->replay_settings.replay_feature_enabled = true; init_replay_config(link, &pr_config); return true; } /* * amdgpu_dm_replay_enable() - enable replay f/w * @stream: stream state * * Return: true if success */ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) { uint64_t state; unsigned int retry_count; bool replay_active = true; const unsigned int max_retry = 1000; bool force_static = true; struct dc_link *link = NULL; if (stream == NULL) return false; link = stream->link; if (link == NULL) return false; link->dc->link_srv->edp_setup_replay(link, stream); link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL); link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL); if (wait == true) { for (retry_count = 0; retry_count <= max_retry; retry_count++) { dc_link_get_replay_state(link, &state); if (replay_active) { if (state != REPLAY_STATE_0 && (!force_static || state == REPLAY_STATE_3)) break; } else { if (state == REPLAY_STATE_0) break; } udelay(500); } /* assert if max retry hit */ if (retry_count >= max_retry) ASSERT(0); } else { /* To-do: Add trace log */ } return true; } /* * amdgpu_dm_replay_disable() - disable replay f/w * @stream: stream state * * Return: true if success */ bool amdgpu_dm_replay_disable(struct dc_stream_state *stream) { if (stream->link) { DRM_DEBUG_DRIVER("Disabling replay...\n"); stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL); return true; } return false; }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
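A back-of-the-envelope sketch (illustrative panel values, not driver code; timing_sync_supported is an invented name) for two numbers in amdgpu_dm_setup_replay() and amdgpu_dm_replay_enable() above: replay timing sync is only reported when max_vfreq >= 2 * min_vfreq, and the optional wait loop polls up to 1000 times with a 500 us delay, giving a worst-case wait on the order of half a second.

/* Sketch of the replay timing-sync condition and the wait-loop budget. */
#include <stdbool.h>
#include <stdio.h>

static bool timing_sync_supported(unsigned int min_vfreq, unsigned int max_vfreq)
{
	return max_vfreq >= 2 * min_vfreq;
}

int main(void)
{
	const unsigned int max_retry = 1000, delay_us = 500;

	/* 48..120 Hz panel: 120 >= 96, so timing sync can be supported. */
	printf("48-120 Hz: %s\n", timing_sync_supported(48, 120) ? "yes" : "no");
	/* 60..90 Hz panel: 90 < 120, so it cannot. */
	printf("60-90 Hz:  %s\n", timing_sync_supported(60, 90) ? "yes" : "no");

	printf("worst-case wait: ~%u ms\n", max_retry * delay_us / 1000);
	return 0;
}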
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD */ #include <linux/string.h> #include <linux/acpi.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" #include "amdgpu_pm.h" #include "dm_pp_smu.h" bool dm_pp_apply_display_requirements( const struct dc_context *ctx, const struct dm_pp_display_configuration *pp_display_cfg) { struct amdgpu_device *adev = ctx->driver_context; int i; if (adev->pm.dpm_enabled) { memset(&adev->pm.pm_display_cfg, 0, sizeof(adev->pm.pm_display_cfg)); adev->pm.pm_display_cfg.cpu_cc6_disable = pp_display_cfg->cpu_cc6_disable; adev->pm.pm_display_cfg.cpu_pstate_disable = pp_display_cfg->cpu_pstate_disable; adev->pm.pm_display_cfg.cpu_pstate_separation_time = pp_display_cfg->cpu_pstate_separation_time; adev->pm.pm_display_cfg.nb_pstate_switch_disable = pp_display_cfg->nb_pstate_switch_disable; adev->pm.pm_display_cfg.num_display = pp_display_cfg->display_count; adev->pm.pm_display_cfg.num_path_including_non_display = pp_display_cfg->display_count; adev->pm.pm_display_cfg.min_core_set_clock = pp_display_cfg->min_engine_clock_khz/10; adev->pm.pm_display_cfg.min_core_set_clock_in_sr = pp_display_cfg->min_engine_clock_deep_sleep_khz/10; adev->pm.pm_display_cfg.min_mem_set_clock = pp_display_cfg->min_memory_clock_khz/10; adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk = pp_display_cfg->min_engine_clock_deep_sleep_khz/10; adev->pm.pm_display_cfg.min_dcef_set_clk = pp_display_cfg->min_dcfclock_khz/10; adev->pm.pm_display_cfg.multi_monitor_in_sync = pp_display_cfg->all_displays_in_sync; adev->pm.pm_display_cfg.min_vblank_time = pp_display_cfg->avail_mclk_switch_time_us; adev->pm.pm_display_cfg.display_clk = pp_display_cfg->disp_clk_khz/10; adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency = pp_display_cfg->avail_mclk_switch_time_in_disp_active_us; adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index; adev->pm.pm_display_cfg.line_time_in_us = pp_display_cfg->line_time_in_us; adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh; adev->pm.pm_display_cfg.crossfire_display_index = -1; adev->pm.pm_display_cfg.min_bus_bandwidth = 0; for (i = 0; i < pp_display_cfg->display_count; i++) { const struct dm_pp_single_disp_config *dc_cfg = &pp_display_cfg->disp_configs[i]; adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; } 
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); amdgpu_dpm_compute_clocks(adev); } return true; } static void get_default_clock_levels( enum dm_pp_clock_type clk_type, struct dm_pp_clock_levels *clks) { uint32_t disp_clks_in_khz[6] = { 300000, 400000, 496560, 626090, 685720, 757900 }; uint32_t sclks_in_khz[6] = { 300000, 360000, 423530, 514290, 626090, 720000 }; uint32_t mclks_in_khz[2] = { 333000, 800000 }; switch (clk_type) { case DM_PP_CLOCK_TYPE_DISPLAY_CLK: clks->num_levels = 6; memmove(clks->clocks_in_khz, disp_clks_in_khz, sizeof(disp_clks_in_khz)); break; case DM_PP_CLOCK_TYPE_ENGINE_CLK: clks->num_levels = 6; memmove(clks->clocks_in_khz, sclks_in_khz, sizeof(sclks_in_khz)); break; case DM_PP_CLOCK_TYPE_MEMORY_CLK: clks->num_levels = 2; memmove(clks->clocks_in_khz, mclks_in_khz, sizeof(mclks_in_khz)); break; default: clks->num_levels = 0; break; } } static enum amd_pp_clock_type dc_to_pp_clock_type( enum dm_pp_clock_type dm_pp_clk_type) { enum amd_pp_clock_type amd_pp_clk_type = 0; switch (dm_pp_clk_type) { case DM_PP_CLOCK_TYPE_DISPLAY_CLK: amd_pp_clk_type = amd_pp_disp_clock; break; case DM_PP_CLOCK_TYPE_ENGINE_CLK: amd_pp_clk_type = amd_pp_sys_clock; break; case DM_PP_CLOCK_TYPE_MEMORY_CLK: amd_pp_clk_type = amd_pp_mem_clock; break; case DM_PP_CLOCK_TYPE_DCEFCLK: amd_pp_clk_type = amd_pp_dcef_clock; break; case DM_PP_CLOCK_TYPE_DCFCLK: amd_pp_clk_type = amd_pp_dcf_clock; break; case DM_PP_CLOCK_TYPE_PIXELCLK: amd_pp_clk_type = amd_pp_pixel_clock; break; case DM_PP_CLOCK_TYPE_FCLK: amd_pp_clk_type = amd_pp_f_clock; break; case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: amd_pp_clk_type = amd_pp_phy_clock; break; case DM_PP_CLOCK_TYPE_DPPCLK: amd_pp_clk_type = amd_pp_dpp_clock; break; default: DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", dm_pp_clk_type); break; } return amd_pp_clk_type; } static enum dm_pp_clocks_state pp_to_dc_powerlevel_state( enum PP_DAL_POWERLEVEL max_clocks_state) { switch (max_clocks_state) { case PP_DAL_POWERLEVEL_0: return DM_PP_CLOCKS_DPM_STATE_LEVEL_0; case PP_DAL_POWERLEVEL_1: return DM_PP_CLOCKS_DPM_STATE_LEVEL_1; case PP_DAL_POWERLEVEL_2: return DM_PP_CLOCKS_DPM_STATE_LEVEL_2; case PP_DAL_POWERLEVEL_3: return DM_PP_CLOCKS_DPM_STATE_LEVEL_3; case PP_DAL_POWERLEVEL_4: return DM_PP_CLOCKS_DPM_STATE_LEVEL_4; case PP_DAL_POWERLEVEL_5: return DM_PP_CLOCKS_DPM_STATE_LEVEL_5; case PP_DAL_POWERLEVEL_6: return DM_PP_CLOCKS_DPM_STATE_LEVEL_6; case PP_DAL_POWERLEVEL_7: return DM_PP_CLOCKS_DPM_STATE_LEVEL_7; default: DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n", max_clocks_state); return DM_PP_CLOCKS_STATE_INVALID; } } static void pp_to_dc_clock_levels( const struct amd_pp_clocks *pp_clks, struct dm_pp_clock_levels *dc_clks, enum dm_pp_clock_type dc_clk_type) { uint32_t i; if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) { DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), pp_clks->count, DM_PP_MAX_CLOCK_LEVELS); dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS; } else dc_clks->num_levels = pp_clks->count; DRM_INFO("DM_PPLIB: values for %s clock\n", DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); for (i = 0; i < dc_clks->num_levels; i++) { DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]); dc_clks->clocks_in_khz[i] = pp_clks->clock[i]; } } static void pp_to_dc_clock_levels_with_latency( const struct pp_clock_levels_with_latency *pp_clks, struct dm_pp_clock_levels_with_latency *clk_level_info, enum dm_pp_clock_type dc_clk_type) { uint32_t i; if (pp_clks->num_levels > 
DM_PP_MAX_CLOCK_LEVELS) { DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), pp_clks->num_levels, DM_PP_MAX_CLOCK_LEVELS); clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; } else clk_level_info->num_levels = pp_clks->num_levels; DRM_DEBUG("DM_PPLIB: values for %s clock\n", DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); for (i = 0; i < clk_level_info->num_levels; i++) { DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz); clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; } } static void pp_to_dc_clock_levels_with_voltage( const struct pp_clock_levels_with_voltage *pp_clks, struct dm_pp_clock_levels_with_voltage *clk_level_info, enum dm_pp_clock_type dc_clk_type) { uint32_t i; if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), pp_clks->num_levels, DM_PP_MAX_CLOCK_LEVELS); clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; } else clk_level_info->num_levels = pp_clks->num_levels; DRM_INFO("DM_PPLIB: values for %s clock\n", DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); for (i = 0; i < clk_level_info->num_levels; i++) { DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz, pp_clks->data[i].voltage_in_mv); clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv; } } bool dm_pp_get_clock_levels_by_type( const struct dc_context *ctx, enum dm_pp_clock_type clk_type, struct dm_pp_clock_levels *dc_clks) { struct amdgpu_device *adev = ctx->driver_context; struct amd_pp_clocks pp_clks = { 0 }; struct amd_pp_simple_clock_info validation_clks = { 0 }; uint32_t i; if (amdgpu_dpm_get_clock_by_type(adev, dc_to_pp_clock_type(clk_type), &pp_clks)) { /* Error in pplib. Provide default values. */ get_default_clock_levels(clk_type, dc_clks); return true; } pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) { /* Error in pplib. Provide default values. */ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); validation_clks.engine_max_clock = 72000; validation_clks.memory_max_clock = 80000; validation_clks.level = 0; } DRM_INFO("DM_PPLIB: Validation clocks:\n"); DRM_INFO("DM_PPLIB: engine_max_clock: %d\n", validation_clks.engine_max_clock); DRM_INFO("DM_PPLIB: memory_max_clock: %d\n", validation_clks.memory_max_clock); DRM_INFO("DM_PPLIB: level : %d\n", validation_clks.level); /* Translate 10 kHz to kHz. */ validation_clks.engine_max_clock *= 10; validation_clks.memory_max_clock *= 10; /* Determine the highest non-boosted level from the Validation Clocks */ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) { for (i = 0; i < dc_clks->num_levels; i++) { if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { /* This clock is higher the validation clock. * Than means the previous one is the highest * non-boosted one. */ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", dc_clks->num_levels, i); dc_clks->num_levels = i > 0 ? 
i : 1; break; } } } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) { for (i = 0; i < dc_clks->num_levels; i++) { if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) { DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n", dc_clks->num_levels, i); dc_clks->num_levels = i > 0 ? i : 1; break; } } } return true; } bool dm_pp_get_clock_levels_by_type_with_latency( const struct dc_context *ctx, enum dm_pp_clock_type clk_type, struct dm_pp_clock_levels_with_latency *clk_level_info) { struct amdgpu_device *adev = ctx->driver_context; struct pp_clock_levels_with_latency pp_clks = { 0 }; int ret; ret = amdgpu_dpm_get_clock_by_type_with_latency(adev, dc_to_pp_clock_type(clk_type), &pp_clks); if (ret) return false; pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); return true; } bool dm_pp_get_clock_levels_by_type_with_voltage( const struct dc_context *ctx, enum dm_pp_clock_type clk_type, struct dm_pp_clock_levels_with_voltage *clk_level_info) { struct amdgpu_device *adev = ctx->driver_context; struct pp_clock_levels_with_voltage pp_clk_info = {0}; int ret; ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev, dc_to_pp_clock_type(clk_type), &pp_clk_info); if (ret) return false; pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type); return true; } bool dm_pp_notify_wm_clock_changes( const struct dc_context *ctx, struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges) { struct amdgpu_device *adev = ctx->driver_context; /* * Limit this watermark setting for Polaris for now * TODO: expand this to other ASICs */ if ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_VEGAM) && !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, (void *)wm_with_clock_ranges)) return true; return false; } bool dm_pp_apply_power_level_change_request( const struct dc_context *ctx, struct dm_pp_power_level_change_request *level_change_req) { /* TODO: to be implemented */ return false; } bool dm_pp_apply_clock_for_voltage_request( const struct dc_context *ctx, struct dm_pp_clock_for_voltage_req *clock_for_voltage_req) { struct amdgpu_device *adev = ctx->driver_context; struct pp_display_clock_request pp_clock_request = {0}; int ret = 0; pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type); pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz; if (!pp_clock_request.clock_type) return false; ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request); if (ret && (ret != -EOPNOTSUPP)) return false; return true; } bool dm_pp_get_static_clocks( const struct dc_context *ctx, struct dm_pp_static_clock_info *static_clk_info) { struct amdgpu_device *adev = ctx->driver_context; struct amd_pp_clock_info pp_clk_info = {0}; if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info)) return false; static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state); static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10; static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10; return true; } static void pp_rv_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges; int32_t i; 
wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { if (ranges->reader_wm_sets[i].wm_inst > 3) wm_dce_clocks[i].wm_set_id = WM_SET_A; else wm_dce_clocks[i].wm_set_id = ranges->reader_wm_sets[i].wm_inst; wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; wm_dce_clocks[i].wm_max_mem_clk_in_khz = ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; wm_dce_clocks[i].wm_min_mem_clk_in_khz = ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; } for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { if (ranges->writer_wm_sets[i].wm_inst > 3) wm_soc_clocks[i].wm_set_id = WM_SET_A; else wm_soc_clocks[i].wm_set_id = ranges->writer_wm_sets[i].wm_inst; wm_soc_clocks[i].wm_max_socclk_clk_in_khz = ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; wm_soc_clocks[i].wm_min_socclk_clk_in_khz = ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; wm_soc_clocks[i].wm_max_mem_clk_in_khz = ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; wm_soc_clocks[i].wm_min_mem_clk_in_khz = ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, &wm_with_clock_ranges); } static void pp_rv_set_pme_wa_enable(struct pp_smu *pp) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; amdgpu_dpm_notify_smu_enable_pwe(adev); } static void pp_rv_set_active_display_count(struct pp_smu *pp, int count) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; amdgpu_dpm_set_active_display_count(adev, count); } static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock); } static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock); } static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz); } static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; int ret = 0; ret = amdgpu_dpm_set_active_display_count(adev, count); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; else if (ret) /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; int ret = 0; /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; 
else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; struct pp_display_clock_request clock_req; int ret = 0; clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = mhz * 1000; /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; struct pp_display_clock_request clock_req; int ret = 0; clock_req.clock_type = amd_pp_mem_clock; clock_req.clock_freq_in_khz = mhz * 1000; /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_set_pstate_handshake_support( struct pp_smu *pp, bool pstate_handshake_supported) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; if (amdgpu_dpm_display_disable_memory_clock_switch(adev, !pstate_handshake_supported)) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, enum pp_smu_nv_clock_id clock_id, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; struct pp_display_clock_request clock_req; int ret = 0; switch (clock_id) { case PP_SMU_NV_DISPCLK: clock_req.clock_type = amd_pp_disp_clock; break; case PP_SMU_NV_PHYCLK: clock_req.clock_type = amd_pp_phy_clock; break; case PP_SMU_NV_PIXELCLK: clock_req.clock_type = amd_pp_pixel_clock; break; default: break; } clock_req.clock_freq_in_khz = mhz * 1000; /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; int ret = 0; ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev, max_clocks); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, unsigned int *clock_values_in_khz, unsigned int *num_states) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; int ret = 0; ret = amdgpu_dpm_get_uclk_dpm_states(adev, clock_values_in_khz, num_states); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_rn_get_dpm_clock_table( struct pp_smu *pp, struct dpm_clocks *clock_table) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; int ret = 0; ret = 
amdgpu_dpm_get_dpm_clock_table(adev, clock_table); if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); return PP_SMU_RESULT_OK; } void dm_pp_get_funcs( struct dc_context *ctx, struct pp_smu_funcs *funcs) { switch (ctx->dce_version) { case DCN_VERSION_1_0: case DCN_VERSION_1_01: funcs->ctx.ver = PP_SMU_VER_RV; funcs->rv_funcs.pp_smu.dm = ctx; funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges; funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable; funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count; funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk; funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq; funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq; break; case DCN_VERSION_2_0: funcs->ctx.ver = PP_SMU_VER_NV; funcs->nv_funcs.pp_smu.dm = ctx; funcs->nv_funcs.set_display_count = pp_nv_set_display_count; funcs->nv_funcs.set_hard_min_dcfclk_by_freq = pp_nv_set_hard_min_dcefclk_by_freq; funcs->nv_funcs.set_min_deep_sleep_dcfclk = pp_nv_set_min_deep_sleep_dcfclk; funcs->nv_funcs.set_voltage_by_freq = pp_nv_set_voltage_by_freq; funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges; /* todo set_pme_wa_enable cause 4k@6ohz display not light up */ funcs->nv_funcs.set_pme_wa_enable = NULL; /* todo debug waring message */ funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq; /* todo compare data with window driver*/ funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks; /*todo compare data with window driver */ funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states; funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; case DCN_VERSION_2_1: funcs->ctx.ver = PP_SMU_VER_RN; funcs->rn_funcs.pp_smu.dm = ctx; funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; break; default: DRM_ERROR("smu version is not supported !\n"); break; } }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
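The file above trims the engine/memory clock level list against the validation clocks that pplib reports in 10 kHz units. The following is a minimal standalone sketch (not kernel code) of that trimming logic, using the default level values from get_default_clock_levels(); the names trim_to_validation_clock and fake main() driver are invented for illustration only.

/*
 * Standalone sketch of the level-trimming logic in
 * dm_pp_get_clock_levels_by_type(): validation clocks come back in 10 kHz
 * units, are scaled to kHz, and the level list is cut at the first level
 * that exceeds the validation clock (keeping at least one level).
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVELS 8

/* Return how many levels stay usable given a maximum clock in kHz. */
static uint32_t trim_to_validation_clock(const uint32_t *clocks_in_khz,
					 uint32_t num_levels,
					 uint32_t max_khz)
{
	uint32_t i;

	for (i = 0; i < num_levels; i++) {
		if (clocks_in_khz[i] > max_khz)
			return i > 0 ? i : 1; /* keep at least one level */
	}
	return num_levels;
}

int main(void)
{
	/* Default engine clock levels from get_default_clock_levels(), in kHz. */
	uint32_t sclks_in_khz[MAX_LEVELS] = {
		300000, 360000, 423530, 514290, 626090, 720000
	};
	uint32_t num_levels = 6;

	/* pplib default validation clock is 72000 (10 kHz units) -> kHz. */
	uint32_t engine_max_clock = 72000 * 10;

	num_levels = trim_to_validation_clock(sclks_in_khz, num_levels,
					      engine_max_clock);

	printf("usable engine clock levels: %u\n", num_levels);
	return 0;
}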
// SPDX-License-Identifier: MIT /* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <drm/drm_vblank.h> #include <drm/drm_atomic_helper.h> #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm_psr.h" #include "amdgpu_dm_replay.h" #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_plane.h" #include "amdgpu_dm_trace.h" #include "amdgpu_dm_debugfs.h" void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) { struct drm_crtc *crtc = &acrtc->base; struct drm_device *dev = crtc->dev; unsigned long flags; drm_crtc_handle_vblank(crtc); spin_lock_irqsave(&dev->event_lock, flags); /* Send completion event for cursor-only commits */ if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { drm_crtc_send_vblank_event(crtc, acrtc->event); drm_crtc_vblank_put(crtc); acrtc->event = NULL; } spin_unlock_irqrestore(&dev->event_lock, flags); } bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state, struct dc_stream_state *new_stream, struct dc_stream_state *old_stream) { return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); } bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc) { return acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_FIXED; } int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); int rc; if (acrtc->otg_inst == -1) return 0; irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", acrtc->crtc_id, enable ? 
"en" : "dis", rc); return rc; } bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) { return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } static void vblank_control_worker(struct work_struct *work) { struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); struct amdgpu_display_manager *dm = vblank_work->dm; mutex_lock(&dm->dc_lock); if (vblank_work->enable) dm->active_vblank_irq_count++; else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); /* * Control PSR based on vblank requirements from OS * * If panel supports PSR SU, there's no need to disable PSR when OS is * submitting fast atomic commits (we infer this by whether the OS * requests vblank events). Fast atomic commits will simply trigger a * full-frame-update (FFU); a specific case of selective-update (SU) * where the SU region is the full hactive*vactive region. See * fill_dc_dirty_rects(). */ if (vblank_work->stream && vblank_work->stream->link) { /* * Prioritize replay, instead of psr */ if (vblank_work->stream->link->replay_settings.replay_feature_enabled) amdgpu_dm_replay_enable(vblank_work->stream, false); else if (vblank_work->enable) { if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && vblank_work->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(vblank_work->stream); } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && !vblank_work->stream->link->psr_settings.psr_allow_active && #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) && #endif vblank_work->stream->link->panel_config.psr.disallow_replay && vblank_work->acrtc->dm_irq_params.allow_psr_entry) { amdgpu_dm_psr_enable(vblank_work->stream); } } mutex_unlock(&dm->dc_lock); dc_stream_release(vblank_work->stream); kfree(vblank_work); } static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); struct amdgpu_display_manager *dm = &adev->dm; struct vblank_control_work *work; int rc = 0; if (acrtc->otg_inst == -1) goto skip; if (enable) { /* vblank irq on -> Only need vupdate irq in vrr mode */ if (amdgpu_dm_crtc_vrr_active(acrtc_state)) rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); } else { /* vblank irq off -> vupdate irq off */ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); } if (rc) return rc; rc = (enable) ? 
amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); if (rc) return rc; skip: if (amdgpu_in_reset(adev)) return 0; if (dm->vblank_control_workqueue) { work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return -ENOMEM; INIT_WORK(&work->work, vblank_control_worker); work->dm = dm; work->acrtc = acrtc; work->enable = enable; if (acrtc_state->stream) { dc_stream_retain(acrtc_state->stream); work->stream = acrtc_state->stream; } queue_work(dm->vblank_control_workqueue, &work->work); } return 0; } int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc) { return dm_set_vblank(crtc, true); } void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc) { dm_set_vblank(crtc, false); } static void dm_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dm_crtc_state *cur = to_dm_crtc_state(state); /* TODO Destroy dc_stream objects are stream object is flattened */ if (cur->stream) dc_stream_release(cur->stream); __drm_atomic_helper_crtc_destroy_state(state); kfree(state); } static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc) { struct dm_crtc_state *state, *cur; cur = to_dm_crtc_state(crtc->state); if (WARN_ON(!crtc->state)) return NULL; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); if (cur->stream) { state->stream = cur->stream; dc_stream_retain(state->stream); } state->active_planes = cur->active_planes; state->vrr_infopacket = cur->vrr_infopacket; state->abm_level = cur->abm_level; state->vrr_supported = cur->vrr_supported; state->freesync_config = cur->freesync_config; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; state->crc_skip_count = cur->crc_skip_count; state->mpo_requested = cur->mpo_requested; /* TODO Duplicate dc_stream after objects are stream object is flattened */ return &state->base; } static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) { drm_crtc_cleanup(crtc); kfree(crtc); } static void dm_crtc_reset_state(struct drm_crtc *crtc) { struct dm_crtc_state *state; if (crtc->state) dm_crtc_destroy_state(crtc, crtc->state); state = kzalloc(sizeof(*state), GFP_KERNEL); if (WARN_ON(!state)) return; __drm_atomic_helper_crtc_reset(crtc, &state->base); } #ifdef CONFIG_DEBUG_FS static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) { crtc_debugfs_init(crtc); return 0; } #endif /* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .reset = dm_crtc_reset_state, .destroy = amdgpu_dm_crtc_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = dm_crtc_duplicate_state, .atomic_destroy_state = dm_crtc_destroy_state, .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, .get_vblank_counter = amdgpu_get_vblank_counter_kms, .enable_vblank = amdgpu_dm_crtc_enable_vblank, .disable_vblank = amdgpu_dm_crtc_disable_vblank, .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, #if defined(CONFIG_DEBUG_FS) .late_register = amdgpu_dm_crtc_late_register, #endif }; static void dm_crtc_helper_disable(struct drm_crtc *crtc) { } static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) { struct drm_atomic_state *state = new_crtc_state->state; struct drm_plane *plane; int num_active = 0; 
drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { struct drm_plane_state *new_plane_state; /* Cursor planes are "fake". */ if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; new_plane_state = drm_atomic_get_new_plane_state(state, plane); if (!new_plane_state) { /* * The plane is enable on the CRTC and hasn't changed * state. This means that it previously passed * validation and is therefore enabled. */ num_active += 1; continue; } /* We need a framebuffer to be considered enabled. */ num_active += (new_plane_state->fb != NULL); } return num_active; } static void dm_update_crtc_active_planes(struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_new_crtc_state->active_planes = 0; if (!dm_new_crtc_state->stream) return; dm_new_crtc_state->active_planes = count_crtc_active_planes(new_crtc_state); } static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc *dc = adev->dm.dc; struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); int ret = -EINVAL; trace_amdgpu_dm_crtc_atomic_check(crtc_state); dm_update_crtc_active_planes(crtc, crtc_state); if (WARN_ON(unlikely(!dm_crtc_state->stream && amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { return ret; } /* * We require the primary plane to be enabled whenever the CRTC is, otherwise * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other * planes are disabled, which is not supported by the hardware. And there is legacy * userspace which stops using the HW cursor altogether in response to the resulting EINVAL. */ if (crtc_state->enable && !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); return -EINVAL; } /* * Only allow async flips for fast updates that don't change the FB * pitch, the DCC state, rotation, etc. 
*/ if (crtc_state->async_flip && dm_crtc_state->update_type != UPDATE_TYPE_FAST) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] async flips are only supported for fast updates\n", crtc->base.id, crtc->name); return -EINVAL; } /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) return 0; DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); return ret; } static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { .disable = dm_crtc_helper_disable, .atomic_check = dm_crtc_helper_atomic_check, .mode_fixup = dm_crtc_helper_mode_fixup, .get_scanout_position = amdgpu_crtc_get_scanout_position, }; int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, uint32_t crtc_index) { struct amdgpu_crtc *acrtc = NULL; struct drm_plane *cursor_plane; bool is_dcn; int res = -ENOMEM; cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); if (!cursor_plane) goto fail; cursor_plane->type = DRM_PLANE_TYPE_CURSOR; res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); if (!acrtc) goto fail; res = drm_crtc_init_with_planes( dm->ddev, &acrtc->base, plane, cursor_plane, &amdgpu_dm_crtc_funcs, NULL); if (res) goto fail; drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); /* Create (reset) the plane state */ if (acrtc->base.funcs->reset) acrtc->base.funcs->reset(&acrtc->base); acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; acrtc->crtc_id = crtc_index; acrtc->base.enabled = false; acrtc->otg_inst = -1; dm->adev->mode_info.crtcs[crtc_index] = acrtc; /* Don't enable DRM CRTC degamma property for DCE since it doesn't * support programmable degamma anywhere. */ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); return 0; fail: kfree(acrtc); kfree(cursor_plane); return res; }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
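In the file above, vblank_control_worker() keeps a per-device count of CRTCs with vblank enabled and only allows idle optimizations (MALL) when that count drops to zero. Below is a minimal standalone sketch (not kernel code) of that refcount scheme; struct fake_dm and vblank_control() are invented names for illustration, standing in for amdgpu_display_manager and the worker's enable/disable path.

/*
 * Standalone sketch of the refcount gating in vblank_control_worker():
 * each vblank enable bumps the counter, each disable drops it (never below
 * zero), and idle optimizations are permitted only while the count is zero.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dm {
	unsigned int active_vblank_irq_count;
	bool idle_optimizations_allowed;
};

static void vblank_control(struct fake_dm *dm, bool enable)
{
	if (enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	/* Idle optimizations are only safe with no active vblank clients. */
	dm->idle_optimizations_allowed = (dm->active_vblank_irq_count == 0);

	printf("vblank %s -> count=%u, allow idle: %d\n",
	       enable ? "enable" : "disable",
	       dm->active_vblank_irq_count,
	       dm->idle_optimizations_allowed);
}

int main(void)
{
	struct fake_dm dm = { 0, true };

	vblank_control(&dm, true);   /* CRTC 0 enables vblank */
	vblank_control(&dm, true);   /* CRTC 1 enables vblank */
	vblank_control(&dm, false);  /* CRTC 1 disables vblank */
	vblank_control(&dm, false);  /* CRTC 0 disables -> idle allowed again */
	return 0;
}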
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/string_helpers.h> #include <linux/uaccess.h> #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_debugfs.h" #include "dm_helpers.h" #include "dmub/dmub_srv.h" #include "resource.h" #include "dsc.h" #include "link_hwss.h" #include "dc/dc_dmub_srv.h" #include "link/protocols/link_dp_capability.h" #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY #include "amdgpu_dm_psr.h" #endif struct dmub_debugfs_trace_header { uint32_t entry_count; uint32_t reserved[3]; }; struct dmub_debugfs_trace_entry { uint32_t trace_code; uint32_t tick_count; uint32_t param0; uint32_t param1; }; static const char *const mst_progress_status[] = { "probe", "remote_edid", "allocate_new_payload", "clear_allocated_payload", }; /* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array * * Function takes in attributes passed to debugfs write entry * and writes into param array. * The user passes max_param_num to identify maximum number of * parameters that could be parsed. * */ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size, long *param, const char __user *buf, int max_param_num, uint8_t *param_nums) { char *wr_buf_ptr = NULL; uint32_t wr_buf_count = 0; int r; char *sub_str = NULL; const char delimiter[3] = {' ', '\n', '\0'}; uint8_t param_index = 0; *param_nums = 0; wr_buf_ptr = wr_buf; /* r is bytes not be copied */ if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) { DRM_DEBUG_DRIVER("user data could not be read successfully\n"); return -EFAULT; } /* check number of parameters. 
isspace could not differ space and \n */ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) { /* skip space*/ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) { wr_buf_ptr++; wr_buf_count++; } if (wr_buf_count == wr_buf_size) break; /* skip non-space*/ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) { wr_buf_ptr++; wr_buf_count++; } (*param_nums)++; if (wr_buf_count == wr_buf_size) break; } if (*param_nums > max_param_num) *param_nums = max_param_num; wr_buf_ptr = wr_buf; /* reset buf pointer */ wr_buf_count = 0; /* number of char already checked */ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) { wr_buf_ptr++; wr_buf_count++; } while (param_index < *param_nums) { /* after strsep, wr_buf_ptr will be moved to after space */ sub_str = strsep(&wr_buf_ptr, delimiter); r = kstrtol(sub_str, 16, &(param[param_index])); if (r) DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r); param_index++; } return 0; } /* function description * get/ set DP configuration: lane_count, link_rate, spread_spectrum * * valid lane count value: 1, 2, 4 * valid link rate value: * 06h = 1.62Gbps per lane * 0Ah = 2.7Gbps per lane * 0Ch = 3.24Gbps per lane * 14h = 5.4Gbps per lane * 1Eh = 8.1Gbps per lane * * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings * * --- to get dp configuration * * cat /sys/kernel/debug/dri/0/DP-x/link_settings * * It will list current, verified, reported, preferred dp configuration. * current -- for current video mode * verified --- maximum configuration which pass link training * reported --- DP rx report caps (DPCD register offset 0, 1 2) * preferred --- user force settings * * --- set (or force) dp configuration * * echo <lane_count> <link_rate> > link_settings * * for example, to force to 2 lane, 2.7GHz, * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings * * spread_spectrum could not be changed dynamically. * * in case invalid lane count, link rate are force, no hw programming will be * done. please check link settings after force operation to see if HW get * programming. * * cat /sys/kernel/debug/dri/0/DP-x/link_settings * * check current and preferred settings. 
* */ static ssize_t dp_link_settings_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; char *rd_buf = NULL; char *rd_buf_ptr = NULL; const uint32_t rd_buf_size = 100; uint32_t result = 0; uint8_t str_len = 0; int r; if (*pos & 3 || size & 3) return -EINVAL; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return 0; rd_buf_ptr = rd_buf; str_len = strlen("Current: %d 0x%x %d "); snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ", link->cur_link_settings.lane_count, link->cur_link_settings.link_rate, link->cur_link_settings.link_spread); rd_buf_ptr += str_len; str_len = strlen("Verified: %d 0x%x %d "); snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ", link->verified_link_cap.lane_count, link->verified_link_cap.link_rate, link->verified_link_cap.link_spread); rd_buf_ptr += str_len; str_len = strlen("Reported: %d 0x%x %d "); snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ", link->reported_link_cap.lane_count, link->reported_link_cap.link_rate, link->reported_link_cap.link_spread); rd_buf_ptr += str_len; str_len = strlen("Preferred: %d 0x%x %d "); snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n", link->preferred_link_setting.lane_count, link->preferred_link_setting.link_rate, link->preferred_link_setting.link_spread); while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; struct amdgpu_device *adev = drm_to_adev(connector->base.dev); struct dc *dc = (struct dc *)link->dc; struct dc_link_settings prefer_link_settings; char *wr_buf = NULL; const uint32_t wr_buf_size = 40; /* 0: lane_count; 1: link_rate */ int max_param_num = 2; uint8_t param_nums = 0; long param[2]; bool valid_input = true; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) return -ENOSPC; if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { kfree(wr_buf); DRM_DEBUG_DRIVER("user data not be read\n"); return -EINVAL; } switch (param[0]) { case LANE_COUNT_ONE: case LANE_COUNT_TWO: case LANE_COUNT_FOUR: break; default: valid_input = false; break; } switch (param[1]) { case LINK_RATE_LOW: case LINK_RATE_HIGH: case LINK_RATE_RBR2: case LINK_RATE_HIGH2: case LINK_RATE_HIGH3: case LINK_RATE_UHBR10: case LINK_RATE_UHBR13_5: case LINK_RATE_UHBR20: break; default: valid_input = false; break; } if (!valid_input) { kfree(wr_buf); DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n"); mutex_lock(&adev->dm.dc_lock); dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); mutex_unlock(&adev->dm.dc_lock); return size; } /* save user force lane_count, link_rate to preferred settings * spread spectrum will not be changed */ prefer_link_settings.link_spread = link->cur_link_settings.link_spread; prefer_link_settings.use_link_rate_set = false; prefer_link_settings.lane_count = param[0]; prefer_link_settings.link_rate = param[1]; mutex_lock(&adev->dm.dc_lock); dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, 
link, false); mutex_unlock(&adev->dm.dc_lock); kfree(wr_buf); return size; } static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector) { bool is_end_device = false; struct drm_dp_mst_topology_mgr *mgr = NULL; struct drm_dp_mst_port *port = NULL; if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) { mgr = &aconnector->mst_root->mst_mgr; port = aconnector->mst_output_port; drm_modeset_lock(&mgr->base.lock, NULL); if (port->pdt == DP_PEER_DEVICE_SST_SINK || port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) is_end_device = true; drm_modeset_unlock(&mgr->base.lock); } return is_end_device; } /* Change MST link setting * * valid lane count value: 1, 2, 4 * valid link rate value: * 06h = 1.62Gbps per lane * 0Ah = 2.7Gbps per lane * 0Ch = 3.24Gbps per lane * 14h = 5.4Gbps per lane * 1Eh = 8.1Gbps per lane * 3E8h = 10.0Gbps per lane * 546h = 13.5Gbps per lane * 7D0h = 20.0Gbps per lane * * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings * * for example, to force to 2 lane, 10.0GHz, * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings * * Valid input will trigger hotplug event to get new link setting applied * Invalid input will trigger training setting reset * * The usage can be referred to link_settings entry * */ static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct dc_link *link = aconnector->dc_link; struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); struct dc *dc = (struct dc *)link->dc; struct dc_link_settings prefer_link_settings; char *wr_buf = NULL; const uint32_t wr_buf_size = 40; /* 0: lane_count; 1: link_rate */ int max_param_num = 2; uint8_t param_nums = 0; long param[2]; bool valid_input = true; if (!dp_mst_is_end_device(aconnector)) return -EINVAL; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) return -ENOSPC; if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { kfree(wr_buf); DRM_DEBUG_DRIVER("user data not be read\n"); return -EINVAL; } switch (param[0]) { case LANE_COUNT_ONE: case LANE_COUNT_TWO: case LANE_COUNT_FOUR: break; default: valid_input = false; break; } switch (param[1]) { case LINK_RATE_LOW: case LINK_RATE_HIGH: case LINK_RATE_RBR2: case LINK_RATE_HIGH2: case LINK_RATE_HIGH3: case LINK_RATE_UHBR10: case LINK_RATE_UHBR13_5: case LINK_RATE_UHBR20: break; default: valid_input = false; break; } if (!valid_input) { kfree(wr_buf); DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n"); mutex_lock(&adev->dm.dc_lock); dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); mutex_unlock(&adev->dm.dc_lock); return -EINVAL; } /* save user force lane_count, link_rate to preferred settings * spread spectrum will not be changed */ prefer_link_settings.link_spread = link->cur_link_settings.link_spread; prefer_link_settings.use_link_rate_set = false; prefer_link_settings.lane_count = param[0]; prefer_link_settings.link_rate = param[1]; /* skip immediate retrain, and train to new link setting after hotplug event triggered */ mutex_lock(&adev->dm.dc_lock); dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); mutex_unlock(&adev->dm.dc_lock); mutex_lock(&aconnector->base.dev->mode_config.mutex); aconnector->base.force = DRM_FORCE_OFF; 
mutex_unlock(&aconnector->base.dev->mode_config.mutex); drm_kms_helper_hotplug_event(aconnector->base.dev); msleep(100); mutex_lock(&aconnector->base.dev->mode_config.mutex); aconnector->base.force = DRM_FORCE_UNSPECIFIED; mutex_unlock(&aconnector->base.dev->mode_config.mutex); drm_kms_helper_hotplug_event(aconnector->base.dev); kfree(wr_buf); return size; } /* function: get current DP PHY settings: voltage swing, pre-emphasis, * post-cursor2 (defined by VESA DP specification) * * valid values * voltage swing: 0,1,2,3 * pre-emphasis : 0,1,2,3 * post cursor2 : 0,1,2,3 * * * how to use this debugfs * * debugfs is located at /sys/kernel/debug/dri/0/DP-x * * there will be directories, like DP-1, DP-2,DP-3, etc. for DP display * * To figure out which DP-x is the display for DP to be check, * cd DP-x * ls -ll * There should be debugfs file, like link_settings, phy_settings. * cat link_settings * from lane_count, link_rate to figure which DP-x is for display to be worked * on * * To get current DP PHY settings, * cat phy_settings * * To change DP PHY settings, * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings * for examle, to change voltage swing to 2, pre-emphasis to 3, post_cursor2 to * 0, * echo 2 3 0 > phy_settings * * To check if change be applied, get current phy settings by * cat phy_settings * * In case invalid values are set by user, like * echo 1 4 0 > phy_settings * * HW will NOT be programmed by these settings. * cat phy_settings will show the previous valid settings. */ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; char *rd_buf = NULL; const uint32_t rd_buf_size = 20; uint32_t result = 0; int r; if (*pos & 3 || size & 3) return -EINVAL; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -EINVAL; snprintf(rd_buf, rd_buf_size, " %d %d %d\n", link->cur_lane_setting[0].VOLTAGE_SWING, link->cur_lane_setting[0].PRE_EMPHASIS, link->cur_lane_setting[0].POST_CURSOR2); while (size) { if (*pos >= rd_buf_size) break; r = put_user((*(rd_buf + result)), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } static int dp_lttpr_status_show(struct seq_file *m, void *unused) { struct drm_connector *connector = m->private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps; if (connector->status != connector_status_connected) return -ENODEV; seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n", dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt), caps.phy_repeater_cnt); seq_puts(m, "phy repeater mode: "); switch (caps.mode) { case DP_PHY_REPEATER_MODE_TRANSPARENT: seq_puts(m, "transparent"); break; case DP_PHY_REPEATER_MODE_NON_TRANSPARENT: seq_puts(m, "non-transparent"); break; case 0x00: seq_puts(m, "non lttpr"); break; default: seq_printf(m, "read error (raw: 0x%x)", caps.mode); break; } seq_puts(m, "\n"); return 0; } static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; struct dc *dc = (struct dc *)link->dc; char *wr_buf = NULL; uint32_t wr_buf_size = 40; long param[3]; bool use_prefer_link_setting; struct link_training_settings link_lane_settings; int 
max_param_num = 3; uint8_t param_nums = 0; int r = 0; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) return -ENOSPC; if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { kfree(wr_buf); DRM_DEBUG_DRIVER("user data not be read\n"); return -EINVAL; } if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) || (param[1] > PRE_EMPHASIS_MAX_LEVEL) || (param[2] > POST_CURSOR2_MAX_LEVEL)) { kfree(wr_buf); DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n"); return size; } /* get link settings: lane count, link rate */ use_prefer_link_setting = ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) && (link->test_pattern_enabled)); memset(&link_lane_settings, 0, sizeof(link_lane_settings)); if (use_prefer_link_setting) { link_lane_settings.link_settings.lane_count = link->preferred_link_setting.lane_count; link_lane_settings.link_settings.link_rate = link->preferred_link_setting.link_rate; link_lane_settings.link_settings.link_spread = link->preferred_link_setting.link_spread; } else { link_lane_settings.link_settings.lane_count = link->cur_link_settings.lane_count; link_lane_settings.link_settings.link_rate = link->cur_link_settings.link_rate; link_lane_settings.link_settings.link_spread = link->cur_link_settings.link_spread; } /* apply phy settings from user */ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) { link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING = (enum dc_voltage_swing) (param[0]); link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS = (enum dc_pre_emphasis) (param[1]); link_lane_settings.hw_lane_settings[r].POST_CURSOR2 = (enum dc_post_cursor2) (param[2]); } /* program ASIC registers and DPCD registers */ dc_link_set_drive_settings(dc, &link_lane_settings, link); kfree(wr_buf); return size; } /* function description * * set PHY layer or Link layer test pattern * PHY test pattern is used for PHY SI check. * Link layer test will not affect PHY SI. * * Reset Test Pattern: * 0 = DP_TEST_PATTERN_VIDEO_MODE * * PHY test pattern supported: * 1 = DP_TEST_PATTERN_D102 * 2 = DP_TEST_PATTERN_SYMBOL_ERROR * 3 = DP_TEST_PATTERN_PRBS7 * 4 = DP_TEST_PATTERN_80BIT_CUSTOM * 5 = DP_TEST_PATTERN_CP2520_1 * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE * 7 = DP_TEST_PATTERN_CP2520_3 * * DP PHY Link Training Patterns * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1 * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2 * a = DP_TEST_PATTERN_TRAINING_PATTERN3 * b = DP_TEST_PATTERN_TRAINING_PATTERN4 * * DP Link Layer Test pattern * c = DP_TEST_PATTERN_COLOR_SQUARES * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA * e = DP_TEST_PATTERN_VERTICAL_BARS * f = DP_TEST_PATTERN_HORIZONTAL_BARS * 10= DP_TEST_PATTERN_COLOR_RAMP * * debugfs phy_test_pattern is located at /syskernel/debug/dri/0/DP-x * * --- set test pattern * echo <test pattern #> > test_pattern * * If test pattern # is not supported, NO HW programming will be done. * for DP_TEST_PATTERN_80BIT_CUSTOM, it needs extra 10 bytes of data * for the user pattern. input 10 bytes data are separated by space * * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern * * --- reset test pattern * echo 0 > test_pattern * * --- HPD detection is disabled when set PHY test pattern * * when PHY test pattern (pattern # within [1,7]) is set, HPD pin of HW ASIC * is disable. User could unplug DP display from DP connected and plug scope to * check test pattern PHY SI. 
* If there is need unplug scope and plug DP display back, do steps below: * echo 0 > phy_test_pattern * unplug scope * plug DP display. * * "echo 0 > phy_test_pattern" will re-enable HPD pin again so that video sw * driver could detect "unplug scope" and "plug DP display" */ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; char *wr_buf = NULL; uint32_t wr_buf_size = 100; long param[11] = {0x0}; int max_param_num = 11; enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; bool disable_hpd = false; bool valid_test_pattern = false; uint8_t param_nums = 0; /* init with default 80bit custom pattern */ uint8_t custom_pattern[10] = { 0x1f, 0x7c, 0xf0, 0xc1, 0x07, 0x1f, 0x7c, 0xf0, 0xc1, 0x07 }; struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN, LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED}; struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN, LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED}; struct link_training_settings link_training_settings; int i; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) return -ENOSPC; if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { kfree(wr_buf); DRM_DEBUG_DRIVER("user data not be read\n"); return -EINVAL; } test_pattern = param[0]; switch (test_pattern) { case DP_TEST_PATTERN_VIDEO_MODE: case DP_TEST_PATTERN_COLOR_SQUARES: case DP_TEST_PATTERN_COLOR_SQUARES_CEA: case DP_TEST_PATTERN_VERTICAL_BARS: case DP_TEST_PATTERN_HORIZONTAL_BARS: case DP_TEST_PATTERN_COLOR_RAMP: valid_test_pattern = true; break; case DP_TEST_PATTERN_D102: case DP_TEST_PATTERN_SYMBOL_ERROR: case DP_TEST_PATTERN_PRBS7: case DP_TEST_PATTERN_80BIT_CUSTOM: case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE: case DP_TEST_PATTERN_TRAINING_PATTERN4: disable_hpd = true; valid_test_pattern = true; break; default: valid_test_pattern = false; test_pattern = DP_TEST_PATTERN_UNSUPPORTED; break; } if (!valid_test_pattern) { kfree(wr_buf); DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n"); return size; } if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) { for (i = 0; i < 10; i++) { if ((uint8_t) param[i + 1] != 0x0) break; } if (i < 10) { /* not use default value */ for (i = 0; i < 10; i++) custom_pattern[i] = (uint8_t) param[i + 1]; } } /* Usage: set DP physical test pattern using debugfs with normal DP * panel. Then plug out DP panel and connect a scope to measure * For normal video mode and test pattern generated from CRCT, * they are visibile to user. So do not disable HPD. * Video Mode is also set to clear the test pattern, so enable HPD * because it might have been disabled after a test pattern was set. * AUX depends on HPD * sequence dependent, do not move! 
*/ if (!disable_hpd) dc_link_enable_hpd(link); prefer_link_settings.lane_count = link->verified_link_cap.lane_count; prefer_link_settings.link_rate = link->verified_link_cap.link_rate; prefer_link_settings.link_spread = link->verified_link_cap.link_spread; cur_link_settings.lane_count = link->cur_link_settings.lane_count; cur_link_settings.link_rate = link->cur_link_settings.link_rate; cur_link_settings.link_spread = link->cur_link_settings.link_spread; link_training_settings.link_settings = cur_link_settings; if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN && prefer_link_settings.link_rate != LINK_RATE_UNKNOWN && (prefer_link_settings.lane_count != cur_link_settings.lane_count || prefer_link_settings.link_rate != cur_link_settings.link_rate)) link_training_settings.link_settings = prefer_link_settings; } for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++) link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i]; dc_link_dp_set_test_pattern( link, test_pattern, DP_TEST_PATTERN_COLOR_SPACE_RGB, &link_training_settings, custom_pattern, 10); /* Usage: Set DP physical test pattern using AMDDP with normal DP panel * Then plug out DP panel and connect a scope to measure DP PHY signal. * Need disable interrupt to avoid SW driver disable DP output. This is * done after the test pattern is set. */ if (valid_test_pattern && disable_hpd) dc_link_disable_hpd(link); kfree(wr_buf); return size; } /* * Returns the DMCUB tracebuffer contents. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer */ static int dmub_tracebuffer_show(struct seq_file *m, void *data) { struct amdgpu_device *adev = m->private; struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; struct dmub_debugfs_trace_entry *entries; uint8_t *tbuf_base; uint32_t tbuf_size, max_entries, num_entries, i; if (!fb_info) return 0; tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr; if (!tbuf_base) return 0; tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size; max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) / sizeof(struct dmub_debugfs_trace_entry); num_entries = ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count; num_entries = min(num_entries, max_entries); entries = (struct dmub_debugfs_trace_entry *)(tbuf_base + sizeof(struct dmub_debugfs_trace_header)); for (i = 0; i < num_entries; ++i) { struct dmub_debugfs_trace_entry *entry = &entries[i]; seq_printf(m, "trace_code=%u tick_count=%u param0=%u param1=%u\n", entry->trace_code, entry->tick_count, entry->param0, entry->param1); } return 0; } /* * Returns the DMCUB firmware state contents. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state */ static int dmub_fw_state_show(struct seq_file *m, void *data) { struct amdgpu_device *adev = m->private; struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; uint8_t *state_base; uint32_t state_size; if (!fb_info) return 0; state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr; if (!state_base) return 0; state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size; return seq_write(m, state_base, state_size); } /* psr_capability_show() - show eDP panel PSR capability * * The read function: sink_psr_capability_show * Shows if sink has PSR capability or not. 
* If yes - the PSR version is appended * * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability * * Expected output: * "Sink support: no\n" - if panel doesn't support PSR * "Sink support: yes [0x01]\n" - if panel supports PSR1 * "Driver support: no\n" - if driver doesn't support PSR * "Driver support: yes [0x01]\n" - if driver supports PSR1 */ static int psr_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *link = aconnector->dc_link; if (!link) return -ENODEV; if (link->type == dc_connection_none) return -ENODEV; if (!(link->connector_signal & SIGNAL_TYPE_EDP)) return -ENODEV; seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_info.psr_version != 0)); if (link->dpcd_caps.psr_info.psr_version) seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_info.psr_version); seq_puts(m, "\n"); seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled)); if (link->psr_settings.psr_version) seq_printf(m, " [0x%02x]", link->psr_settings.psr_version); seq_puts(m, "\n"); return 0; } /* * Returns the current bpc for the crtc. * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc */ static int amdgpu_current_bpc_show(struct seq_file *m, void *data) { struct drm_crtc *crtc = m->private; struct drm_device *dev = crtc->dev; struct dm_crtc_state *dm_crtc_state = NULL; int res = -ENODEV; unsigned int bpc; mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state == NULL) goto unlock; dm_crtc_state = to_dm_crtc_state(crtc->state); if (dm_crtc_state->stream == NULL) goto unlock; switch (dm_crtc_state->stream->timing.display_color_depth) { case COLOR_DEPTH_666: bpc = 6; break; case COLOR_DEPTH_888: bpc = 8; break; case COLOR_DEPTH_101010: bpc = 10; break; case COLOR_DEPTH_121212: bpc = 12; break; case COLOR_DEPTH_161616: bpc = 16; break; default: goto unlock; } seq_printf(m, "Current: %u\n", bpc); res = 0; unlock: drm_modeset_unlock(&crtc->mutex); mutex_unlock(&dev->mode_config.mutex); return res; } DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc); /* * Returns the current colorspace for the crtc. 
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_colorspace */ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) { struct drm_crtc *crtc = m->private; struct drm_device *dev = crtc->dev; struct dm_crtc_state *dm_crtc_state = NULL; int res = -ENODEV; mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state == NULL) goto unlock; dm_crtc_state = to_dm_crtc_state(crtc->state); if (dm_crtc_state->stream == NULL) goto unlock; switch (dm_crtc_state->stream->output_color_space) { case COLOR_SPACE_SRGB: seq_puts(m, "sRGB"); break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: seq_puts(m, "BT601_YCC"); break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: seq_puts(m, "BT709_YCC"); break; case COLOR_SPACE_ADOBERGB: seq_puts(m, "opRGB"); break; case COLOR_SPACE_2020_RGB_FULLRANGE: seq_puts(m, "BT2020_RGB"); break; case COLOR_SPACE_2020_YCBCR: seq_puts(m, "BT2020_YCC"); break; default: goto unlock; } res = 0; unlock: drm_modeset_unlock(&crtc->mutex); mutex_unlock(&dev->mode_config.mutex); return res; } DEFINE_SHOW_ATTRIBUTE(amdgpu_current_colorspace); /* * Example usage: * Disable dsc passthrough, i.e.,: have dsc decoding at converver, not external RX * echo 1 /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough * Enable dsc passthrough, i.e.,: have dsc passthrough to external RX * echo 0 /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough */ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; char *wr_buf = NULL; uint32_t wr_buf_size = 42; int max_param_num = 1; long param; uint8_t param_nums = 0; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) { DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); return -ENOSPC; } if (parse_write_buffer_into_params(wr_buf, wr_buf_size, &param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } aconnector->dsc_settings.dsc_force_disable_passthrough = param; kfree(wr_buf); return 0; } /* * Returns the HDCP capability of the Display (1.4 for now). * * NOTE* Not all HDMI displays report their HDCP caps even when they are capable. * Since its rare for a display to not be HDCP 1.4 capable, we set HDMI as always capable. * * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability */ static int hdcp_sink_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); bool hdcp_cap, hdcp2_cap; if (connector->status != connector_status_connected) return -ENODEV; seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal); hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal); if (hdcp_cap) seq_printf(m, "%s ", "HDCP1.4"); if (hdcp2_cap) seq_printf(m, "%s ", "HDCP2.2"); if (!hdcp_cap && !hdcp2_cap) seq_printf(m, "%s ", "None"); seq_puts(m, "\n"); return 0; } /* * Returns whether the connected display is internal and not hotpluggable. 
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/internal_display */ static int internal_display_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *link = aconnector->dc_link; seq_printf(m, "Internal: %u\n", link->is_internal_display); return 0; } /* function description * * generic SDP message access for testing * * debugfs sdp_message is located at /syskernel/debug/dri/0/DP-x * * SDP header * Hb0 : Secondary-Data Packet ID * Hb1 : Secondary-Data Packet type * Hb2 : Secondary-Data-packet-specific header, Byte 0 * Hb3 : Secondary-Data-packet-specific header, Byte 1 * * for using custom sdp message: input 4 bytes SDP header and 32 bytes raw data */ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { int r; uint8_t data[36]; struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dm_crtc_state *acrtc_state; uint32_t write_size = 36; if (connector->base.status != connector_status_connected) return -ENODEV; if (size == 0) return 0; acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state); r = copy_from_user(data, buf, write_size); write_size -= r; dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size); return write_size; } /* function: Read link's DSC & FEC capabilities * * * Access it with the following command (you need to specify * connector like DP-1): * * cat /sys/kernel/debug/dri/0/DP-X/dp_dsc_fec_support * */ static int dp_dsc_fec_support_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_modeset_acquire_ctx ctx; struct drm_device *dev = connector->dev; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); int ret = 0; bool try_again = false; bool is_fec_supported = false; bool is_dsc_supported = false; struct dpcd_caps dpcd_caps; drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); do { try_again = false; ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); if (ret) { if (ret == -EDEADLK) { ret = drm_modeset_backoff(&ctx); if (!ret) { try_again = true; continue; } } break; } if (connector->status != connector_status_connected) { ret = -ENODEV; break; } dpcd_caps = aconnector->dc_link->dpcd_caps; if (aconnector->mst_output_port) { /* aconnector sets dsc_aux during get_modes call * if MST connector has it means it can either * enable DSC on the sink device or on MST branch * its connected to. */ if (aconnector->dsc_aux) { is_fec_supported = true; is_dsc_supported = true; } } else { is_fec_supported = dpcd_caps.fec_cap.raw & 0x1; is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1; } } while (try_again); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported)); seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported)); return ret; } /* function: Trigger virtual HPD redetection on connector * * This function will perform link rediscovery, link disable * and enable, and dm connector state update. 
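 * Note that writes are rejected with -EINVAL for MST end devices (connectors
 * that have an mst_root); see the check taken under hpd_lock below.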
* * Retrigger HPD on an existing connector by echoing 1 into * its respectful "trigger_hotplug" debugfs entry: * * echo 1 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug * * This function can perform HPD unplug: * * echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug * */ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct drm_connector *connector = &aconnector->base; struct dc_link *link = NULL; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); enum dc_connection_type new_connection_type = dc_connection_none; char *wr_buf = NULL; uint32_t wr_buf_size = 42; int max_param_num = 1; long param[1] = {0}; uint8_t param_nums = 0; bool ret = false; if (!aconnector || !aconnector->dc_link) return -EINVAL; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) { DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); return -ENOSPC; } if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } kfree(wr_buf); if (param_nums <= 0) { DRM_DEBUG_DRIVER("user data not be read\n"); return -EINVAL; } mutex_lock(&aconnector->hpd_lock); /* Don't support for mst end device*/ if (aconnector->mst_root) { mutex_unlock(&aconnector->hpd_lock); return -EINVAL; } if (param[0] == 1) { if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) && new_connection_type != dc_connection_none) goto unlock; mutex_lock(&adev->dm.dc_lock); ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); mutex_unlock(&adev->dm.dc_lock); if (!ret) goto unlock; amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); drm_kms_helper_connector_hotplug_event(connector); } else if (param[0] == 0) { if (!aconnector->dc_link) goto unlock; link = aconnector->dc_link; if (link->local_sink) { dc_sink_release(link->local_sink); link->local_sink = NULL; } link->dpcd_sink_count = 0; link->type = dc_connection_none; link->dongle_max_pix_clk = 0; amdgpu_dm_update_connector_after_detect(aconnector); /* If the aconnector is the root node in mst topology */ if (aconnector->mst_mgr.mst_state == true) dc_link_reset_cur_dp_mst_topology(link); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); drm_kms_helper_connector_hotplug_event(connector); } unlock: mutex_unlock(&aconnector->hpd_lock); return size; } /* function: read DSC status on the connector * * The read function: dp_dsc_clock_en_read * returns current status of DSC clock on the connector. * The return is a boolean flag: 1 or 0. 
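 * The value is read back from the DSC block of the pipe currently driving this
 * link via dsc_read_state(), so it reflects committed hardware state rather
 * than a pending request.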
* * Access it with the following command (you need to specify * connector like DP-1): * * cat /sys/kernel/debug/dri/0/DP-X/dsc_clock_en * * Expected output: * 1 - means that DSC is currently enabled * 0 - means that DSC is disabled */ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 10; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, "%d\n", dsc_state.dsc_clock_en); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } /* function: write force DSC on the connector * * The write function: dp_dsc_clock_en_write * enables to force DSC on the connector. * User can write to either force enable or force disable DSC * on the next modeset or set it to driver default * * Accepted inputs: * 0 - default DSC enablement policy * 1 - force enable DSC on the connector * 2 - force disable DSC on the connector (might cause fail in atomic_check) * * Writing DSC settings is done with the following command: * - To force enable DSC (you need to specify * connector like DP-1): * * echo 0x1 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en * * - To return to default state set the flag to zero and * let driver deal with DSC automatically * (you need to specify connector like DP-1): * * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en * */ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct drm_crtc *crtc = NULL; struct dm_crtc_state *dm_crtc_state = NULL; struct pipe_ctx *pipe_ctx; int i; char *wr_buf = NULL; uint32_t wr_buf_size = 42; int max_param_num = 1; long param[1] = {0}; uint8_t param_nums = 0; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) { DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); return -ENOSPC; } if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { DRM_DEBUG_DRIVER("user data not be read\n"); kfree(wr_buf); return -EINVAL; } for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } if (!pipe_ctx->stream) goto done; // Get CRTC state mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); if (connector->state == NULL) goto unlock; crtc = connector->state->crtc; if (crtc == NULL) goto unlock; drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state == NULL) goto unlock; 
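	/* CRTC and connector locks are held at this point; record the forced DSC
	 * policy and flag the CRTC state so the next atomic check re-evaluates DSC. */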
dm_crtc_state = to_dm_crtc_state(crtc->state); if (dm_crtc_state->stream == NULL) goto unlock; if (param[0] == 1) aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE; else if (param[0] == 2) aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE; else aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT; dm_crtc_state->dsc_force_changed = true; unlock: if (crtc) drm_modeset_unlock(&crtc->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); mutex_unlock(&dev->mode_config.mutex); done: kfree(wr_buf); return size; } /* function: read DSC slice width parameter on the connector * * The read function: dp_dsc_slice_width_read * returns dsc slice width used in the current configuration * The return is an integer: 0 or other positive number * * Access the status with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_width * * 0 - means that DSC is disabled * * Any other number more than zero represents the * slice width currently used by DSC in pixels * */ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 100; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, "%d\n", dsc_state.dsc_slice_width); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } /* function: write DSC slice width parameter * * The write function: dp_dsc_slice_width_write * overwrites automatically generated DSC configuration * of slice width. * * The user has to write the slice width divisible by the * picture width. * * Also the user has to write width in hexidecimal * rather than in decimal. 
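 * Internally the written value is converted into a horizontal slice count:
 *   dsc_num_slices_h = DIV_ROUND_UP(h_addressable, slice_width)
 * e.g. writing 0x780 (1920) for a 3840 pixel wide timing requests 2 slices.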
* * Writing DSC settings is done with the following command: * - To force overwrite slice width: (example sets to 1920 pixels) * * echo 0x780 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width * * - To stop overwriting and let driver find the optimal size, * set the width to zero: * * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width * */ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct pipe_ctx *pipe_ctx; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct drm_crtc *crtc = NULL; struct dm_crtc_state *dm_crtc_state = NULL; int i; char *wr_buf = NULL; uint32_t wr_buf_size = 42; int max_param_num = 1; long param[1] = {0}; uint8_t param_nums = 0; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) { DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); return -ENOSPC; } if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { DRM_DEBUG_DRIVER("user data not be read\n"); kfree(wr_buf); return -EINVAL; } for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } if (!pipe_ctx->stream) goto done; // Safely get CRTC state mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); if (connector->state == NULL) goto unlock; crtc = connector->state->crtc; if (crtc == NULL) goto unlock; drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state == NULL) goto unlock; dm_crtc_state = to_dm_crtc_state(crtc->state); if (dm_crtc_state->stream == NULL) goto unlock; if (param[0] > 0) aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP( pipe_ctx->stream->timing.h_addressable, param[0]); else aconnector->dsc_settings.dsc_num_slices_h = 0; dm_crtc_state->dsc_force_changed = true; unlock: if (crtc) drm_modeset_unlock(&crtc->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); mutex_unlock(&dev->mode_config.mutex); done: kfree(wr_buf); return size; } /* function: read DSC slice height parameter on the connector * * The read function: dp_dsc_slice_height_read * returns dsc slice height used in the current configuration * The return is an integer: 0 or other positive number * * Access the status with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_height * * 0 - means that DSC is disabled * * Any other number more than zero represents the * slice height currently used by DSC in pixels * */ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 100; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, 
"%d\n", dsc_state.dsc_slice_height); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } /* function: write DSC slice height parameter * * The write function: dp_dsc_slice_height_write * overwrites automatically generated DSC configuration * of slice height. * * The user has to write the slice height divisible by the * picture height. * * Also the user has to write height in hexidecimal * rather than in decimal. * * Writing DSC settings is done with the following command: * - To force overwrite slice height (example sets to 128 pixels): * * echo 0x80 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height * * - To stop overwriting and let driver find the optimal size, * set the height to zero: * * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height * */ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct drm_crtc *crtc = NULL; struct dm_crtc_state *dm_crtc_state = NULL; struct pipe_ctx *pipe_ctx; int i; char *wr_buf = NULL; uint32_t wr_buf_size = 42; int max_param_num = 1; uint8_t param_nums = 0; long param[1] = {0}; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) { DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); return -ENOSPC; } if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { DRM_DEBUG_DRIVER("user data not be read\n"); kfree(wr_buf); return -EINVAL; } for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } if (!pipe_ctx->stream) goto done; // Get CRTC state mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); if (connector->state == NULL) goto unlock; crtc = connector->state->crtc; if (crtc == NULL) goto unlock; drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state == NULL) goto unlock; dm_crtc_state = to_dm_crtc_state(crtc->state); if (dm_crtc_state->stream == NULL) goto unlock; if (param[0] > 0) aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP( pipe_ctx->stream->timing.v_addressable, param[0]); else aconnector->dsc_settings.dsc_num_slices_v = 0; dm_crtc_state->dsc_force_changed = true; unlock: if (crtc) drm_modeset_unlock(&crtc->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); mutex_unlock(&dev->mode_config.mutex); done: kfree(wr_buf); return size; } /* function: read DSC target rate on the connector in bits per pixel * * The read function: dp_dsc_bits_per_pixel_read * returns target rate of compression in bits per pixel * The return is an integer: 0 or other positive integer * * Access it with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel * * 0 - means that DSC is disabled */ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t 
rd_buf_size = 100; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, "%d\n", dsc_state.dsc_bits_per_pixel); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } /* function: write DSC target rate in bits per pixel * * The write function: dp_dsc_bits_per_pixel_write * overwrites automatically generated DSC configuration * of DSC target bit rate. * * Also the user has to write bpp in hexidecimal * rather than in decimal. * * Writing DSC settings is done with the following command: * - To force overwrite rate (example sets to 256 bpp x 1/16): * * echo 0x100 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel * * - To stop overwriting and let driver find the optimal rate, * set the rate to zero: * * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel * */ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct drm_crtc *crtc = NULL; struct dm_crtc_state *dm_crtc_state = NULL; struct pipe_ctx *pipe_ctx; int i; char *wr_buf = NULL; uint32_t wr_buf_size = 42; int max_param_num = 1; uint8_t param_nums = 0; long param[1] = {0}; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) { DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); return -ENOSPC; } if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { DRM_DEBUG_DRIVER("user data not be read\n"); kfree(wr_buf); return -EINVAL; } for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } if (!pipe_ctx->stream) goto done; // Get CRTC state mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); if (connector->state == NULL) goto unlock; crtc = connector->state->crtc; if (crtc == NULL) goto unlock; drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state == NULL) goto unlock; dm_crtc_state = to_dm_crtc_state(crtc->state); if (dm_crtc_state->stream == NULL) goto unlock; aconnector->dsc_settings.dsc_bits_per_pixel = param[0]; dm_crtc_state->dsc_force_changed = true; unlock: if (crtc) drm_modeset_unlock(&crtc->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); mutex_unlock(&dev->mode_config.mutex); done: kfree(wr_buf); return size; } /* function: read DSC picture width parameter on the connector * * The read function: dp_dsc_pic_width_read * returns dsc picture width used in the current configuration * It is the same as h_addressable of the current * display's timing * The return is an integer: 0 or other positive integer * If 0 then DSC is disabled. 
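 * The matching picture height is exposed through the dsc_pic_height entry
 * (handled by dp_dsc_pic_height_read below).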
* * Access it with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/dsc_pic_width * * 0 - means that DSC is disabled */ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 100; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, "%d\n", dsc_state.dsc_pic_width); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 100; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, "%d\n", dsc_state.dsc_pic_height); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } /* function: read DSC chunk size parameter on the connector * * The read function: dp_dsc_chunk_size_read * returns dsc chunk size set in the current configuration * The value is calculated automatically by DSC code * and depends on slice parameters and bpp target rate * The return is an integer: 0 or other positive integer * If 0 then DSC is disabled. 
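 * (In DSC terms the chunk size is the number of bytes produced for each slice
 * per line, roughly slice_width * bpp / 8 rounded up.)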
* * Access it with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/dsc_chunk_size * * 0 - means that DSC is disabled */ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 100; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, "%d\n", dsc_state.dsc_chunk_size); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } /* function: read DSC slice bpg offset on the connector * * The read function: dp_dsc_slice_bpg_offset_read * returns dsc bpg slice offset set in the current configuration * The value is calculated automatically by DSC code * and depends on slice parameters and bpp target rate * The return is an integer: 0 or other positive integer * If 0 then DSC is disabled. * * Access it with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_bpg_offset * * 0 - means that DSC is disabled */ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 100; struct pipe_ctx *pipe_ctx; ssize_t result = 0; int i, r, str_len = 30; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; rd_buf_ptr = rd_buf; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && pipe_ctx->stream->link == aconnector->dc_link) break; } dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); snprintf(rd_buf_ptr, str_len, "%d\n", dsc_state.dsc_slice_bpg_offset); rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } /* * function description: Read max_requested_bpc property from the connector * * Access it with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/max_bpc * */ static ssize_t dp_max_bpc_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct dm_connector_state *state; ssize_t result = 0; char *rd_buf = NULL; char *rd_buf_ptr = NULL; const uint32_t rd_buf_size = 10; int r; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&dev->mode_config.connection_mutex, 
NULL); if (connector->state == NULL) goto unlock; state = to_dm_connector_state(connector->state); rd_buf_ptr = rd_buf; snprintf(rd_buf_ptr, rd_buf_size, "%u\n", state->base.max_requested_bpc); while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { result = r; /* r = -EFAULT */ goto unlock; } buf += 1; size -= 1; *pos += 1; result += 1; } unlock: drm_modeset_unlock(&dev->mode_config.connection_mutex); mutex_unlock(&dev->mode_config.mutex); kfree(rd_buf); return result; } /* * function description: Set max_requested_bpc property on the connector * * This function will not force the input BPC on connector, it will only * change the max value. This is equivalent to setting max_bpc through * xrandr. * * The BPC value written must be >= 6 and <= 16. Values outside of this * range will result in errors. * * BPC values: * 0x6 - 6 BPC * 0x8 - 8 BPC * 0xa - 10 BPC * 0xc - 12 BPC * 0x10 - 16 BPC * * Write the max_bpc in the following way: * * echo 0x6 > /sys/kernel/debug/dri/0/DP-X/max_bpc * */ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct drm_connector *connector = &aconnector->base; struct dm_connector_state *state; struct drm_device *dev = connector->dev; char *wr_buf = NULL; uint32_t wr_buf_size = 42; int max_param_num = 1; long param[1] = {0}; uint8_t param_nums = 0; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) { DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); return -ENOSPC; } if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { DRM_DEBUG_DRIVER("user data not be read\n"); kfree(wr_buf); return -EINVAL; } if (param[0] < 6 || param[0] > 16) { DRM_DEBUG_DRIVER("bad max_bpc value\n"); kfree(wr_buf); return -EINVAL; } mutex_lock(&dev->mode_config.mutex); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); if (connector->state == NULL) goto unlock; state = to_dm_connector_state(connector->state); state->base.max_requested_bpc = param[0]; unlock: drm_modeset_unlock(&dev->mode_config.connection_mutex); mutex_unlock(&dev->mode_config.mutex); kfree(wr_buf); return size; } /* * Backlight at this moment. Read only. * As written to display, taking ABM and backlight lut into account. * Ranges from 0x0 to 0x10000 (= 100% PWM) * * Example usage: cat /sys/kernel/debug/dri/0/eDP-1/current_backlight */ static int current_backlight_show(struct seq_file *m, void *unused) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); struct dc_link *link = aconnector->dc_link; unsigned int backlight; backlight = dc_link_get_backlight_level(link); seq_printf(m, "0x%x\n", backlight); return 0; } /* * Backlight value that is being approached. Read only. * As written to display, taking ABM and backlight lut into account. 
* Ranges from 0x0 to 0x10000 (= 100% PWM) * * Example usage: cat /sys/kernel/debug/dri/0/eDP-1/target_backlight */ static int target_backlight_show(struct seq_file *m, void *unused) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); struct dc_link *link = aconnector->dc_link; unsigned int backlight; backlight = dc_link_get_target_backlight_pwm(link); seq_printf(m, "0x%x\n", backlight); return 0; } /* * function description: Determine if the connector is mst connector * * This function helps to determine whether a connector is a mst connector. * - "root" stands for the root connector of the topology * - "branch" stands for branch device of the topology * - "end" stands for leaf node connector of the topology * - "no" stands for the connector is not a device of a mst topology * Access it with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/is_mst_connector * */ static int dp_is_mst_connector_show(struct seq_file *m, void *unused) { struct drm_connector *connector = m->private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct drm_dp_mst_topology_mgr *mgr = NULL; struct drm_dp_mst_port *port = NULL; char *role = NULL; mutex_lock(&aconnector->hpd_lock); if (aconnector->mst_mgr.mst_state) { role = "root"; } else if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) { role = "end"; mgr = &aconnector->mst_root->mst_mgr; port = aconnector->mst_output_port; drm_modeset_lock(&mgr->base.lock, NULL); if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && port->mcs) role = "branch"; drm_modeset_unlock(&mgr->base.lock); } else { role = "no"; } seq_printf(m, "%s\n", role); mutex_unlock(&aconnector->hpd_lock); return 0; } /* * function description: Read out the mst progress status * * This function helps to determine the mst progress status of * a mst connector. * * Access it with the following command: * * cat /sys/kernel/debug/dri/0/DP-X/mst_progress_status * */ static int dp_mst_progress_status_show(struct seq_file *m, void *unused) { struct drm_connector *connector = m->private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct amdgpu_device *adev = drm_to_adev(connector->dev); int i; mutex_lock(&aconnector->hpd_lock); mutex_lock(&adev->dm.dc_lock); if (aconnector->mst_status == MST_STATUS_DEFAULT) { seq_puts(m, "disabled\n"); } else { for (i = 0; i < sizeof(mst_progress_status)/sizeof(char *); i++) seq_printf(m, "%s:%s\n", mst_progress_status[i], aconnector->mst_status & BIT(i) ? "done" : "not_done"); } mutex_unlock(&adev->dm.dc_lock); mutex_unlock(&aconnector->hpd_lock); return 0; } /* * Reports whether the connected display is a USB4 DPIA tunneled display * Example usage: cat /sys/kernel/debug/dri/0/DP-8/is_dpia_link */ static int is_dpia_link_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *link = aconnector->dc_link; if (connector->status != connector_status_connected) return -ENODEV; seq_printf(m, "%s\n", (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? "yes" : (link->ep_type == DISPLAY_ENDPOINT_PHY) ? 
"no" : "unknown"); return 0; } DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); DEFINE_SHOW_ATTRIBUTE(internal_display); DEFINE_SHOW_ATTRIBUTE(psr_capability); DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector); DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); DEFINE_SHOW_ATTRIBUTE(is_dpia_link); static const struct file_operations dp_dsc_clock_en_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_clock_en_read, .write = dp_dsc_clock_en_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_slice_width_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_slice_width_read, .write = dp_dsc_slice_width_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_slice_height_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_slice_height_read, .write = dp_dsc_slice_height_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_bits_per_pixel_read, .write = dp_dsc_bits_per_pixel_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_pic_width_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_pic_width_read, .llseek = default_llseek }; static const struct file_operations dp_dsc_pic_height_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_pic_height_read, .llseek = default_llseek }; static const struct file_operations dp_dsc_chunk_size_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_chunk_size_read, .llseek = default_llseek }; static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_slice_bpg_offset_read, .llseek = default_llseek }; static const struct file_operations trigger_hotplug_debugfs_fops = { .owner = THIS_MODULE, .write = trigger_hotplug, .llseek = default_llseek }; static const struct file_operations dp_link_settings_debugfs_fops = { .owner = THIS_MODULE, .read = dp_link_settings_read, .write = dp_link_settings_write, .llseek = default_llseek }; static const struct file_operations dp_phy_settings_debugfs_fop = { .owner = THIS_MODULE, .read = dp_phy_settings_read, .write = dp_phy_settings_write, .llseek = default_llseek }; static const struct file_operations dp_phy_test_pattern_fops = { .owner = THIS_MODULE, .write = dp_phy_test_pattern_debugfs_write, .llseek = default_llseek }; static const struct file_operations sdp_message_fops = { .owner = THIS_MODULE, .write = dp_sdp_message_debugfs_write, .llseek = default_llseek }; static const struct file_operations dp_max_bpc_debugfs_fops = { .owner = THIS_MODULE, .read = dp_max_bpc_read, .write = dp_max_bpc_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = { .owner = THIS_MODULE, .write = dp_dsc_passthrough_set, .llseek = default_llseek }; static const struct file_operations dp_mst_link_settings_debugfs_fops = { .owner = THIS_MODULE, .write = dp_mst_link_setting, .llseek = default_llseek }; static const struct { char *name; const struct file_operations *fops; } dp_debugfs_entries[] = { {"link_settings", &dp_link_settings_debugfs_fops}, {"phy_settings", &dp_phy_settings_debugfs_fop}, {"lttpr_status", &dp_lttpr_status_fops}, {"test_pattern", &dp_phy_test_pattern_fops}, {"hdcp_sink_capability", &hdcp_sink_capability_fops}, {"sdp_message", &sdp_message_fops}, {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, 
{"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops}, {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops}, {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops}, {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}, {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}, {"max_bpc", &dp_max_bpc_debugfs_fops}, {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops}, {"is_mst_connector", &dp_is_mst_connector_fops}, {"mst_progress_status", &dp_mst_progress_status_fops}, {"is_dpia_link", &is_dpia_link_fops}, {"mst_link_settings", &dp_mst_link_settings_debugfs_fops} }; static const struct { char *name; const struct file_operations *fops; } hdmi_debugfs_entries[] = { {"hdcp_sink_capability", &hdcp_sink_capability_fops} }; /* * Force YUV420 output if available from the given mode */ static int force_yuv420_output_set(void *data, u64 val) { struct amdgpu_dm_connector *connector = data; connector->force_yuv420_output = (bool)val; return 0; } /* * Check if YUV420 is forced when available from the given mode */ static int force_yuv420_output_get(void *data, u64 *val) { struct amdgpu_dm_connector *connector = data; *val = connector->force_yuv420_output; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, force_yuv420_output_set, "%llu\n"); /* * Read PSR state */ static int psr_get(void *data, u64 *val) { struct amdgpu_dm_connector *connector = data; struct dc_link *link = connector->dc_link; enum dc_psr_state state = PSR_STATE0; dc_link_get_psr_state(link, &state); *val = state; return 0; } /* * Read PSR state residency */ static int psr_read_residency(void *data, u64 *val) { struct amdgpu_dm_connector *connector = data; struct dc_link *link = connector->dc_link; u32 residency; link->dc->link_srv->edp_get_psr_residency(link, &residency); *val = (u64)residency; return 0; } /* read allow_edp_hotplug_detection */ static int allow_edp_hotplug_detection_get(void *data, u64 *val) { struct amdgpu_dm_connector *aconnector = data; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); *val = adev->dm.dc->config.allow_edp_hotplug_detection; return 0; } /* set allow_edp_hotplug_detection */ static int allow_edp_hotplug_detection_set(void *data, u64 val) { struct amdgpu_dm_connector *aconnector = data; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); adev->dm.dc->config.allow_edp_hotplug_detection = (uint32_t) val; return 0; } /* * Set dmcub trace event IRQ enable or disable. 
* Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en * Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en */ static int dmcub_trace_event_state_set(void *data, u64 val) { struct amdgpu_device *adev = data; if (val == 1 || val == 0) { dc_dmub_trace_event_control(adev->dm.dc, val); adev->dm.dmcub_trace_event_en = (bool)val; } else return 0; return 0; } /* * The interface doesn't need get function, so it will return the * value of zero * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en */ static int dmcub_trace_event_state_get(void *data, u64 *val) { struct amdgpu_device *adev = data; *val = adev->dm.dmcub_trace_event_en; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_get, dmcub_trace_event_state_set, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops, allow_edp_hotplug_detection_get, allow_edp_hotplug_detection_set, "%llu\n"); DEFINE_SHOW_ATTRIBUTE(current_backlight); DEFINE_SHOW_ATTRIBUTE(target_backlight); static const struct { char *name; const struct file_operations *fops; } connector_debugfs_entries[] = { {"force_yuv420_output", &force_yuv420_output_fops}, {"trigger_hotplug", &trigger_hotplug_debugfs_fops}, {"internal_display", &internal_display_fops} }; /* * Returns supported customized link rates by this eDP panel. * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting */ static int edp_ilr_show(struct seq_file *m, void *unused) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); struct dc_link *link = aconnector->dc_link; uint8_t supported_link_rates[16]; uint32_t link_rate_in_khz; uint32_t entry = 0; uint8_t dpcd_rev; memset(supported_link_rates, 0, sizeof(supported_link_rates)); dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, supported_link_rates, sizeof(supported_link_rates)); dpcd_rev = link->dpcd_caps.dpcd_rev.raw; if (dpcd_rev >= DP_DPCD_REV_13 && (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) { for (entry = 0; entry < 16; entry += 2) { link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + supported_link_rates[entry]) * 200; seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); } } else { seq_puts(m, "ILR is not supported by this eDP panel.\n"); } return 0; } /* * Set supported customized link rate to eDP panel. * * echo <lane_count> <link_rate option> > ilr_setting * * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ... 
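 * (each DPCD entry pair is interpreted as a 16-bit value in 200 kHz units,
 *  i.e. rate_khz = (byte1 * 0x100 + byte0) * 200)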
* echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting * to set 4 lanes and 2.16 GHz */ static ssize_t edp_ilr_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; struct amdgpu_device *adev = drm_to_adev(connector->base.dev); struct dc *dc = (struct dc *)link->dc; struct dc_link_settings prefer_link_settings; char *wr_buf = NULL; const uint32_t wr_buf_size = 40; /* 0: lane_count; 1: link_rate */ int max_param_num = 2; uint8_t param_nums = 0; long param[2]; bool valid_input = true; if (size == 0) return -EINVAL; wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); if (!wr_buf) return -ENOMEM; if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { kfree(wr_buf); return -EINVAL; } if (param_nums <= 0) { kfree(wr_buf); return -EINVAL; } switch (param[0]) { case LANE_COUNT_ONE: case LANE_COUNT_TWO: case LANE_COUNT_FOUR: break; default: valid_input = false; break; } if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count) valid_input = false; if (!valid_input) { kfree(wr_buf); DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n"); prefer_link_settings.use_link_rate_set = false; mutex_lock(&adev->dm.dc_lock); dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); mutex_unlock(&adev->dm.dc_lock); return size; } /* save user force lane_count, link_rate to preferred settings * spread spectrum will not be changed */ prefer_link_settings.link_spread = link->cur_link_settings.link_spread; prefer_link_settings.lane_count = param[0]; prefer_link_settings.use_link_rate_set = true; prefer_link_settings.link_rate_set = param[1]; prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]]; mutex_lock(&adev->dm.dc_lock); dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, false); mutex_unlock(&adev->dm.dc_lock); kfree(wr_buf); return size; } static int edp_ilr_open(struct inode *inode, struct file *file) { return single_open(file, edp_ilr_show, inode->i_private); } static const struct file_operations edp_ilr_debugfs_fops = { .owner = THIS_MODULE, .open = edp_ilr_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = edp_ilr_write }; void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; struct dentry *dir = connector->base.debugfs_entry; if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) { debugfs_create_file(dp_debugfs_entries[i].name, 0644, dir, connector, dp_debugfs_entries[i].fops); } } if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops); debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); debugfs_create_file_unsafe("psr_residency", 0444, dir, connector, &psr_residency_fops); debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector, &current_backlight_fops); debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector, &target_backlight_fops); debugfs_create_file("ilr_setting", 0644, dir, connector, &edp_ilr_debugfs_fops); debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector, &allow_edp_hotplug_detection_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { 
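		/* Entries common to every connector type: force_yuv420_output,
		 * trigger_hotplug and internal_display (see connector_debugfs_entries). */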
debugfs_create_file(connector_debugfs_entries[i].name, 0644, dir, connector, connector_debugfs_entries[i].fops); } if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) { for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) { debugfs_create_file(hdmi_debugfs_entries[i].name, 0644, dir, connector, hdmi_debugfs_entries[i].fops); } } } #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY /* * Set crc window coordinate x start */ static int crc_win_x_start_set(void *data, u64 val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); acrtc->dm_irq_params.window_param.x_start = (uint16_t) val; acrtc->dm_irq_params.window_param.update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; } /* * Get crc window coordinate x start */ static int crc_win_x_start_get(void *data, u64 *val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); *val = acrtc->dm_irq_params.window_param.x_start; spin_unlock_irq(&drm_dev->event_lock); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_start_fops, crc_win_x_start_get, crc_win_x_start_set, "%llu\n"); /* * Set crc window coordinate y start */ static int crc_win_y_start_set(void *data, u64 val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); acrtc->dm_irq_params.window_param.y_start = (uint16_t) val; acrtc->dm_irq_params.window_param.update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; } /* * Get crc window coordinate y start */ static int crc_win_y_start_get(void *data, u64 *val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); *val = acrtc->dm_irq_params.window_param.y_start; spin_unlock_irq(&drm_dev->event_lock); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_start_fops, crc_win_y_start_get, crc_win_y_start_set, "%llu\n"); /* * Set crc window coordinate x end */ static int crc_win_x_end_set(void *data, u64 val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); acrtc->dm_irq_params.window_param.x_end = (uint16_t) val; acrtc->dm_irq_params.window_param.update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; } /* * Get crc window coordinate x end */ static int crc_win_x_end_get(void *data, u64 *val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); *val = acrtc->dm_irq_params.window_param.x_end; spin_unlock_irq(&drm_dev->event_lock); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_end_fops, crc_win_x_end_get, crc_win_x_end_set, "%llu\n"); /* * Set crc window coordinate y end */ static int crc_win_y_end_set(void *data, u64 val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); acrtc->dm_irq_params.window_param.y_end = (uint16_t) val; acrtc->dm_irq_params.window_param.update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; } /* * Get crc window coordinate y end */ static int crc_win_y_end_get(void *data, u64 *val) { struct drm_crtc *crtc = data; struct drm_device *drm_dev = 
crtc->dev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); *val = acrtc->dm_irq_params.window_param.y_end; spin_unlock_irq(&drm_dev->event_lock); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get, crc_win_y_end_set, "%llu\n"); /* * Trigger to commit crc window */ static int crc_win_update_set(void *data, u64 val) { struct drm_crtc *crtc = data; struct amdgpu_crtc *acrtc; struct amdgpu_device *adev = drm_to_adev(crtc->dev); if (val) { acrtc = to_amdgpu_crtc(crtc); mutex_lock(&adev->dm.dc_lock); /* PSR may write to OTG CRC window control register, * so close it before starting secure_display. */ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream); spin_lock_irq(&adev_to_drm(adev)->event_lock); acrtc->dm_irq_params.window_param.activated = true; acrtc->dm_irq_params.window_param.update_win = true; acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; spin_unlock_irq(&adev_to_drm(adev)->event_lock); mutex_unlock(&adev->dm.dc_lock); } return 0; } /* * Get crc window update flag */ static int crc_win_update_get(void *data, u64 *val) { *val = 0; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get, crc_win_update_set, "%llu\n"); #endif void crtc_debugfs_init(struct drm_crtc *crtc) { #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry); if (!dir) return; debugfs_create_file_unsafe("crc_win_x_start", 0644, dir, crtc, &crc_win_x_start_fops); debugfs_create_file_unsafe("crc_win_y_start", 0644, dir, crtc, &crc_win_y_start_fops); debugfs_create_file_unsafe("crc_win_x_end", 0644, dir, crtc, &crc_win_x_end_fops); debugfs_create_file_unsafe("crc_win_y_end", 0644, dir, crtc, &crc_win_y_end_fops); debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, &crc_win_update_fops); dput(dir); #endif debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, crtc, &amdgpu_current_bpc_fops); debugfs_create_file("amdgpu_current_colorspace", 0644, crtc->debugfs_entry, crtc, &amdgpu_current_colorspace_fops); } /* * Writes DTN log state to the user supplied buffer. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log */ static ssize_t dtn_log_read( struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_device *adev = file_inode(f)->i_private; struct dc *dc = adev->dm.dc; struct dc_log_buffer_ctx log_ctx = { 0 }; ssize_t result = 0; if (!buf || !size) return -EINVAL; if (!dc->hwss.log_hw_state) return 0; dc->hwss.log_hw_state(dc, &log_ctx); if (*pos < log_ctx.pos) { size_t to_copy = log_ctx.pos - *pos; to_copy = min(to_copy, size); if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) { *pos += to_copy; result = to_copy; } } kfree(log_ctx.buf); return result; } /* * Writes DTN log state to dmesg when triggered via a write. * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log */ static ssize_t dtn_log_write( struct file *f, const char __user *buf, size_t size, loff_t *pos) { struct amdgpu_device *adev = file_inode(f)->i_private; struct dc *dc = adev->dm.dc; /* Write triggers log output via dmesg. 
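	 * The read path above captures the dump into a dc_log_buffer_ctx, while the
	 * write path passes a NULL context so log_hw_state() prints to the kernel log.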
*/ if (size == 0) return 0; if (dc->hwss.log_hw_state) dc->hwss.log_hw_state(dc, NULL); return size; } static int mst_topo_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct amdgpu_dm_connector *aconnector; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; aconnector = to_amdgpu_dm_connector(connector); /* Ensure we're only dumping the topology of a root mst node */ if (!aconnector->mst_mgr.mst_state) continue; seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id); drm_dp_mst_dump_topology(m, &aconnector->mst_mgr); } drm_connector_list_iter_end(&conn_iter); return 0; } /* * Sets trigger hpd for MST topologies. * All connected connectors will be rediscovered and re started as needed if val of 1 is sent. * All topologies will be disconnected if val of 0 is set . * Usage to enable topologies: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst * Usage to disable topologies: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst */ static int trigger_hpd_mst_set(void *data, u64 val) { struct amdgpu_device *adev = data; struct drm_device *dev = adev_to_drm(adev); struct drm_connector_list_iter iter; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct dc_link *link = NULL; if (val == 1) { drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { mutex_lock(&adev->dm.dc_lock); dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); mutex_unlock(&adev->dm.dc_lock); drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); } } } else if (val == 0) { drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->dc_link) continue; if (!aconnector->mst_root) continue; link = aconnector->dc_link; dc_link_dp_receiver_power_ctrl(link, false); drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false); link->mst_stream_alloc_table.stream_count = 0; memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations)); } } else { return 0; } drm_kms_helper_hotplug_event(dev); return 0; } /* * The interface doesn't need get function, so it will return the * value of zero * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst */ static int trigger_hpd_mst_get(void *data, u64 *val) { *val = 0; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(trigger_hpd_mst_ops, trigger_hpd_mst_get, trigger_hpd_mst_set, "%llu\n"); /* * Sets the force_timing_sync debug option from the given string. * All connected displays will be force synchronized immediately. * Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync */ static int force_timing_sync_set(void *data, u64 val) { struct amdgpu_device *adev = data; adev->dm.force_timing_sync = (bool)val; amdgpu_dm_trigger_timing_sync(adev_to_drm(adev)); return 0; } /* * Gets the force_timing_sync debug option value into the given buffer. 
* Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync */ static int force_timing_sync_get(void *data, u64 *val) { struct amdgpu_device *adev = data; *val = adev->dm.force_timing_sync; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get, force_timing_sync_set, "%llu\n"); /* * Disables all HPD and HPD RX interrupt handling in the * driver when set to 1. Default is 0. */ static int disable_hpd_set(void *data, u64 val) { struct amdgpu_device *adev = data; adev->dm.disable_hpd_irq = (bool)val; return 0; } /* * Returns 1 if HPD and HPRX interrupt handling is disabled, * 0 otherwise. */ static int disable_hpd_get(void *data, u64 *val) { struct amdgpu_device *adev = data; *val = adev->dm.disable_hpd_irq; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, disable_hpd_set, "%llu\n"); /* * Temporary w/a to force sst sequence in M42D DP2 mst receiver * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst */ static int dp_force_sst_set(void *data, u64 val) { struct amdgpu_device *adev = data; adev->dm.dc->debug.set_mst_en_for_sst = val; return 0; } static int dp_force_sst_get(void *data, u64 *val) { struct amdgpu_device *adev = data; *val = adev->dm.dc->debug.set_mst_en_for_sst; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get, dp_force_sst_set, "%llu\n"); /* * Force DP2 sequence without VESA certified cable. * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_ignore_cable_id */ static int dp_ignore_cable_id_set(void *data, u64 val) { struct amdgpu_device *adev = data; adev->dm.dc->debug.ignore_cable_id = val; return 0; } static int dp_ignore_cable_id_get(void *data, u64 *val) { struct amdgpu_device *adev = data; *val = adev->dm.dc->debug.ignore_cable_id; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get, dp_ignore_cable_id_set, "%llu\n"); /* * Sets the DC visual confirm debug option from the given string. * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm */ static int visual_confirm_set(void *data, u64 val) { struct amdgpu_device *adev = data; adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val; return 0; } /* * Reads the DC visual confirm debug option value into the given buffer. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm */ static int visual_confirm_get(void *data, u64 *val) { struct amdgpu_device *adev = data; *val = adev->dm.dc->debug.visual_confirm; return 0; } DEFINE_SHOW_ATTRIBUTE(mst_topo); DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get, visual_confirm_set, "%llu\n"); /* * Sets the DC skip_detection_link_training debug option from the given string. * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_skip_detection_link_training */ static int skip_detection_link_training_set(void *data, u64 val) { struct amdgpu_device *adev = data; if (val == 0) adev->dm.dc->debug.skip_detection_link_training = false; else adev->dm.dc->debug.skip_detection_link_training = true; return 0; } /* * Reads the DC skip_detection_link_training debug option value into the given buffer. 
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training */ static int skip_detection_link_training_get(void *data, u64 *val) { struct amdgpu_device *adev = data; *val = adev->dm.dc->debug.skip_detection_link_training; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(skip_detection_link_training_fops, skip_detection_link_training_get, skip_detection_link_training_set, "%llu\n"); /* * Dumps the DCC_EN bit for each pipe. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en */ static ssize_t dcc_en_bits_read( struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_device *adev = file_inode(f)->i_private; struct dc *dc = adev->dm.dc; char *rd_buf = NULL; const uint32_t rd_buf_size = 32; uint32_t result = 0; int offset = 0; int num_pipes = dc->res_pool->pipe_count; int *dcc_en_bits; int i, r; dcc_en_bits = kcalloc(num_pipes, sizeof(int), GFP_KERNEL); if (!dcc_en_bits) return -ENOMEM; if (!dc->hwss.get_dcc_en_bits) { kfree(dcc_en_bits); return 0; } dc->hwss.get_dcc_en_bits(dc, dcc_en_bits); rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) { kfree(dcc_en_bits); return -ENOMEM; } for (i = 0; i < num_pipes; i++) offset += snprintf(rd_buf + offset, rd_buf_size - offset, "%d ", dcc_en_bits[i]); rd_buf[strlen(rd_buf)] = '\n'; kfree(dcc_en_bits); while (size) { if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); if (r) { kfree(rd_buf); return r; /* r = -EFAULT */ } buf += 1; size -= 1; *pos += 1; result += 1; } kfree(rd_buf); return result; } void dtn_debugfs_init(struct amdgpu_device *adev) { static const struct file_operations dtn_log_fops = { .owner = THIS_MODULE, .read = dtn_log_read, .write = dtn_log_write, .llseek = default_llseek }; static const struct file_operations dcc_en_bits_fops = { .owner = THIS_MODULE, .read = dcc_en_bits_read, .llseek = default_llseek }; struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; debugfs_create_file("amdgpu_mst_topology", 0444, root, adev, &mst_topo_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, &dp_set_mst_en_for_sst_ops); debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev, &dp_ignore_cable_id_ops); debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, &visual_confirm_fops); debugfs_create_file_unsafe("amdgpu_dm_skip_detection_link_training", 0644, root, adev, &skip_detection_link_training_fops); debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root, adev, &dmub_tracebuffer_fops); debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root, adev, &dmub_fw_state_fops); debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, adev, &force_timing_sync_ops); debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root, adev, &dmcub_trace_event_state_fops); debugfs_create_file_unsafe("amdgpu_dm_trigger_hpd_mst", 0644, root, adev, &trigger_hpd_mst_ops); debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev, &dcc_en_bits_fops); debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev, &disable_hpd_ops); }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
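A minimal, hypothetical sketch (not part of amdgpu_dm_debugfs.c) of the debugfs pattern the file above repeats for every u64-valued knob: a get/set callback pair wrapped by DEFINE_DEBUGFS_ATTRIBUTE and registered with debugfs_create_file_unsafe. The names example_value, example_get, example_set, example_fops and example_debugfs_init are illustrative only.

#include <linux/debugfs.h>

static u64 example_value;

static int example_get(void *data, u64 *val)
{
	*val = example_value;
	return 0;
}

static int example_set(void *data, u64 val)
{
	example_value = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

static void example_debugfs_init(struct dentry *root)
{
	/* the _unsafe variant is the one that pairs with DEFINE_DEBUGFS_ATTRIBUTE */
	debugfs_create_file_unsafe("example_knob", 0644, root, NULL, &example_fops);
}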
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/string.h> #include <linux/acpi.h> #include <linux/i2c.h> #include <drm/drm_atomic.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <drm/drm_edid.h> #include "dm_services.h" #include "amdgpu.h" #include "dc.h" #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" #include "amdgpu_dm_mst_types.h" #include "dpcd_defs.h" #include "dc/inc/core_types.h" #include "dm_helpers.h" #include "ddc_service_types.h" static u32 edid_extract_panel_id(struct edid *edid) { return (u32)edid->mfg_id[0] << 24 | (u32)edid->mfg_id[1] << 16 | (u32)EDID_PRODUCT_ID(edid); } static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) { uint32_t panel_id = edid_extract_panel_id(edid); switch (panel_id) { /* Workaround for some monitors which does not work well with FAMS */ case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC): DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_fams = true; break; default: return; } } /** * dm_helpers_parse_edid_caps() - Parse edid caps * * @link: current detected link * @edid: [in] pointer to edid * @edid_caps: [in] pointer to edid caps * * Return: void */ enum dc_edid_status dm_helpers_parse_edid_caps( struct dc_link *link, const struct dc_edid *edid, struct dc_edid_caps *edid_caps) { struct amdgpu_dm_connector *aconnector = link->priv; struct drm_connector *connector = &aconnector->base; struct edid *edid_buf = edid ? 
(struct edid *) edid->raw_edid : NULL; struct cea_sad *sads; int sad_count = -1; int sadb_count = -1; int i = 0; uint8_t *sadb = NULL; enum dc_edid_status result = EDID_OK; if (!edid_caps || !edid) return EDID_BAD_INPUT; if (!drm_edid_is_valid(edid_buf)) result = EDID_BAD_CHECKSUM; edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] | ((uint16_t) edid_buf->mfg_id[1])<<8; edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] | ((uint16_t) edid_buf->prod_code[1])<<8; edid_caps->serial_number = edid_buf->serial; edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; drm_edid_get_monitor_name(edid_buf, edid_caps->display_name, AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); edid_caps->edid_hdmi = connector->display_info.is_hdmi; sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) return result; edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT); for (i = 0; i < edid_caps->audio_mode_count; ++i) { struct cea_sad *sad = &sads[i]; edid_caps->audio_modes[i].format_code = sad->format; edid_caps->audio_modes[i].channel_count = sad->channels + 1; edid_caps->audio_modes[i].sample_rate = sad->freq; edid_caps->audio_modes[i].sample_size = sad->byte2; } sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb); if (sadb_count < 0) { DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); sadb_count = 0; } if (sadb_count) edid_caps->speaker_flags = sadb[0]; else edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; apply_edid_quirks(edid_buf, edid_caps); kfree(sads); kfree(sadb); return result; } static void fill_dc_mst_payload_table_from_drm(struct dc_link *link, bool enable, struct drm_dp_mst_atomic_payload *target_payload, struct dc_dp_mst_stream_allocation_table *table) { struct dc_dp_mst_stream_allocation_table new_table = { 0 }; struct dc_dp_mst_stream_allocation *sa; struct link_mst_stream_allocation_table copy_of_link_table = link->mst_stream_alloc_table; int i; int current_hw_table_stream_cnt = copy_of_link_table.stream_count; struct link_mst_stream_allocation *dc_alloc; /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/ if (enable) { dc_alloc = &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt]; dc_alloc->vcp_id = target_payload->vcpi; dc_alloc->slot_count = target_payload->time_slots; } else { for (i = 0; i < copy_of_link_table.stream_count; i++) { dc_alloc = &copy_of_link_table.stream_allocations[i]; if (dc_alloc->vcp_id == target_payload->vcpi) { dc_alloc->vcp_id = 0; dc_alloc->slot_count = 0; break; } } ASSERT(i != copy_of_link_table.stream_count); } /* Fill payload info*/ for (i = 0; i < MAX_CONTROLLER_NUM; i++) { dc_alloc = &copy_of_link_table.stream_allocations[i]; if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) { sa = &new_table.stream_allocations[new_table.stream_count]; sa->slot_count = dc_alloc->slot_count; sa->vcp_id = dc_alloc->vcp_id; new_table.stream_count++; } } /* Overwrite the old table */ *table = new_table; } void dm_helpers_dp_update_branch_info( struct dc_context *ctx, const struct dc_link *link) {} static void dm_helpers_construct_old_payload( struct dc_link *link, int pbn_per_slot, struct drm_dp_mst_atomic_payload *new_payload, struct drm_dp_mst_atomic_payload *old_payload) { struct link_mst_stream_allocation_table current_link_table = link->mst_stream_alloc_table; struct link_mst_stream_allocation *dc_alloc; int i; *old_payload = *new_payload; /* Set correct time_slots/PBN of 
old payload. * other fields (delete & dsc_enabled) in * struct drm_dp_mst_atomic_payload are don't care fields * while calling drm_dp_remove_payload() */ for (i = 0; i < current_link_table.stream_count; i++) { dc_alloc = &current_link_table.stream_allocations[i]; if (dc_alloc->vcp_id == new_payload->vcpi) { old_payload->time_slots = dc_alloc->slot_count; old_payload->pbn = dc_alloc->slot_count * pbn_per_slot; break; } } /* make sure there is an old payload*/ ASSERT(i != current_link_table.stream_count); } /* * Writes payload allocation table in immediate downstream device. */ bool dm_helpers_dp_mst_write_payload_allocation_table( struct dc_context *ctx, const struct dc_stream_state *stream, struct dc_dp_mst_stream_allocation_table *proposed_table, bool enable) { struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload; struct drm_dp_mst_topology_mgr *mst_mgr; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; /* Accessing the connector state is required for vcpi_slots allocation * and directly relies on behaviour in commit check * that blocks before commit guaranteeing that the state * is not gonna be swapped while still in use in commit tail */ if (!aconnector || !aconnector->mst_root) return false; mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); /* It's OK for this to fail */ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); if (enable) { target_payload = new_payload; drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload); } else { /* construct old payload by VCPI*/ dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div, new_payload, &old_payload); target_payload = &old_payload; drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload); } /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each * stream. AMD ASIC stream slot allocation should follow the same * sequence. copy DRM MST allocation to dc */ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table); return true; } /* * poll pending down reply */ void dm_helpers_dp_mst_poll_pending_down_reply( struct dc_context *ctx, const struct dc_link *link) {} /* * Clear payload allocation table before enable MST DP link. */ void dm_helpers_dp_mst_clear_payload_allocation_table( struct dc_context *ctx, const struct dc_link *link) {} /* * Polls for ACT (allocation change trigger) handled and sends * ALLOCATE_PAYLOAD message. 
*/ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream) { struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; int ret; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_root) return ACT_FAILED; mst_mgr = &aconnector->mst_root->mst_mgr; if (!mst_mgr->mst_state) return ACT_FAILED; ret = drm_dp_check_act_status(mst_mgr); if (ret) return ACT_FAILED; return ACT_SUCCESS; } bool dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, const struct dc_stream_state *stream, bool enable) { struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_atomic_payload *payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; int ret = 0; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_root) return false; mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); if (!enable) { set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; clr_flag = MST_ALLOCATE_NEW_PAYLOAD; } if (enable) ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload); if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, false); } else { amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true); amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); } return true; } void dm_dtn_log_begin(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx) { static const char msg[] = "[dtn begin]\n"; if (!log_ctx) { pr_info("%s", msg); return; } dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); } __printf(3, 4) void dm_dtn_log_append_v(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx, const char *msg, ...) { va_list args; size_t total; int n; if (!log_ctx) { /* No context, redirect to dmesg. */ struct va_format vaf; vaf.fmt = msg; vaf.va = &args; va_start(args, msg); pr_info("%pV", &vaf); va_end(args); return; } /* Measure the output. */ va_start(args, msg); n = vsnprintf(NULL, 0, msg, args); va_end(args); if (n <= 0) return; /* Reallocate the string buffer as needed. */ total = log_ctx->pos + n + 1; if (total > log_ctx->size) { char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL); if (buf) { memcpy(buf, log_ctx->buf, log_ctx->pos); kfree(log_ctx->buf); log_ctx->buf = buf; log_ctx->size = total; } } if (!log_ctx->buf) return; /* Write the formatted string to the log buffer. 
*/ va_start(args, msg); n = vscnprintf( log_ctx->buf + log_ctx->pos, log_ctx->size - log_ctx->pos, msg, args); va_end(args); if (n > 0) log_ctx->pos += n; } void dm_dtn_log_end(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx) { static const char msg[] = "[dtn end]\n"; if (!log_ctx) { pr_info("%s", msg); return; } dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); } bool dm_helpers_dp_mst_start_top_mgr( struct dc_context *ctx, const struct dc_link *link, bool boot) { struct amdgpu_dm_connector *aconnector = link->priv; int ret; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); return false; } if (boot) { DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); return true; } DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); return false; } DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0], aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK); return true; } bool dm_helpers_dp_mst_stop_top_mgr( struct dc_context *ctx, struct dc_link *link) { struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); return false; } DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); if (aconnector->mst_mgr.mst_state == true) { drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); link->cur_link_settings.lane_count = 0; } return false; } bool dm_helpers_dp_read_dpcd( struct dc_context *ctx, const struct dc_link *link, uint32_t address, uint8_t *data, uint32_t size) { struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { DC_LOG_DC("Failed to find connector for link!\n"); return false; } return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data, size) == size; } bool dm_helpers_dp_write_dpcd( struct dc_context *ctx, const struct dc_link *link, uint32_t address, const uint8_t *data, uint32_t size) { struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); return false; } return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, address, (uint8_t *)data, size) > 0; } bool dm_helpers_submit_i2c( struct dc_context *ctx, const struct dc_link *link, struct i2c_command *cmd) { struct amdgpu_dm_connector *aconnector = link->priv; struct i2c_msg *msgs; int i = 0; int num = cmd->number_of_payloads; bool result; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); return false; } msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL); if (!msgs) return false; for (i = 0; i < num; i++) { msgs[i].flags = cmd->payloads[i].write ? 
0 : I2C_M_RD; msgs[i].addr = cmd->payloads[i].address; msgs[i].len = cmd->payloads[i].length; msgs[i].buf = cmd->payloads[i].data; } result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; kfree(msgs); return result; } static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, bool is_write_cmd, unsigned char cmd, unsigned int length, unsigned int offset, unsigned char *data) { bool success = false; unsigned char rc_data[16] = {0}; unsigned char rc_offset[4] = {0}; unsigned char rc_length[2] = {0}; unsigned char rc_cmd = 0; unsigned char rc_result = 0xFF; unsigned char i = 0; int ret; if (is_write_cmd) { // write rc data memmove(rc_data, data, length); ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); } // write rc offset rc_offset[0] = (unsigned char) offset & 0xFF; rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); // write rc length rc_length[0] = (unsigned char) length & 0xFF; rc_length[1] = (unsigned char) (length >> 8) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); // write rc cmd rc_cmd = cmd | 0x80; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); if (ret < 0) { DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); return false; } // poll until active is 0 for (i = 0; i < 10; i++) { drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); if (rc_cmd == cmd) // active is 0 break; msleep(10); } // read rc result drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); success = (rc_result == 0); if (success && !is_write_cmd) { // read rc data drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); } DC_LOG_DC("%s: success = %d\n", __func__, success); return success; } static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) { unsigned char data[16] = {0}; DC_LOG_DC("Start %s\n", __func__); // Step 2 data[0] = 'P'; data[1] = 'R'; data[2] = 'I'; data[3] = 'U'; data[4] = 'S'; if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data)) return; // Step 3 and 4 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) return; data[0] &= (~(1 << 1)); // set bit 1 to 0 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) return; if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) return; data[0] &= (~(1 << 1)); // set bit 1 to 0 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) return; if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) return; data[0] &= (~(1 << 1)); // set bit 1 to 0 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) return; // Step 3 and 5 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) return; data[0] |= (1 << 1); // set bit 1 to 1 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) return; if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) return; data[0] |= (1 << 1); // set bit 1 to 1 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) return; data[0] |= (1 << 1); // set bit 1 to 1 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) return; // Step 6 if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) return; DC_LOG_DC("Done %s\n", __func__); } /* MST Dock */ static const uint8_t SYNAPTICS_DEVICE_ID[] 
= "SYNA"; static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( struct drm_dp_aux *aux, const struct dc_stream_state *stream, bool enable) { uint8_t ret = 0; DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); if (enable) { /* When DSC is enabled on previous boot and reboot with the hub, * there is a chance that Synaptics hub gets stuck during reboot sequence. * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream */ if (!stream->link->link_status.link_active && memcmp(stream->link->dpcd_caps.branch_dev_name, (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) apply_synaptics_fifo_reset_wa(aux); ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); DRM_INFO("Send DSC enable to synaptics\n"); } else { /* Synaptics hub not support virtual dpcd, * external monitor occur garbage while disable DSC, * Disable DSC only when entire link status turn to false, */ if (!stream->link->link_status.link_active) { ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); DRM_INFO("Send DSC disable to synaptics\n"); } } return ret; } bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, bool enable) { static const uint8_t DSC_DISABLE; static const uint8_t DSC_DECODING = 0x01; static const uint8_t DSC_PASSTHROUGH = 0x02; struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_port *port; uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE; uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE; uint8_t ret = 0; if (!stream) return false; if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector->dsc_aux) return false; // apply w/a to synaptics if (needs_dsc_aux_workaround(aconnector->dc_link) && (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) return write_dsc_enable_synaptics_non_virtual_dpcd_mst( aconnector->dsc_aux, stream, enable_dsc); port = aconnector->mst_output_port; if (enable) { if (port->passthrough_aux) { ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", ret); } ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n", (port->passthrough_aux) ? "remote RX" : "virtual dpcd", ret); } else { ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n", (port->passthrough_aux) ? "remote RX" : "virtual dpcd", ret); if (port->passthrough_aux) { ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", ret); } } } if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? 
"enable" : "disable"); } } return ret; } bool dm_helpers_is_dp_sink_present(struct dc_link *link) { bool dp_sink_present; struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { BUG_ON("Failed to find connector for link!"); return true; } mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex); dp_sink_present = dc_link_is_dp_sink_present(link); mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex); return dp_sink_present; } enum dc_edid_status dm_helpers_read_local_edid( struct dc_context *ctx, struct dc_link *link, struct dc_sink *sink) { struct amdgpu_dm_connector *aconnector = link->priv; struct drm_connector *connector = &aconnector->base; struct i2c_adapter *ddc; int retry = 3; enum dc_edid_status edid_status; struct edid *edid; if (link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; else ddc = &aconnector->i2c->base; /* some dongles read edid incorrectly the first time, * do check sum and retry to make sure read correct edid. */ do { edid = drm_get_edid(&aconnector->base, ddc); /* DP Compliance Test 4.2.2.6 */ if (link->aux_mode && connector->edid_corrupt) drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); if (!edid && connector->edid_corrupt) { connector->edid_corrupt = false; return EDID_BAD_CHECKSUM; } if (!edid) return EDID_NO_RESPONSE; sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); /* We don't need the original edid anymore */ kfree(edid); edid_status = dm_helpers_parse_edid_caps( link, &sink->dc_edid, &sink->edid_caps); } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); if (edid_status != EDID_OK) DRM_ERROR("EDID err: %d, on connector: %s", edid_status, aconnector->base.name); if (link->aux_mode) { union test_request test_request = {0}; union test_response test_response = {0}; dm_helpers_dp_read_dpcd(ctx, link, DP_TEST_REQUEST, &test_request.raw, sizeof(union test_request)); if (!test_request.bits.EDID_READ) return edid_status; test_response.bits.EDID_CHECKSUM_WRITE = 1; dm_helpers_dp_write_dpcd(ctx, link, DP_TEST_EDID_CHECKSUM, &sink->dc_edid.raw_edid[sink->dc_edid.length-1], 1); dm_helpers_dp_write_dpcd(ctx, link, DP_TEST_RESPONSE, &test_response.raw, sizeof(test_response)); } return edid_status; } int dm_helper_dmub_aux_transfer_sync( struct dc_context *ctx, const struct dc_link *link, struct aux_payload *payload, enum aux_return_code_type *operation_result) { return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, operation_result); } int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, const struct dc_link *link, struct set_config_cmd_payload *payload, enum set_config_status *operation_result) { return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload, operation_result); } void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) { /* TODO: something */ } void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us) { // TODO: //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); } void dm_helpers_init_panel_settings( struct dc_context *ctx, struct dc_panel_config *panel_config, struct dc_sink *sink) { // Extra Panel Power Sequence panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms; panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms; panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off; panel_config->pps.extra_post_t7_ms 
= 0; panel_config->pps.extra_pre_t11_ms = 0; panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms; panel_config->pps.extra_post_OUI_ms = 0; // Feature DSC panel_config->dsc.disable_dsc_edp = false; panel_config->dsc.force_dsc_edp_policy = 0; } void dm_helpers_override_panel_settings( struct dc_context *ctx, struct dc_panel_config *panel_config) { // Feature DSC if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) panel_config->dsc.disable_dsc_edp = true; } void *dm_helpers_allocate_gpu_mem( struct dc_context *ctx, enum dc_gpu_mem_alloc_type type, size_t size, long long *addr) { struct amdgpu_device *adev = ctx->driver_context; struct dal_allocation *da; u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; int ret; da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); if (!da) return NULL; ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, domain, &da->bo, &da->gpu_addr, &da->cpu_ptr); *addr = da->gpu_addr; if (ret) { kfree(da); return NULL; } /* add da to list in dm */ list_add(&da->list, &adev->dm.da_list); return da->cpu_ptr; } void dm_helpers_free_gpu_mem( struct dc_context *ctx, enum dc_gpu_mem_alloc_type type, void *pvMem) { struct amdgpu_device *adev = ctx->driver_context; struct dal_allocation *da; /* walk the da list in DM */ list_for_each_entry(da, &adev->dm.da_list, list) { if (pvMem == da->cpu_ptr) { amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); list_del(&da->list); kfree(da); break; } } } bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) { enum dc_irq_source irq_source; bool ret; irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; ret = dc_interrupt_set(ctx->dc, irq_source, enable); DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n", enable ? "en" : "dis", ret); return ret; } void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) { /* TODO: virtual DPCD */ struct dc_link *link = stream->link; union down_spread_ctrl old_downspread; union down_spread_ctrl new_downspread; if (link->aux_access_disabled) return; if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, &old_downspread.raw, sizeof(old_downspread))) return; new_downspread.raw = old_downspread.raw; new_downspread.bits.IGNORE_MSA_TIMING_PARAM = (stream->ignore_msa_timing_param) ? 
1 : 0; if (new_downspread.raw != old_downspread.raw) dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, &new_downspread.raw, sizeof(new_downspread)); } bool dm_helpers_dp_handle_test_pattern_request( struct dc_context *ctx, const struct dc_link *link, union link_test_pattern dpcd_test_pattern, union test_misc dpcd_test_params) { enum dp_test_pattern test_pattern; enum dp_test_pattern_color_space test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED; enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; struct pipe_ctx *pipe_ctx = NULL; struct amdgpu_dm_connector *aconnector = link->priv; int i; for (i = 0; i < MAX_PIPES; i++) { if (pipes[i].stream == NULL) continue; if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { pipe_ctx = &pipes[i]; break; } } if (pipe_ctx == NULL) return false; switch (dpcd_test_pattern.bits.PATTERN) { case LINK_TEST_PATTERN_COLOR_RAMP: test_pattern = DP_TEST_PATTERN_COLOR_RAMP; break; case LINK_TEST_PATTERN_VERTICAL_BARS: test_pattern = DP_TEST_PATTERN_VERTICAL_BARS; break; /* black and white */ case LINK_TEST_PATTERN_COLOR_SQUARES: test_pattern = (dpcd_test_params.bits.DYN_RANGE == TEST_DYN_RANGE_VESA ? DP_TEST_PATTERN_COLOR_SQUARES : DP_TEST_PATTERN_COLOR_SQUARES_CEA); break; default: test_pattern = DP_TEST_PATTERN_VIDEO_MODE; break; } if (dpcd_test_params.bits.CLR_FORMAT == 0) test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; else test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; switch (dpcd_test_params.bits.BPC) { case 0: // 6 bits requestColorDepth = COLOR_DEPTH_666; break; case 1: // 8 bits requestColorDepth = COLOR_DEPTH_888; break; case 2: // 10 bits requestColorDepth = COLOR_DEPTH_101010; break; case 3: // 12 bits requestColorDepth = COLOR_DEPTH_121212; break; default: break; } switch (dpcd_test_params.bits.CLR_FORMAT) { case 0: requestPixelEncoding = PIXEL_ENCODING_RGB; break; case 1: requestPixelEncoding = PIXEL_ENCODING_YCBCR422; break; case 2: requestPixelEncoding = PIXEL_ENCODING_YCBCR444; break; default: requestPixelEncoding = PIXEL_ENCODING_RGB; break; } if ((requestColorDepth != COLOR_DEPTH_UNDEFINED && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n", __func__, pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->timing.pixel_encoding, requestColorDepth, requestPixelEncoding); pipe_ctx->stream->timing.display_color_depth = requestColorDepth; pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; dc_link_update_dsc_config(pipe_ctx); aconnector->timing_changed = true; /* store current timing */ if (aconnector->timing_requested) *aconnector->timing_requested = pipe_ctx->stream->timing; else DC_LOG_ERROR("%s: timing storage failed\n", __func__); } dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, test_pattern_color_space, NULL, NULL, 0); return false; } void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) { // TODO } void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) { /* TODO: add periodic detection implementation */ } void dm_helpers_dp_mst_update_branch_bandwidth( 
struct dc_context *ctx, struct dc_link *link) { // TODO } static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id) { bool ret_val = false; switch (branch_dev_id) { case DP_BRANCH_DEVICE_ID_0060AD: case DP_BRANCH_DEVICE_ID_00E04C: case DP_BRANCH_DEVICE_ID_90CC24: ret_val = true; break; default: break; } return ret_val; } enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link) { struct dpcd_caps *dpcd_caps = &link->dpcd_caps; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; switch (dpcd_caps->dongle_type) { case DISPLAY_DONGLE_DP_HDMI_CONVERTER: if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true && dpcd_caps->allow_invalid_MSA_timing_param == true && dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id)) as_type = FREESYNC_TYPE_PCON_IN_WHITELIST; break; default: break; } return as_type; }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
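A hedged sketch, not taken from amdgpu_dm_helpers.c, of the read-modify-write DPCD sequence that dm_helpers_mst_enable_stream_features() performs through the dm_helpers_dp_read_dpcd/dm_helpers_dp_write_dpcd wrappers, expressed here directly with the drm_dp_dpcd_readb/writeb helpers; the function name and the assumed header location are mine.

#include <drm/display/drm_dp_helper.h>

static int example_set_ignore_msa(struct drm_dp_aux *aux, bool ignore)
{
	u8 val;
	int ret;

	/* read current DOWNSPREAD_CTRL, flip only the MSA-ignore bit, write it back */
	ret = drm_dp_dpcd_readb(aux, DP_DOWNSPREAD_CTRL, &val);
	if (ret < 0)
		return ret;

	if (ignore)
		val |= DP_MSA_TIMING_PAR_IGNORE_EN;
	else
		val &= ~DP_MSA_TIMING_PAR_IGNORE_EN;

	ret = drm_dp_dpcd_writeb(aux, DP_DOWNSPREAD_CTRL, val);
	return ret < 0 ? ret : 0;
}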
// SPDX-License-Identifier: MIT /* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_trace.h" #if defined(CONFIG_X86) #include <asm/fpu/api.h> #elif defined(CONFIG_PPC64) #include <asm/switch_to.h> #include <asm/cputable.h> #elif defined(CONFIG_ARM64) #include <asm/neon.h> #elif defined(CONFIG_LOONGARCH) #include <asm/fpu.h> #endif /** * DOC: DC FPU manipulation overview * * DC core uses FPU operations in multiple parts of the code, which requires a * more specialized way to manage these areas' entrance. To fulfill this * requirement, we created some wrapper functions that encapsulate * kernel_fpu_begin/end to better fit our need in the display component. In * summary, in this file, you can find functions related to FPU operation * management. */ static DEFINE_PER_CPU(int, fpu_recursion_depth); /** * dc_assert_fp_enabled - Check if FPU protection is enabled * * This function tells if the code is already under FPU protection or not. A * function that works as an API for a set of FPU operations can use this * function for checking if the caller invoked it after DC_FP_START(). For * example, take a look at dcn20_fpu.c file. */ inline void dc_assert_fp_enabled(void) { int *pcpu, depth = 0; pcpu = get_cpu_ptr(&fpu_recursion_depth); depth = *pcpu; put_cpu_ptr(&fpu_recursion_depth); ASSERT(depth >= 1); } /** * dc_fpu_begin - Enables FPU protection * @function_name: A string containing the function name for debug purposes * (usually __func__) * * @line: A line number where DC_FP_START was invoked for debug purpose * (usually __LINE__) * * This function is responsible for managing the use of kernel_fpu_begin() with * the advantage of providing an event trace for debugging. * * Note: Do not call this function directly; always use DC_FP_START(). 
*/ void dc_fpu_begin(const char *function_name, const int line) { int *pcpu; pcpu = get_cpu_ptr(&fpu_recursion_depth); *pcpu += 1; if (*pcpu == 1) { #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) migrate_disable(); kernel_fpu_begin(); #elif defined(CONFIG_PPC64) if (cpu_has_feature(CPU_FTR_VSX_COMP)) { preempt_disable(); enable_kernel_vsx(); } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { preempt_disable(); enable_kernel_altivec(); } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { preempt_disable(); enable_kernel_fp(); } #elif defined(CONFIG_ARM64) kernel_neon_begin(); #endif } TRACE_DCN_FPU(true, function_name, line, *pcpu); put_cpu_ptr(&fpu_recursion_depth); } /** * dc_fpu_end - Disable FPU protection * @function_name: A string containing the function name for debug purposes * @line: A-line number where DC_FP_END was invoked for debug purpose * * This function is responsible for managing the use of kernel_fpu_end() with * the advantage of providing an event trace for debugging. * * Note: Do not call this function directly; always use DC_FP_END(). */ void dc_fpu_end(const char *function_name, const int line) { int *pcpu; pcpu = get_cpu_ptr(&fpu_recursion_depth); *pcpu -= 1; if (*pcpu <= 0) { #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) kernel_fpu_end(); migrate_enable(); #elif defined(CONFIG_PPC64) if (cpu_has_feature(CPU_FTR_VSX_COMP)) { disable_kernel_vsx(); preempt_enable(); } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { disable_kernel_altivec(); preempt_enable(); } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { disable_kernel_fp(); preempt_enable(); } #elif defined(CONFIG_ARM64) kernel_neon_end(); #endif } TRACE_DCN_FPU(false, function_name, line, *pcpu); put_cpu_ptr(&fpu_recursion_depth); }
linux-master
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
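A minimal sketch, assuming DC_FP_START()/DC_FP_END() expand to the dc_fpu_begin()/dc_fpu_end() wrappers above (as the DOC comment describes), of how a caller is expected to bracket its floating-point math; the function below is hypothetical, not an actual DML routine.

static int example_scaled_bandwidth_khz(int bandwidth_khz)
{
	int result;

	DC_FP_START();                         /* per-CPU depth++, kernel_fpu_begin() on first entry */
	dc_assert_fp_enabled();                /* callee-side check described in the DOC comment */
	result = (int)(bandwidth_khz * 1.25);  /* keep all FP math inside the protected region */
	DC_FP_END();                           /* kernel_fpu_end() once the depth returns to 0 */

	return result;
}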
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce/dce_dmcu.h" #include "dc_edid_parser.h" bool dc_edid_parser_send_cea(struct dc *dc, int offset, int total_length, uint8_t *data, int length) { struct dmcu *dmcu = dc->res_pool->dmcu; if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu) && dmcu->funcs->send_edid_cea) { return dmcu->funcs->send_edid_cea(dmcu, offset, total_length, data, length); } return false; } bool dc_edid_parser_recv_cea_ack(struct dc *dc, int *offset) { struct dmcu *dmcu = dc->res_pool->dmcu; if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu) && dmcu->funcs->recv_edid_cea_ack) { return dmcu->funcs->recv_edid_cea_ack(dmcu, offset); } return false; } bool dc_edid_parser_recv_amd_vsdb(struct dc *dc, int *version, int *min_frame_rate, int *max_frame_rate) { struct dmcu *dmcu = dc->res_pool->dmcu; if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu) && dmcu->funcs->recv_amd_vsdb) { return dmcu->funcs->recv_amd_vsdb(dmcu, version, min_frame_rate, max_frame_rate); } return false; }
linux-master
drivers/gpu/drm/amd/display/dc/dc_edid_parser.c
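A hypothetical usage sketch (not part of dc_edid_parser.c) of the chunked handshake these wrappers imply: a CEA extension block is streamed to the DMCU piece by piece, with dc_edid_parser_recv_cea_ack() polled after each send. The 8-byte chunk size, the loop structure and all names here are assumptions.

static bool example_send_cea_block(struct dc *dc, uint8_t *cea, int total_length)
{
	const int chunk = 8;
	int offset = 0;
	int acked_offset = 0;

	while (offset < total_length) {
		int len = min(chunk, total_length - offset);

		if (!dc_edid_parser_send_cea(dc, offset, total_length, cea + offset, len))
			return false;

		/* firmware acks with the offset it expects next */
		if (!dc_edid_parser_recv_cea_ack(dc, &acked_offset))
			return false;

		offset += len;
	}

	return true;
}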
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc.h" #include "dc_dmub_srv.h" #include "../dmub/dmub_srv.h" #include "dm_helpers.h" #include "dc_hw_types.h" #include "core_types.h" #include "../basics/conversion.h" #include "cursor_reg_cache.h" #include "resource.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, struct dmub_srv *dmub) { dc_srv->dmub = dmub; dc_srv->ctx = dc->ctx; } struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub) { struct dc_dmub_srv *dc_srv = kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL); if (dc_srv == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dc_dmub_srv_construct(dc_srv, dc, dmub); return dc_srv; } void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) { if (*dmub_srv) { kfree(*dmub_srv); *dmub_srv = NULL; } } void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub = dc_dmub_srv->dmub; struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status; status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } } void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv) { struct dmub_srv *dmub = dmub_srv->dmub; struct dc_context *dc_ctx = dmub_srv->ctx; enum dmub_status status = DMUB_STATUS_OK; status = dmub_srv_clear_inbox0_ack(dmub); if (status != DMUB_STATUS_OK) { DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dmub_srv); } } void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv) { struct dmub_srv *dmub = dmub_srv->dmub; struct dc_context *dc_ctx = dmub_srv->ctx; enum dmub_status status = DMUB_STATUS_OK; status = dmub_srv_wait_for_inbox0_ack(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n"); dc_dmub_srv_log_diagnostic_data(dmub_srv); } } void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data) { struct dmub_srv *dmub = dmub_srv->dmub; struct dc_context *dc_ctx = dmub_srv->ctx; enum dmub_status status = DMUB_STATUS_OK; status = dmub_srv_send_inbox0_cmd(dmub, data); if (status != DMUB_STATUS_OK) { DC_ERROR("Error sending INBOX0 cmd\n"); dc_dmub_srv_log_diagnostic_data(dmub_srv); } } bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum 
dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); } bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) { struct dc_context *dc_ctx; struct dmub_srv *dmub; enum dmub_status status; int i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dc_ctx = dc_dmub_srv->ctx; dmub = dc_dmub_srv->dmub; for (i = 0 ; i < count; i++) { // Queue command status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ dmub_srv_cmd_execute(dmub); dmub_srv_wait_for_idle(dmub, 100000); /* Requeue the command. */ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); } if (status != DMUB_STATUS_OK) { DC_ERROR("Error queueing DMUB command: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { DC_ERROR("Error starting DMUB execution: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } // Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } // Copy data back from ring buffer into command if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); } return true; } bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub; struct dc_context *dc_ctx; union dmub_fw_boot_status boot_status; enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; dc_ctx = dc_dmub_srv->ctx; status = dmub_srv_get_fw_boot_status(dmub, &boot_status); if (status != DMUB_STATUS_OK) { DC_ERROR("Error querying DMUB boot status: error=%d\n", status); return false; } return boot_status.bits.optimized_init_done; } bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask) { struct dmub_srv *dmub; const uint32_t timeout = 30; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; return dmub_srv_send_gpint_command( dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK, stream_mask, timeout) == DMUB_STATUS_OK; } bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub; struct dc_context *dc_ctx; union dmub_fw_boot_status boot_status; enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; dc_ctx = dc_dmub_srv->ctx; status = dmub_srv_get_fw_boot_status(dmub, &boot_status); if (status != DMUB_STATUS_OK) { DC_ERROR("Error querying DMUB boot status: error=%d\n", status); return false; } return boot_status.bits.restore_required; } bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry) { struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; return dmub_srv_get_outbox0_msg(dmub, entry); } void dc_dmub_trace_event_control(struct dc *dc, bool enable) { dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable); } void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max) { union dmub_rb_cmd cmd = { 0 }; cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE; 
cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max; cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min; cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst; cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) { union dmub_rb_cmd cmd = { 0 }; cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER; cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst; cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream) { uint8_t pipes = 0; int i = 0; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) pipes = i; } return pipes; } static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context, struct pipe_ctx *head_pipe, struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data) { int j; int pipe_idx = 0; fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst; for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j]; if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) { fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst; } } fams_pipe_data->pipe_count = pipe_idx; } bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context) { union dmub_rb_cmd cmd = { 0 }; struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data; int i = 0, k = 0; int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it. uint8_t visual_confirm_enabled; int pipe_idx = 0; if (dc == NULL) return false; visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS; // Format command. cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL; cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate; cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled; if (should_manage_pstate) { for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->stream) continue; /* If FAMS is being used to support P-State and there is a stream * that does not use FAMS, we are in an FPO + VActive scenario. * Assign vactive stretch margin in this case. 
*/ if (!pipe->stream->fpo_in_use) { cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us; break; } pipe_idx++; } } for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000; config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz; config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz; config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps; config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream); dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]); k++; } } cmd.fw_assisted_mclk_switch.header.payload_bytes = sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header); // Send the command to the DMCUB. dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) { union dmub_rb_cmd cmd = { 0 }; if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) return; memset(&cmd, 0, sizeof(cmd)); /* Prepare fw command */ cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS; cmd.query_feature_caps.header.sub_type = 0; cmd.query_feature_caps.header.ret_status = 1; cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); /* If command was processed, copy feature caps to dmub srv */ if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_feature_caps.header.ret_status == 0) { memcpy(&dc_dmub_srv->dmub->feature_caps, &cmd.query_feature_caps.query_feature_caps_data, sizeof(struct dmub_feature_caps)); } } void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx) { union dmub_rb_cmd cmd = { 0 }; unsigned int panel_inst = 0; dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst); memset(&cmd, 0, sizeof(cmd)); // Prepare fw command cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR; cmd.visual_confirm_color.header.sub_type = 0; cmd.visual_confirm_color.header.ret_status = 1; cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data); cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; // If command was processed, copy feature caps to dmub srv if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.visual_confirm_color.header.ret_status == 0) { memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, &cmd.visual_confirm_color.visual_confirm_color_data, sizeof(struct dmub_visual_confirm_color)); } } /** * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command * * @dc: [in] current dc state * @subvp_pipe: [in] pipe_ctx for the SubVP pipe * @vblank_pipe: [in] pipe_ctx for the DRR pipe * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info * * Populate the DMCUB SubVP command with DRR pipe info. All the information * required for calculating the SubVP + DRR microschedule is populated here. * * High level algorithm: * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule * 3. 
Populate the drr_info with the min and max supported vtotal values */ static void populate_subvp_cmd_drr_info(struct dc *dc, struct pipe_ctx *subvp_pipe, struct pipe_ctx *vblank_pipe, struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data) { struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing; uint16_t drr_frame_us = 0; uint16_t min_drr_supported_us = 0; uint16_t max_drr_supported_us = 0; uint16_t max_drr_vblank_us = 0; uint16_t max_drr_mallregion_us = 0; uint16_t mall_region_us = 0; uint16_t prefetch_us = 0; uint16_t subvp_active_us = 0; uint16_t drr_active_us = 0; uint16_t min_vtotal_supported = 0; uint16_t max_vtotal_supported = 0; pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true; pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000), (((uint64_t)drr_timing->pix_clk_100hz * 100))); // P-State allow width and FW delays already included phantom_timing->v_addressable mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000), (((uint64_t)phantom_timing->pix_clk_100hz * 100))); min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us), (((uint64_t)drr_timing->h_total * 1000000))); prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000), (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000), (((uint64_t)main_timing->pix_clk_100hz * 100))); drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000), (((uint64_t)drr_timing->pix_clk_100hz * 100))); max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us; max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us; max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us; max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us), (((uint64_t)drr_timing->h_total * 1000000))); /* When calculating the max vtotal supported for SubVP + DRR cases, add * margin due to possible rounding errors (being off by 1 line in the * FW calculation can incorrectly push the P-State switch to wait 1 frame * longer). 
*/ max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us; pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported; pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported; pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us; } /** * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command * * @dc: [in] current dc state * @context: [in] new dc state * @cmd: [in] DMUB cmd to be populated with SubVP info * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd * * Populate the DMCUB SubVP command with VBLANK pipe info. All the information * required to calculate the microschedule for SubVP + VBLANK case is stored in * the pipe_data (subvp_data and vblank_data). Also check if the VBLANK pipe * is a DRR display -- if it is make a call to populate drr_info. */ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, struct dc_state *context, union dmub_rb_cmd *cmd, struct pipe_ctx *vblank_pipe, uint8_t cmd_pipe_index) { uint32_t i; struct pipe_ctx *pipe = NULL; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; // Find the SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) if (!resource_is_pipe_type(pipe, OTG_MASTER) || !resource_is_pipe_type(pipe, DPP_PIPE)) continue; // Find the SubVP pipe if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) break; } pipe_data->mode = VBLANK; pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz; pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch; pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total; pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total; pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx; pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start; pipe_data->pipe_config.vblank_data.vblank_end = vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable; if (vblank_pipe->stream->ignore_msa_timing_param) populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data); } /** * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case * * @dc: [in] current dc state * @context: [in] new dc state * @cmd: [in] DMUB cmd to be populated with SubVP info * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2) * * For SubVP + SubVP, we use a single vertical interrupt to start the * microschedule for both SubVP pipes. In order for this to work correctly, the * MALL REGION of both SubVP pipes must start at the same time. This function * lengthens the prefetch end to mall start delay of the SubVP pipe that has * the shorter prefetch so that both MALL REGION's will start at the same time. 
*/ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc, struct dc_state *context, union dmub_rb_cmd *cmd, struct pipe_ctx *subvp_pipes[]) { uint32_t subvp0_prefetch_us = 0; uint32_t subvp1_prefetch_us = 0; uint32_t prefetch_delta_us = 0; struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing; struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL; subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) * (uint64_t)phantom_timing0->h_total * 1000000), (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) * (uint64_t)phantom_timing1->h_total * 1000000), (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); // Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time) // should increase it's prefetch time to match the other if (subvp0_prefetch_us > subvp1_prefetch_us) { pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1]; prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us; pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) * ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)), ((uint64_t)phantom_timing1->h_total * 1000000)); } else if (subvp1_prefetch_us > subvp0_prefetch_us) { pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0]; prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us; pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) * ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)), ((uint64_t)phantom_timing0->h_total * 1000000)); } } /** * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command * * @dc: [in] current dc state * @context: [in] new dc state * @cmd: [in] DMUB cmd to be populated with SubVP info * @subvp_pipe: [in] pipe_ctx for the SubVP pipe * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd * * Populate the DMCUB SubVP command with SubVP pipe info. All the information * required to calculate the microschedule for the SubVP pipe is stored in the * pipe_data of the DMCUB SubVP command. 
*/ static void populate_subvp_cmd_pipe_info(struct dc *dc, struct dc_state *context, union dmub_rb_cmd *cmd, struct pipe_ctx *subvp_pipe, uint8_t cmd_pipe_index) { uint32_t j; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den; pipe_data->mode = SUBVP; pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz; pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total; pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total; pipe_data->pipe_config.subvp_data.main_vblank_start = main_timing->v_total - main_timing->v_front_porch; pipe_data->pipe_config.subvp_data.main_vblank_end = main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable; pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable; pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst; pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param; /* Calculate the scaling factor from the src and dst height. * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2. * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor" * * Make sure to combine stream and plane scaling together. */ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, &out_num_stream, &out_den_stream); reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height, &out_num_plane, &out_den_plane); reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den); pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num; pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den; // Prefetch lines is equal to VACTIVE + BP + VSYNC pipe_data->pipe_config.subvp_data.prefetch_lines = phantom_timing->v_total - phantom_timing->v_front_porch; // Round up pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) + ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000)); pipe_data->pipe_config.subvp_data.processing_delay_lines = div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) + ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000)); if (subvp_pipe->bottom_pipe) { pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx; } else if (subvp_pipe->next_odm_pipe) { pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx; } else { pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0; } // Find phantom pipe index based on phantom stream for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) { pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst; if (phantom_pipe->bottom_pipe) { 
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst; } else if (phantom_pipe->next_odm_pipe) { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst; } else { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0; } break; } } } /** * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command * * @dc: [in] current dc state * @context: [in] new dc state * @enable: [in] if true enables the pipes population * * This function loops through each pipe and populates the DMUB SubVP CMD info * based on the pipe (e.g. SubVP, VBLANK). */ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable) { uint8_t cmd_pipe_index = 0; uint32_t i, pipe_idx; uint8_t subvp_count = 0; union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; uint32_t wm_val_refclk = 0; memset(&cmd, 0, sizeof(cmd)); // FW command for SUBVP cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD; cmd.fw_assisted_mclk_switch_v2.header.payload_bytes = sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; /* For SubVP pipe count, only count the top most (ODM / MPC) pipe */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.type == SUBVP_MAIN) subvp_pipes[subvp_count++] = pipe; } if (enable) { // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->stream) continue; /* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe. * Any ODM or MPC splits being used in SubVP will be handled internally in * populate_subvp_cmd_pipe_info */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } else if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } pipe_idx++; } if (subvp_count == 2) { update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes); } cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us; cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us; // Store the original watermark value for this SubVP config so we can lower it when the // MCLK switch starts wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns * (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000; cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? 
wm_val_refclk : 0xFFFF; } dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) { if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data) return false; return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data); } void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_diagnostic_data diag_data = {0}; if (!dc_dmub_srv || !dc_dmub_srv->dmub) { DC_LOG_ERROR("%s: invalid parameters.", __func__); return; } if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); return; } DC_LOG_DEBUG("DMCUB STATE:"); DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version); DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]); DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]); DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]); DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]); DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]); DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]); DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]); DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]); DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]); DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]); DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]); DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]); DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]); DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); DC_LOG_DEBUG(" pc : %08x", diag_data.pc); DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr); DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr); DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size); DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr); DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr); DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size); DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled); DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset); DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset); DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en); DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled); DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled); } static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) { struct pipe_ctx *test_pipe, *split_pipe; const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; struct rect r1 = scl_data->recout, r2, r2_half; int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; int cur_layer = pipe_ctx->plane_state->layer_index; /** * Disable the cursor if there's another pipe above this with a * plane that contains this pipe's viewport to prevent double cursor * and incorrect scaling artifacts. 
*/ for (test_pipe = pipe_ctx->top_pipe; test_pipe; test_pipe = test_pipe->top_pipe) { // Skip invisible layer and pipe-split plane on same layer if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer) continue; r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; split_pipe = test_pipe; /** * There is another half plane on same layer because of * pipe-split, merge together per same height. */ for (split_pipe = pipe_ctx->top_pipe; split_pipe; split_pipe = split_pipe->top_pipe) if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { r2_half = split_pipe->plane_res.scl_data.recout; r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x; r2.width = r2.width + r2_half.width; r2_r = r2.x + r2.width; break; } if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b) return true; } return false; } static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state != NULL) { if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) return false; if (dc_can_pipe_disable_cursor(pipe_ctx)) return false; } if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1) return true; if (pipe_ctx->stream->link->replay_settings.config.replay_supported) return true; return false; } static void dc_build_cursor_update_payload0( struct pipe_ctx *pipe_ctx, uint8_t p_idx, struct dmub_cmd_update_cursor_payload0 *payload) { struct hubp *hubp = pipe_ctx->plane_res.hubp; unsigned int panel_inst = 0; if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst)) return; /* Payload: Cursor Rect is built from position & attribute * x & y are obtained from postion */ payload->cursor_rect.x = hubp->cur_rect.x; payload->cursor_rect.y = hubp->cur_rect.y; /* w & h are obtained from attribute */ payload->cursor_rect.width = hubp->cur_rect.w; payload->cursor_rect.height = hubp->cur_rect.h; payload->enable = hubp->pos.cur_ctl.bits.cur_enable; payload->pipe_idx = p_idx; payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; payload->panel_inst = panel_inst; } static void dc_build_cursor_position_update_payload0( struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx, const struct hubp *hubp, const struct dpp *dpp) { /* Hubp */ pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw; pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw; pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw; pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw; /* dpp */ pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw; pl->position_cfg.pipe_idx = p_idx; } static void dc_build_cursor_attribute_update_payload1( struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx, const struct hubp *hubp, const struct dpp *dpp) { /* Hubp */ pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH; pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR; pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw; pl_A->aHubp.size.raw = hubp->att.size.raw; pl_A->aHubp.settings.raw = hubp->att.settings.raw; /* dpp */ pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw; } /** * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command * * @pCtx: [in] pipe context * @pipe_idx: [in] pipe index * * This function would store the cursor related information and pass it into * dmub */ void 
dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
					&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for dmub command, the 2nd command is
	 * only assigned to store cursor attributes info.
	 * The 1st command can be viewed as 2 parts: the 1st part is for PSR/Replay data,
	 * the other is to store cursor position info.
	 *
	 * Command header type must be the same type if using multi_cmd_pending.
	 * Besides, while processing the 2nd command in the DMU, the sub type is useless,
	 * so it is meaningless to pass a sub type header with a different type.
	 */
	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; // To combine multi dmu cmd, 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; // Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Combine the 2 update_cursor_info commands and send them to the DMU */
		dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;
	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
	struct dmub_srv *dmub;
	enum dmub_status status;
	static const uint32_t timeout_us = 30;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	dmub = dc_dmub_srv->dmub;

	status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
					     0x0010, timeout_us);
	if (status != DMUB_STATUS_OK) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
					     0x0000, timeout_us);
	if (status != DMUB_STATUS_OK) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}
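/*
 * Editor's note: a minimal, self-contained sketch (not part of the driver file
 * above) of the microseconds-to-vtotal conversion that populate_subvp_cmd_drr_info()
 * performs earlier in this file with div64_u64(). All names below (us_to_vtotal,
 * example_*) are hypothetical and exist only to illustrate the arithmetic: a frame
 * period in microseconds is turned into a line count using htotal and a pixel clock
 * expressed in 100 Hz units, mirroring
 *   vtotal = pix_clk_100hz * 100 * period_us / (htotal * 1000000).
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t us_to_vtotal(uint64_t pix_clk_100hz, uint64_t htotal, uint64_t period_us)
{
	/* Pixel clock in Hz times the period in seconds gives total pixels;
	 * dividing by htotal gives the number of lines (vtotal). */
	return (pix_clk_100hz * 100ULL * period_us) / (htotal * 1000000ULL);
}

int main(void)
{
	/* Hypothetical 4K timing: 594 MHz pixel clock, htotal 4400. */
	uint64_t example_pix_clk_100hz = 5940000;
	uint64_t example_htotal = 4400;

	/* A 16666 us (~60 Hz) frame period maps back to roughly 2250 lines. */
	printf("vtotal ~= %llu\n",
	       (unsigned long long)us_to_vtotal(example_pix_clk_100hz,
						example_htotal, 16666));
	return 0;
}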
linux-master
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
/* * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ /* * dc_helper.c * * Created on: Aug 30, 2016 * Author: agrodzov */ #include <linux/delay.h> #include <linux/stdarg.h> #include "dm_services.h" #include "dc.h" #include "dc_dmub_srv.h" #include "reg_helper.h" static inline void submit_dmub_read_modify_write( struct dc_reg_helper_state *offload, const struct dc_context *ctx) { struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; offload->should_burst_write = (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); cmd_buf->header.payload_bytes = sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); offload->reg_seq_count = 0; offload->same_addr_count = 0; } static inline void submit_dmub_burst_write( struct dc_reg_helper_state *offload, const struct dc_context *ctx) { struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; cmd_buf->header.payload_bytes = sizeof(uint32_t) * offload->reg_seq_count; dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); offload->reg_seq_count = 0; } static inline void submit_dmub_reg_wait( struct dc_reg_helper_state *offload, const struct dc_context *ctx) { struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); offload->reg_seq_count = 0; } struct dc_reg_value_masks { uint32_t value; uint32_t mask; }; struct dc_reg_sequence { uint32_t addr; struct dc_reg_value_masks value_masks; }; static inline void set_reg_field_value_masks( struct dc_reg_value_masks *field_value_mask, uint32_t value, uint32_t mask, uint8_t shift) { ASSERT(mask != 0); field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift)); field_value_mask->mask = field_value_mask->mask | mask; } static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, va_list ap) { uint32_t shift, mask, field_value; int i = 1; /* gather all bits value/mask getting updated in this register */ set_reg_field_value_masks(field_value_mask, field_value1, mask1, shift1); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t); 
set_reg_field_value_masks(field_value_mask, field_value, mask, shift); i++; } } static void dmub_flush_buffer_execute( struct dc_reg_helper_state *offload, const struct dc_context *ctx) { submit_dmub_read_modify_write(offload, ctx); } static void dmub_flush_burst_write_buffer_execute( struct dc_reg_helper_state *offload, const struct dc_context *ctx) { submit_dmub_burst_write(offload, ctx); } static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, uint32_t reg_val) { struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; /* flush command if buffer is full */ if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX) dmub_flush_burst_write_buffer_execute(offload, ctx); if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE && addr != cmd_buf->addr) { dmub_flush_burst_write_buffer_execute(offload, ctx); return false; } cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE; cmd_buf->header.sub_type = 0; cmd_buf->addr = addr; cmd_buf->write_values[offload->reg_seq_count] = reg_val; offload->reg_seq_count++; return true; } static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr, struct dc_reg_value_masks *field_value_mask) { struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; struct dmub_cmd_read_modify_write_sequence *seq; /* flush command if buffer is full */ if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE && offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX) dmub_flush_buffer_execute(offload, ctx); if (offload->should_burst_write) { if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value)) return field_value_mask->value; else offload->should_burst_write = false; } /* pack commands */ cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE; cmd_buf->header.sub_type = 0; seq = &cmd_buf->seq[offload->reg_seq_count]; if (offload->reg_seq_count) { if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr) offload->same_addr_count++; else offload->same_addr_count = 0; } seq->addr = addr; seq->modify_mask = field_value_mask->mask; seq->modify_value = field_value_mask->value; offload->reg_seq_count++; return field_value_mask->value; } static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us) { struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT; cmd_buf->header.sub_type = 0; cmd_buf->reg_wait.addr = addr; cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift); cmd_buf->reg_wait.mask = mask; cmd_buf->reg_wait.time_out_us = time_out_us; } uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) 
{ struct dc_reg_value_masks field_value_mask = {0}; uint32_t reg_val; va_list ap; va_start(ap, field_value1); set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, field_value1, ap); va_end(ap); if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) return dmub_reg_value_pack(ctx, addr, &field_value_mask); /* todo: return void so we can decouple code running in driver from register states */ /* mmio write directly */ reg_val = dm_read_reg(ctx, addr); reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; dm_write_reg(ctx, addr, reg_val); return reg_val; } uint32_t generic_reg_set_ex(const struct dc_context *ctx, uint32_t addr, uint32_t reg_val, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) { struct dc_reg_value_masks field_value_mask = {0}; va_list ap; va_start(ap, field_value1); set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, field_value1, ap); va_end(ap); /* mmio write directly */ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) { return dmub_reg_value_burst_set_pack(ctx, addr, reg_val); /* todo: return void so we can decouple code running in driver from register states */ } dm_write_reg(ctx, addr, reg_val); return reg_val; } uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr, uint8_t shift, uint32_t mask, uint32_t *field_value) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value = get_reg_field_value_ex(reg_val, mask, shift); return reg_val; } uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, uint8_t shift2, uint32_t mask2, uint32_t *field_value2) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); return reg_val; } uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, uint8_t shift2, uint32_t mask2, uint32_t *field_value2, uint8_t shift3, uint32_t mask3, uint32_t *field_value3) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); return reg_val; } uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, uint8_t shift2, uint32_t mask2, uint32_t *field_value2, uint8_t shift3, uint32_t mask3, uint32_t *field_value3, uint8_t shift4, uint32_t mask4, uint32_t *field_value4) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); return reg_val; } uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, uint8_t shift2, uint32_t mask2, uint32_t *field_value2, uint8_t shift3, uint32_t mask3, uint32_t *field_value3, uint8_t shift4, uint32_t mask4, uint32_t *field_value4, uint8_t shift5, uint32_t mask5, uint32_t *field_value5) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); *field_value2 = 
get_reg_field_value_ex(reg_val, mask2, shift2); *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5); return reg_val; } uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, uint8_t shift2, uint32_t mask2, uint32_t *field_value2, uint8_t shift3, uint32_t mask3, uint32_t *field_value3, uint8_t shift4, uint32_t mask4, uint32_t *field_value4, uint8_t shift5, uint32_t mask5, uint32_t *field_value5, uint8_t shift6, uint32_t mask6, uint32_t *field_value6) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5); *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6); return reg_val; } uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, uint8_t shift2, uint32_t mask2, uint32_t *field_value2, uint8_t shift3, uint32_t mask3, uint32_t *field_value3, uint8_t shift4, uint32_t mask4, uint32_t *field_value4, uint8_t shift5, uint32_t mask5, uint32_t *field_value5, uint8_t shift6, uint32_t mask6, uint32_t *field_value6, uint8_t shift7, uint32_t mask7, uint32_t *field_value7) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5); *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6); *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7); return reg_val; } uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, uint8_t shift2, uint32_t mask2, uint32_t *field_value2, uint8_t shift3, uint32_t mask3, uint32_t *field_value3, uint8_t shift4, uint32_t mask4, uint32_t *field_value4, uint8_t shift5, uint32_t mask5, uint32_t *field_value5, uint8_t shift6, uint32_t mask6, uint32_t *field_value6, uint8_t shift7, uint32_t mask7, uint32_t *field_value7, uint8_t shift8, uint32_t mask8, uint32_t *field_value8) { uint32_t reg_val = dm_read_reg(ctx, addr); *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5); *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6); *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7); *field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8); return reg_val; } /* note: va version of this is pretty bad idea, since there is a output parameter pass by pointer * compiler won't be able to check for size match and is prone to stack corruption type of bugs uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr, int n, ...) 
{ uint32_t shift, mask; uint32_t *field_value; uint32_t reg_val; int i = 0; reg_val = dm_read_reg(ctx, addr); va_list ap; va_start(ap, n); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t *); *field_value = get_reg_field_value_ex(reg_val, mask, shift); i++; } va_end(ap); return reg_val; } */ void generic_reg_wait(const struct dc_context *ctx, uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value, unsigned int delay_between_poll_us, unsigned int time_out_num_tries, const char *func_name, int line) { uint32_t field_value; uint32_t reg_val; int i; if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) { dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value, delay_between_poll_us * time_out_num_tries); return; } /* * Something is terribly wrong if time out is > 3000ms. * 3000ms is the maximum time needed for SMU to pass values back. * This value comes from experiments. * */ ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000); for (i = 0; i <= time_out_num_tries; i++) { if (i) { if (delay_between_poll_us >= 1000) msleep(delay_between_poll_us/1000); else if (delay_between_poll_us > 0) udelay(delay_between_poll_us); } reg_val = dm_read_reg(ctx, addr); field_value = get_reg_field_value_ex(reg_val, mask, shift); if (field_value == condition_value) { if (i * delay_between_poll_us > 1000) DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", delay_between_poll_us * i / 1000, func_name, line); return; } } DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n", delay_between_poll_us, time_out_num_tries, func_name, line); BREAK_TO_DEBUGGER(); } void generic_write_indirect_reg(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index, uint32_t data) { dm_write_reg(ctx, addr_index, index); dm_write_reg(ctx, addr_data, data); } uint32_t generic_read_indirect_reg(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index) { uint32_t value = 0; // when reg read, there should not be any offload. if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) { ASSERT(false); } dm_write_reg(ctx, addr_index, index); value = dm_read_reg(ctx, addr_data); return value; } uint32_t generic_indirect_reg_get(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index, int n, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, ...) { uint32_t shift, mask, *field_value; uint32_t value = 0; int i = 1; va_list ap; va_start(ap, field_value1); value = generic_read_indirect_reg(ctx, addr_index, addr_data, index); *field_value1 = get_reg_field_value_ex(value, mask1, shift1); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t *); *field_value = get_reg_field_value_ex(value, mask, shift); i++; } va_end(ap); return value; } uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index, uint32_t reg_val, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) 
{ uint32_t shift, mask, field_value; int i = 1; va_list ap; va_start(ap, field_value1); reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t); reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift); i++; } generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val); va_end(ap); return reg_val; } uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx, uint32_t index, uint32_t reg_val, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) { uint32_t shift, mask, field_value; int i = 1; va_list ap; va_start(ap, field_value1); reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t); reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift); i++; } dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val); va_end(ap); return reg_val; } uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx, uint32_t index, int n, uint8_t shift1, uint32_t mask1, uint32_t *field_value1, ...) { uint32_t shift, mask, *field_value; uint32_t value = 0; int i = 1; va_list ap; va_start(ap, field_value1); value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index); *field_value1 = get_reg_field_value_ex(value, mask1, shift1); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t *); *field_value = get_reg_field_value_ex(value, mask, shift); i++; } va_end(ap); return value; } void reg_sequence_start_gather(const struct dc_context *ctx) { /* if reg sequence is supported and enabled, set flag to * indicate we want to have REG_SET, REG_UPDATE macro build * reg sequence command buffer rather than MMIO directly. */ if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) { struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; /* caller sequence mismatch. need to debug caller. offload will not work!!! 
*/ ASSERT(!offload->gather_in_progress); offload->gather_in_progress = true; } } void reg_sequence_start_execute(const struct dc_context *ctx) { struct dc_reg_helper_state *offload; if (!ctx->dmub_srv) return; offload = &ctx->dmub_srv->reg_helper_offload; if (offload && offload->gather_in_progress) { offload->gather_in_progress = false; offload->should_burst_write = false; switch (offload->cmd_data.cmd_common.header.type) { case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE: submit_dmub_read_modify_write(offload, ctx); break; case DMUB_CMD__REG_REG_WAIT: submit_dmub_reg_wait(offload, ctx); break; case DMUB_CMD__REG_SEQ_BURST_WRITE: submit_dmub_burst_write(offload, ctx); break; default: return; } } } void reg_sequence_wait_done(const struct dc_context *ctx) { /* callback to DM to poll for last submission done*/ struct dc_reg_helper_state *offload; if (!ctx->dmub_srv) return; offload = &ctx->dmub_srv->reg_helper_offload; if (offload && ctx->dc->debug.dmub_offload_enabled && !ctx->dc->debug.dmcub_emulation) { dc_dmub_srv_wait_idle(ctx->dmub_srv); } } char *dce_version_to_string(const int version) { switch (version) { case DCE_VERSION_8_0: return "DCE 8.0"; case DCE_VERSION_8_1: return "DCE 8.1"; case DCE_VERSION_8_3: return "DCE 8.3"; case DCE_VERSION_10_0: return "DCE 10.0"; case DCE_VERSION_11_0: return "DCE 11.0"; case DCE_VERSION_11_2: return "DCE 11.2"; case DCE_VERSION_11_22: return "DCE 11.22"; case DCE_VERSION_12_0: return "DCE 12.0"; case DCE_VERSION_12_1: return "DCE 12.1"; case DCN_VERSION_1_0: return "DCN 1.0"; case DCN_VERSION_1_01: return "DCN 1.0.1"; case DCN_VERSION_2_0: return "DCN 2.0"; case DCN_VERSION_2_1: return "DCN 2.1"; case DCN_VERSION_2_01: return "DCN 2.0.1"; case DCN_VERSION_3_0: return "DCN 3.0"; case DCN_VERSION_3_01: return "DCN 3.0.1"; case DCN_VERSION_3_02: return "DCN 3.0.2"; case DCN_VERSION_3_03: return "DCN 3.0.3"; case DCN_VERSION_3_1: return "DCN 3.1"; case DCN_VERSION_3_14: return "DCN 3.1.4"; case DCN_VERSION_3_15: return "DCN 3.1.5"; case DCN_VERSION_3_16: return "DCN 3.1.6"; case DCN_VERSION_3_2: return "DCN 3.2"; case DCN_VERSION_3_21: return "DCN 3.2.1"; default: return "Unknown"; } }
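/*
 * Editor's note: a small standalone sketch (not part of dc_helper.c above) of
 * the mask/shift arithmetic used by set_reg_field_value_masks() and
 * get_reg_field_value_ex(): update only the masked bits of a register value,
 * then read the field back out. The names (pack_field, unpack_field) and the
 * example mask/shift are hypothetical and chosen purely for illustration.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_field(uint32_t reg_val, uint32_t value, uint32_t mask, uint8_t shift)
{
	/* Clear the field, then OR in the shifted value limited to the mask. */
	return (reg_val & ~mask) | (mask & (value << shift));
}

static uint32_t unpack_field(uint32_t reg_val, uint32_t mask, uint8_t shift)
{
	return (reg_val & mask) >> shift;
}

int main(void)
{
	/* Hypothetical 4-bit field at bits [7:4] of a 32-bit register. */
	const uint32_t mask = 0x000000F0;
	const uint8_t shift = 4;
	uint32_t reg = 0xDEADBE0F;

	reg = pack_field(reg, 0xA, mask, shift);
	assert(unpack_field(reg, mask, shift) == 0xA);
	assert(reg == 0xDEADBEAF); /* only the masked bits changed */
	return 0;
}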
linux-master
drivers/gpu/drm/amd/display/dc/dc_helper.c
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#include "dcn303_hwseq.h"

#include "dce/dce_hwseq.h"

#include "reg_helper.h"
#include "dc.h"

#define DC_LOGGER_INIT(logger)

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg
#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on)
{
	/*DCN303 removes PG registers*/
}

void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
{
	/*DCN303 removes PG registers*/
}

void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on)
{
	/*DCN303 removes PG registers*/
}

void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
{
	/*DCN303 removes PG registers*/
}
linux-master
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#include "dcn303_hwseq.h"
#include "dcn30/dcn30_init.h"
#include "dc.h"

#include "dcn303_init.h"

void dcn303_hw_sequencer_construct(struct dc *dc)
{
	dcn30_hw_sequencer_construct(dc);

	dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control;
	dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control;
	dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control;
	dc->hwseq->funcs.enable_power_gating_plane = dcn303_enable_power_gating_plane;
}
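/*
 * Editor's note: dcn303_hw_sequencer_construct() above first runs the DCN30
 * constructor and then overrides a few function pointers with DCN303-specific
 * no-ops. Below is a minimal, self-contained sketch of that "inherit then
 * override" pattern; struct hwseq_ops_demo and the demo_* functions are
 * hypothetical and only mirror the shape of the driver code, not its types.
 */
#include <stdbool.h>
#include <stdio.h>

struct hwseq_ops_demo {
	void (*dpp_pg_control)(unsigned int inst, bool power_on);
};

static void demo_base_dpp_pg_control(unsigned int inst, bool power_on)
{
	printf("base: power %s DPP %u\n", power_on ? "on" : "off", inst);
}

static void demo_noop_dpp_pg_control(unsigned int inst, bool power_on)
{
	/* Variant without PG registers: nothing to program. */
	(void)inst;
	(void)power_on;
}

static void demo_base_construct(struct hwseq_ops_demo *ops)
{
	ops->dpp_pg_control = demo_base_dpp_pg_control;
}

static void demo_variant_construct(struct hwseq_ops_demo *ops)
{
	demo_base_construct(ops);				/* inherit the base behaviour */
	ops->dpp_pg_control = demo_noop_dpp_pg_control;	/* override a single hook */
}

int main(void)
{
	struct hwseq_ops_demo ops;

	demo_variant_construct(&ops);
	ops.dpp_pg_control(0, true); /* a no-op in the variant */
	return 0;
}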
linux-master
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
// SPDX-License-Identifier: MIT /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * * Authors: AMD */ #include "dcn303_init.h" #include "dcn303_resource.h" #include "dcn303_dccg.h" #include "irq/dcn303/irq_service_dcn303.h" #include "dcn30/dcn30_dio_link_encoder.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn30/dcn30_dpp.h" #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_hubbub.h" #include "dcn30/dcn30_hubp.h" #include "dcn30/dcn30_mmhubbub.h" #include "dcn30/dcn30_mpc.h" #include "dcn30/dcn30_opp.h" #include "dcn30/dcn30_optc.h" #include "dcn30/dcn30_resource.h" #include "dcn20/dcn20_dsc.h" #include "dcn20/dcn20_resource.h" #include "dml/dcn30/dcn30_fpu.h" #include "dcn10/dcn10_resource.h" #include "link.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" #include "dce/dce_aux.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce/dce_i2c_hw.h" #include "dce/dce_panel_cntl.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" #include "clk_mgr.h" #include "hw_sequencer_private.h" #include "reg_helper.h" #include "resource.h" #include "vm_helper.h" #include "sienna_cichlid_ip_offset.h" #include "dcn/dcn_3_0_3_offset.h" #include "dcn/dcn_3_0_3_sh_mask.h" #include "dpcs/dpcs_3_0_3_offset.h" #include "dpcs/dpcs_3_0_3_sh_mask.h" #include "nbio/nbio_2_3_offset.h" #include "dml/dcn303/dcn303_fpu.h" #define DC_LOGGER_INIT(logger) static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, .max_downscale_src_width = 7680,/*upto 8K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .exit_idle_opt_for_cursor_updates = true, .disable_idle_power_optimizations = false, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, .disallow_replay = false, }, }; enum dcn303_clk_src_array_id { DCN303_CLK_SRC_PLL0, DCN303_CLK_SRC_PLL1, DCN303_CLK_SRC_TOTAL }; static const struct resource_caps res_cap_dcn303 = { .num_timing_generator = 2, .num_opp = 2, .num_video_plane = 2, .num_audio = 2, .num_stream_encoder = 2, .num_dwb = 1, .num_ddc = 2, .num_vmid = 16, .num_mpc_3dlut = 1, .num_dsc = 2, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true, .p010 = true, .ayuv = false, }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 16000 }, .max_downscale_factor = { .argb8888 = 600, .nv12 = 600, .fp16 = 600 }, 16, 16 }; /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* DCN */ #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name #define SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name #define 
SRI2(reg_name, block, id)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define DCCG_SRII(reg_name, block, id)\ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define VUPDATE_SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ mm ## reg_name ## _ ## block ## id #define SRII_DWB(reg_name, temp_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## temp_name #define SF_DWB2(reg_name, block, id, field_name, post_fix) \ .field_name = reg_name ## __ ## field_name ## post_fix #define SRII_MPC_RMU(reg_name, block, id)\ .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN30(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN30(_MASK) }; #define vmid_regs(id)\ [id] = { DCN20_VMID_REG_LIST(id) } static const struct dcn_vmid_registers vmid_regs[] = { vmid_regs(0), vmid_regs(1), vmid_regs(2), vmid_regs(3), vmid_regs(4), vmid_regs(5), vmid_regs(6), vmid_regs(7), vmid_regs(8), vmid_regs(9), vmid_regs(10), vmid_regs(11), vmid_regs(12), vmid_regs(13), vmid_regs(14), vmid_regs(15) }; static const struct dcn20_vmid_shift vmid_shifts = { DCN20_VMID_MASK_SH_LIST(__SHIFT) }; static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; static struct hubbub *dcn303_hubbub_create(struct dc_context *ctx) { int i; struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub3) return NULL; hubbub3_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); for (i = 0; i < res_cap_dcn303.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub3->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } return &hubbub3->base; } #define vpg_regs(id)\ [id] = { VPG_DCN3_REG_LIST(id) } static const struct dcn30_vpg_registers vpg_regs[] = { vpg_regs(0), vpg_regs(1), vpg_regs(2) }; static const struct dcn30_vpg_shift vpg_shift = { DCN3_VPG_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_vpg_mask vpg_mask = { DCN3_VPG_MASK_SH_LIST(_MASK) }; static struct vpg *dcn303_vpg_create(struct dc_context *ctx, uint32_t inst) { struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); if (!vpg3) return NULL; vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); return &vpg3->base; } #define afmt_regs(id)\ [id] = { AFMT_DCN3_REG_LIST(id) } static const struct dcn30_afmt_registers afmt_regs[] = { afmt_regs(0), afmt_regs(1), afmt_regs(2) }; static const struct dcn30_afmt_shift afmt_shift = { DCN3_AFMT_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_afmt_mask afmt_mask = { DCN3_AFMT_MASK_SH_LIST(_MASK) }; static struct afmt *dcn303_afmt_create(struct dc_context *ctx, uint32_t inst) { struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); if (!afmt3) return NULL; afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); return &afmt3->base; } #define audio_regs(id)\ [id] = { AUD_COMMON_REG_LIST(id) } static const struct dce_audio_registers audio_regs[] 
= { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6) }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; static struct audio *dcn303_create_audio(struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } #define stream_enc_regs(id)\ [id] = { SE_DCN3_REG_LIST(id) } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1) }; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; struct afmt *afmt; int vpg_inst; int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ if (eng_id <= ENGINE_ID_DIGB) { vpg_inst = eng_id; afmt_inst = eng_id; } else return NULL; enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn303_vpg_create(ctx, vpg_inst); afmt = dcn303_afmt_create(ctx, afmt_inst); if (!enc1 || !vpg || !afmt) { kfree(enc1); kfree(vpg); kfree(afmt); return NULL; } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } #define clk_src_regs(index, pllid)\ [index] = { CS_COMMON_REG_LIST_DCN3_03(index, pllid) } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN303_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN303_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN303_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dcn303_hwseq_create(struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } #define hubp_regs(id)\ [id] = { HUBP_REG_LIST_DCN30(id) } static const struct dcn_hubp2_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1) }; static const struct dcn_hubp2_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_hubp2_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN30(_MASK) }; 
static struct hubp *dcn303_hubp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } #define dpp_regs(id)\ [id] = { DPP_REG_LIST_DCN30(id) } static const struct dcn3_dpp_registers dpp_regs[] = { dpp_regs(0), dpp_regs(1) }; static const struct dcn3_dpp_shift tf_shift = { DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) }; static const struct dcn3_dpp_mask tf_mask = { DPP_REG_LIST_SH_MASK_DCN30(_MASK) }; static struct dpp *dcn303_dpp_create(struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); if (!dpp) return NULL; if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) return &dpp->base; BREAK_TO_DEBUGGER(); kfree(dpp); return NULL; } #define opp_regs(id)\ [id] = { OPP_REG_LIST_DCN30(id) } static const struct dcn20_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1) }; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; static struct output_pixel_processor *dcn303_opp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } #define optc_regs(id)\ [id] = { OPTC_COMMON_REG_LIST_DCN3_0(id) } static const struct dcn_optc_registers optc_regs[] = { optc_regs(0), optc_regs(1) }; static const struct dcn_optc_shift optc_shift = { OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_optc_mask optc_mask = { OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static struct timing_generator *dcn303_timing_generator_create(struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &optc_regs[instance]; tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; dcn30_timing_generator_init(tgn10); return &tgn10->base; } static const struct dcn30_mpc_registers mpc_regs = { MPC_REG_LIST_DCN3_0(0), MPC_REG_LIST_DCN3_0(1), MPC_OUT_MUX_REG_LIST_DCN3_0(0), MPC_OUT_MUX_REG_LIST_DCN3_0(1), MPC_RMU_GLOBAL_REG_LIST_DCN3AG, MPC_RMU_REG_LIST_DCN3AG(0), MPC_DWB_MUX_REG_LIST_DCN3_0(0), }; static const struct dcn30_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN303(__SHIFT) }; static const struct dcn30_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN303(_MASK) }; static struct mpc *dcn303_mpc_create(struct dc_context *ctx, int num_mpcc, int num_rmu) { struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); if (!mpc30) return NULL; dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); return &mpc30->base; } #define dsc_regsDCN20(id)\ [id] = { DSC_REG_LIST_DCN20(id) } static const struct dcn20_dsc_registers dsc_regs[] = { dsc_regsDCN20(0), dsc_regsDCN20(1) }; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; static struct display_stream_compressor *dcn303_dsc_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if 
(!dsc) { BREAK_TO_DEBUGGER(); return NULL; } dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } #define dwbc_regs_dcn3(id)\ [id] = { DWBC_COMMON_REG_LIST_DCN30(id) } static const struct dcn30_dwbc_registers dwbc30_regs[] = { dwbc_regs_dcn3(0) }; static const struct dcn30_dwbc_shift dwbc30_shift = { DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_dwbc_mask dwbc30_mask = { DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static bool dcn303_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); if (!dwbc30) { dm_error("DC: failed to create dwbc30!\n"); return false; } dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); pool->dwbc[i] = &dwbc30->base; } return true; } #define mcif_wb_regs_dcn3(id)\ [id] = { MCIF_WB_COMMON_REG_LIST_DCN30(id) } static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { mcif_wb_regs_dcn3(0) }; static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static bool dcn303_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); if (!mcif_wb30) { dm_error("DC: failed to create mcif_wb30!\n"); return false; } dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); pool->mcif_wb[i] = &mcif_wb30->base; } return true; } #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST0(id), \ .AUXN_IMPCAL = 0, \ .AUXP_IMPCAL = 0, \ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1) }; static const struct dce110_aux_registers_shift aux_shift = { DCN_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCN_AUX_MASK_SH_LIST(_MASK) }; static struct dce_aux *dcn303_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2) }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; static struct dce_i2c_hw *dcn303_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, 
.fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; #define link_regs(id, phyid)\ [id] = {\ LE_DCN3_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0, A), link_regs(1, B) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT), DPCS_DCN2_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK), DPCS_DCN2_MASK_SH_LIST(_MASK) }; #define aux_regs(id)\ [id] = { DCN2_AUX_REG_LIST(id) } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1) }; #define hpd_regs(id)\ [id] = { HPD_REG_LIST(id) } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1) }; static struct link_encoder *dcn303_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc20->enc10.base; } static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCN_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; static struct panel_cntl *dcn303_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } static void read_dce_straps(struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn303_create_audio, .create_stream_encoder = dcn303_stream_encoder_create, .create_hwseq = dcn303_hwseq_create, }; static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; if (ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) return true; return false; } static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) { struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_03_soc; struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_03_ip; DC_LOGGER_INIT(dc->ctx->logger); if (!is_soc_bounding_box_valid(dc)) { DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); return false; } loaded_ip->max_num_otg = pool->pipe_count; loaded_ip->max_num_dpp = pool->pipe_count; loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = { 0 }; if (dc->ctx->dc_bios->funcs->get_soc_bb_info( dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { DC_FP_START(); 
dcn303_fpu_init_soc_bounding_box(bb_info); DC_FP_END(); } } return true; } static void dcn303_resource_destruct(struct resource_pool *pool) { unsigned int i; for (i = 0; i < pool->stream_enc_count; i++) { if (pool->stream_enc[i] != NULL) { if (pool->stream_enc[i]->vpg != NULL) { kfree(DCN30_VPG_FROM_VPG(pool->stream_enc[i]->vpg)); pool->stream_enc[i]->vpg = NULL; } if (pool->stream_enc[i]->afmt != NULL) { kfree(DCN30_AFMT_FROM_AFMT(pool->stream_enc[i]->afmt)); pool->stream_enc[i]->afmt = NULL; } kfree(DCN10STRENC_FROM_STRENC(pool->stream_enc[i])); pool->stream_enc[i] = NULL; } } for (i = 0; i < pool->res_cap->num_dsc; i++) { if (pool->dscs[i] != NULL) dcn20_dsc_destroy(&pool->dscs[i]); } if (pool->mpc != NULL) { kfree(TO_DCN20_MPC(pool->mpc)); pool->mpc = NULL; } if (pool->hubbub != NULL) { kfree(pool->hubbub); pool->hubbub = NULL; } for (i = 0; i < pool->pipe_count; i++) { if (pool->dpps[i] != NULL) { kfree(TO_DCN20_DPP(pool->dpps[i])); pool->dpps[i] = NULL; } if (pool->hubps[i] != NULL) { kfree(TO_DCN20_HUBP(pool->hubps[i])); pool->hubps[i] = NULL; } if (pool->irqs != NULL) dal_irq_service_destroy(&pool->irqs); } for (i = 0; i < pool->res_cap->num_ddc; i++) { if (pool->engines[i] != NULL) dce110_engine_destroy(&pool->engines[i]); if (pool->hw_i2cs[i] != NULL) { kfree(pool->hw_i2cs[i]); pool->hw_i2cs[i] = NULL; } if (pool->sw_i2cs[i] != NULL) { kfree(pool->sw_i2cs[i]); pool->sw_i2cs[i] = NULL; } } for (i = 0; i < pool->res_cap->num_opp; i++) { if (pool->opps[i] != NULL) pool->opps[i]->funcs->opp_destroy(&pool->opps[i]); } for (i = 0; i < pool->res_cap->num_timing_generator; i++) { if (pool->timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->timing_generators[i])); pool->timing_generators[i] = NULL; } } for (i = 0; i < pool->res_cap->num_dwb; i++) { if (pool->dwbc[i] != NULL) { kfree(TO_DCN30_DWBC(pool->dwbc[i])); pool->dwbc[i] = NULL; } if (pool->mcif_wb[i] != NULL) { kfree(TO_DCN30_MMHUBBUB(pool->mcif_wb[i])); pool->mcif_wb[i] = NULL; } } for (i = 0; i < pool->audio_count; i++) { if (pool->audios[i]) dce_aud_destroy(&pool->audios[i]); } for (i = 0; i < pool->clk_src_count; i++) { if (pool->clock_sources[i] != NULL) dcn20_clock_source_destroy(&pool->clock_sources[i]); } if (pool->dp_clock_source != NULL) dcn20_clock_source_destroy(&pool->dp_clock_source); for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { if (pool->mpc_lut[i] != NULL) { dc_3dlut_func_release(pool->mpc_lut[i]); pool->mpc_lut[i] = NULL; } if (pool->mpc_shaper[i] != NULL) { dc_transfer_func_release(pool->mpc_shaper[i]); pool->mpc_shaper[i] = NULL; } } for (i = 0; i < pool->pipe_count; i++) { if (pool->multiple_abms[i] != NULL) dce_abm_destroy(&pool->multiple_abms[i]); } if (pool->psr != NULL) dmub_psr_destroy(&pool->psr); if (pool->dccg != NULL) dcn_dccg_destroy(&pool->dccg); if (pool->oem_device != NULL) { struct dc *dc = pool->oem_device->ctx->dc; dc->link_srv->destroy_ddc_service(&pool->oem_device); } } static void dcn303_destroy_resource_pool(struct resource_pool **pool) { dcn303_resource_destruct(*pool); kfree(*pool); *pool = NULL; } static void dcn303_get_panel_config_defaults(struct dc_panel_config *panel_config) { *panel_config = panel_config_defaults; } void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { DC_FP_START(); dcn303_fpu_update_bw_bounding_box(dc, bw_params); DC_FP_END(); } static struct resource_funcs dcn303_res_pool_funcs = { .destroy = dcn303_destroy_resource_pool, .link_enc_create = dcn303_link_encoder_create, .panel_cntl_create = dcn303_panel_cntl_create, 
.validate_bandwidth = dcn30_validate_bandwidth, .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, .set_mcif_arb_params = dcn30_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, .update_bw_bounding_box = dcn303_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn303_get_panel_config_defaults, }; static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN3_03() }; static const struct dccg_shift dccg_shift = { DCCG_MASK_SH_LIST_DCN3_03(__SHIFT) }; static const struct dccg_mask dccg_mask = { DCCG_MASK_SH_LIST_DCN3_03(_MASK) }; #define abm_regs(id)\ [id] = { ABM_DCN302_REG_LIST(id) } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), abm_regs(1) }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN30(_MASK) }; static bool dcn303_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct resource_pool *pool) { int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; struct ddc_service_init_data ddc_init_data; ctx->dc_bios->regs = &bios_regs; pool->res_cap = &res_cap_dcn303; pool->funcs = &dcn303_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->underlay_pipe_index = NO_UNDERLAY_PIPE; pool->pipe_count = pool->res_cap->num_timing_generator; pool->mpcc_count = pool->res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by derfault*/ dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; /* total size = mall per channel * num channels * 1024 * 1024 */ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1024 * 1024; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.max_slave_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; dc->caps.color.dpp.input_lut_shared = 0; dc->caps.color.dpp.icsc = 1; dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr dc->caps.color.dpp.dgam_rom_caps.srgb = 1; dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; dc->caps.color.dpp.dgam_rom_caps.pq = 1; dc->caps.color.dpp.dgam_rom_caps.hlg 
= 1; dc->caps.color.dpp.post_csc = 1; dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; dc->caps.color.dpp.hw_3d_lut = 1; dc->caps.color.dpp.ogam_ram = 1; // no OGAM ROM on DCN3 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.dpp.ogam_rom_caps.pq = 0; dc->caps.color.dpp.ogam_rom_caps.hlg = 0; dc->caps.color.dpp.ocsc = 0; dc->caps.color.mpc.gamut_remap = 1; dc->caps.color.mpc.num_3dluts = pool->res_cap->num_mpc_3dlut; //3 dc->caps.color.mpc.ogam_ram = 1; dc->caps.color.mpc.ogam_rom_caps.srgb = 0; dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR caps */ if (ctx->dc_bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; uint8_t is_vbios_lttpr_enable = 0; bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; } if (ctx->dc_bios->funcs->get_lttpr_interop) { enum bp_result bp_query_result; uint8_t is_vbios_interop_enabled = 0; bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); /************************************************* * Create resources * *************************************************/ /* Clock Sources for Pixel Clock*/ pool->clock_sources[DCN303_CLK_SRC_PLL0] = dcn303_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->clock_sources[DCN303_CLK_SRC_PLL1] = dcn303_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->clk_src_count = DCN303_CLK_SRC_TOTAL; /* todo: not reuse phy_pll registers */ pool->dp_clock_source = dcn303_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->clk_src_count; i++) { if (pool->clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } } /* DCCG */ pool->dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); if (pool->dccg == NULL) { dm_error("DC: failed to create dccg!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* PP Lib and SMU interfaces */ init_soc_bounding_box(dc, pool); /* DML */ dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); /* IRQ */ init_data.ctx = dc->ctx; pool->irqs = dal_irq_service_dcn303_create(&init_data); if (!pool->irqs) goto create_fail; /* HUBBUB */ pool->hubbub = dcn303_hubbub_create(ctx); if (pool->hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto create_fail; } /* HUBPs, DPPs, OPPs and TGs */ for (i = 0; i < pool->pipe_count; i++) { pool->hubps[i] = dcn303_hubp_create(ctx, i); if (pool->hubps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubps!\n"); goto create_fail; } pool->dpps[i] = dcn303_dpp_create(ctx, i); if (pool->dpps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dpps!\n"); goto 
create_fail; } } for (i = 0; i < pool->res_cap->num_opp; i++) { pool->opps[i] = dcn303_opp_create(ctx, i); if (pool->opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto create_fail; } } for (i = 0; i < pool->res_cap->num_timing_generator; i++) { pool->timing_generators[i] = dcn303_timing_generator_create(ctx, i); if (pool->timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; } } pool->timing_generator_count = i; /* PSR */ pool->psr = dmub_psr_create(ctx); if (pool->psr == NULL) { dm_error("DC: failed to create psr!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* ABM */ for (i = 0; i < pool->res_cap->num_timing_generator; i++) { pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); if (pool->multiple_abms[i] == NULL) { dm_error("DC: failed to create abm for pipe %d!\n", i); BREAK_TO_DEBUGGER(); goto create_fail; } } /* MPC and DSC */ pool->mpc = dcn303_mpc_create(ctx, pool->mpcc_count, pool->res_cap->num_mpc_3dlut); if (pool->mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto create_fail; } for (i = 0; i < pool->res_cap->num_dsc; i++) { pool->dscs[i] = dcn303_dsc_create(ctx, i); if (pool->dscs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create display stream compressor %d!\n", i); goto create_fail; } } /* DWB and MMHUBBUB */ if (!dcn303_dwbc_create(ctx, pool)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dwbc!\n"); goto create_fail; } if (!dcn303_mmhubbub_create(ctx, pool)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mcif_wb!\n"); goto create_fail; } /* AUX and I2C */ for (i = 0; i < pool->res_cap->num_ddc; i++) { pool->engines[i] = dcn303_aux_engine_create(ctx, i); if (pool->engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC:failed to create aux engine!!\n"); goto create_fail; } pool->hw_i2cs[i] = dcn303_i2c_hw_create(ctx, i); if (pool->hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC:failed to create hw i2c!!\n"); goto create_fail; } pool->sw_i2cs[i] = NULL; } /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ dcn303_hw_sequencer_construct(dc); dc->caps.max_planes = pool->pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { ddc_init_data.ctx = dc->ctx; ddc_init_data.link = NULL; ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); } else { pool->oem_device = NULL; } return true; create_fail: dcn303_resource_destruct(pool); return false; } struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) { struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dcn303_resource_construct(init_data->num_virtual_links, dc, pool)) return pool; BREAK_TO_DEBUGGER(); kfree(pool); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
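The per-instance register tables in dcn303_resource.c above (vmid_regs, hubp_regs, dpp_regs, optc_regs, and so on) all rely on the same C idiom: a helper macro expands to a designated array initializer "[id] = { ... }", so instance N's register block lands at index N of a const table. A minimal user-space sketch of that idiom follows; the register names and offsets are invented for illustration and are not the real DCN lists.

/* Stand-alone illustration of the "[id] = { REG_LIST(id) }" table idiom.
 * Register names and base addresses here are made up for the example.
 */
#include <stdio.h>

struct fake_hubp_registers {
	unsigned int CTRL;
	unsigned int ADDR;
};

/* Hypothetical per-instance register list: instance N sits at 0x100 * N. */
#define FAKE_HUBP_REG_LIST(id) \
	.CTRL = 0x1000 + 0x100 * (id), \
	.ADDR = 0x1004 + 0x100 * (id)

/* Same shape as the kernel's hubp_regs(id) / vmid_regs(id) helpers. */
#define fake_hubp_regs(id) [id] = { FAKE_HUBP_REG_LIST(id) }

static const struct fake_hubp_registers hubp_regs[] = {
	fake_hubp_regs(0),
	fake_hubp_regs(1)
};

int main(void)
{
	for (unsigned int i = 0; i < 2; i++)
		printf("hubp%u: CTRL=0x%04x ADDR=0x%04x\n",
		       i, hubp_regs[i].CTRL, hubp_regs[i].ADDR);
	return 0;
}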
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "core_types.h" #include "dc_common.h" #include "basics/conversion.h" bool is_rgb_cspace(enum dc_color_space output_color_space) { switch (output_color_space) { case COLOR_SPACE_SRGB: case COLOR_SPACE_SRGB_LIMITED: case COLOR_SPACE_2020_RGB_FULLRANGE: case COLOR_SPACE_2020_RGB_LIMITEDRANGE: case COLOR_SPACE_ADOBERGB: return true; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR601_LIMITED: case COLOR_SPACE_YCBCR709_LIMITED: case COLOR_SPACE_2020_YCBCR: return false; default: /* Add a case to switch */ BREAK_TO_DEBUGGER(); return false; } } bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) return true; if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) return true; return false; } bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) return true; if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) return true; return false; } bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) return true; if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) return true; if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) return true; return false; } void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, const struct dc_plane_state *plane_state) { if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID && plane_state->input_csc_color_matrix.enable_adjustment && plane_state->coeff_reduction_factor.value != 0) { bias_and_scale->scale_blue = fixed_point_to_int_frac( dc_fixpt_mul(plane_state->coeff_reduction_factor, dc_fixpt_from_fraction(256, 255)), 2, 13); bias_and_scale->scale_red = bias_and_scale->scale_blue; bias_and_scale->scale_green = bias_and_scale->scale_blue; } else { bias_and_scale->scale_blue = 0x2000; bias_and_scale->scale_red = 0x2000; bias_and_scale->scale_green = 0x2000; } }
linux-master
drivers/gpu/drm/amd/display/dc/basics/dc_common.c
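build_prescale_params() in dc_common.c above packs its per-channel scale factors as 2.13 fixed point, so 0x2000 (8192) represents 1.0; when input CSC adjustment applies, it first multiplies the coefficient reduction factor by 256/255. A quick stand-alone check of that arithmetic, using doubles rather than the kernel's struct fixed31_32 (compile with -lm):

/* Numeric check of the 2.13 packing used by build_prescale_params().
 * Plain doubles stand in for struct fixed31_32.
 */
#include <stdio.h>
#include <math.h>

static unsigned int to_u2d13(double x)
{
	return (unsigned int)lround(x * 8192.0); /* 13 fractional bits */
}

int main(void)
{
	printf("1.0           -> 0x%04x (default scale)\n", to_u2d13(1.0));
	printf("1.0 * 256/255 -> 0x%04x\n", to_u2d13(256.0 / 255.0));
	return 0;
}
/* Expected output: 0x2000 and 0x2020. */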
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "include/vector.h" bool dal_vector_construct( struct vector *vector, struct dc_context *ctx, uint32_t capacity, uint32_t struct_size) { vector->container = NULL; if (!struct_size || !capacity) { /* Container must be non-zero size*/ BREAK_TO_DEBUGGER(); return false; } vector->container = kcalloc(capacity, struct_size, GFP_KERNEL); if (vector->container == NULL) return false; vector->capacity = capacity; vector->struct_size = struct_size; vector->count = 0; vector->ctx = ctx; return true; } static bool dal_vector_presized_costruct(struct vector *vector, struct dc_context *ctx, uint32_t count, void *initial_value, uint32_t struct_size) { uint32_t i; vector->container = NULL; if (!struct_size || !count) { /* Container must be non-zero size*/ BREAK_TO_DEBUGGER(); return false; } vector->container = kcalloc(count, struct_size, GFP_KERNEL); if (vector->container == NULL) return false; /* If caller didn't supply initial value then the default * of all zeros is expected, which is exactly what dal_alloc() * initialises the memory to. 
*/ if (NULL != initial_value) { for (i = 0; i < count; ++i) memmove( vector->container + i * struct_size, initial_value, struct_size); } vector->capacity = count; vector->struct_size = struct_size; vector->count = count; return true; } struct vector *dal_vector_presized_create( struct dc_context *ctx, uint32_t size, void *initial_value, uint32_t struct_size) { struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL); if (vector == NULL) return NULL; if (dal_vector_presized_costruct( vector, ctx, size, initial_value, struct_size)) return vector; BREAK_TO_DEBUGGER(); kfree(vector); return NULL; } struct vector *dal_vector_create( struct dc_context *ctx, uint32_t capacity, uint32_t struct_size) { struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL); if (vector == NULL) return NULL; if (dal_vector_construct(vector, ctx, capacity, struct_size)) return vector; BREAK_TO_DEBUGGER(); kfree(vector); return NULL; } void dal_vector_destruct( struct vector *vector) { kfree(vector->container); vector->count = 0; vector->capacity = 0; } void dal_vector_destroy( struct vector **vector) { if (vector == NULL || *vector == NULL) return; dal_vector_destruct(*vector); kfree(*vector); *vector = NULL; } uint32_t dal_vector_get_count( const struct vector *vector) { return vector->count; } void *dal_vector_at_index( const struct vector *vector, uint32_t index) { if (vector->container == NULL || index >= vector->count) return NULL; return vector->container + (index * vector->struct_size); } bool dal_vector_remove_at_index( struct vector *vector, uint32_t index) { if (index >= vector->count) return false; if (index != vector->count - 1) memmove( vector->container + (index * vector->struct_size), vector->container + ((index + 1) * vector->struct_size), (vector->count - index - 1) * vector->struct_size); vector->count -= 1; return true; } void dal_vector_set_at_index( const struct vector *vector, const void *what, uint32_t index) { void *where = dal_vector_at_index(vector, index); if (!where) { BREAK_TO_DEBUGGER(); return; } memmove( where, what, vector->struct_size); } static inline uint32_t calc_increased_capacity( uint32_t old_capacity) { return old_capacity * 2; } bool dal_vector_insert_at( struct vector *vector, const void *what, uint32_t position) { uint8_t *insert_address; if (vector->count == vector->capacity) { if (!dal_vector_reserve( vector, calc_increased_capacity(vector->capacity))) return false; } insert_address = vector->container + (vector->struct_size * position); if (vector->count && position < vector->count) memmove( insert_address + vector->struct_size, insert_address, vector->struct_size * (vector->count - position)); memmove( insert_address, what, vector->struct_size); vector->count++; return true; } bool dal_vector_append( struct vector *vector, const void *item) { return dal_vector_insert_at(vector, item, vector->count); } struct vector *dal_vector_clone( const struct vector *vector) { struct vector *vec_cloned; uint32_t count; /* create new vector */ count = dal_vector_get_count(vector); if (count == 0) /* when count is 0 we still want to create clone of the vector */ vec_cloned = dal_vector_create( vector->ctx, vector->capacity, vector->struct_size); else /* Call "presized create" version, independently of how the * original vector was created. * The owner of original vector must know how to treat the new * vector - as "presized" or as "regular". * But from vector point of view it doesn't matter. 
*/ vec_cloned = dal_vector_presized_create(vector->ctx, count, NULL,/* no initial value */ vector->struct_size); if (NULL == vec_cloned) { BREAK_TO_DEBUGGER(); return NULL; } /* copy vector's data */ memmove(vec_cloned->container, vector->container, vec_cloned->struct_size * vec_cloned->capacity); return vec_cloned; } uint32_t dal_vector_capacity(const struct vector *vector) { return vector->capacity; } bool dal_vector_reserve(struct vector *vector, uint32_t capacity) { void *new_container; if (capacity <= vector->capacity) return true; new_container = krealloc(vector->container, capacity * vector->struct_size, GFP_KERNEL); if (new_container) { vector->container = new_container; vector->capacity = capacity; return true; } return false; } void dal_vector_clear(struct vector *vector) { vector->count = 0; }
linux-master
drivers/gpu/drm/amd/display/dc/basics/vector.c
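dal_vector_insert_at() in vector.c above grows the container whenever count reaches capacity: calc_increased_capacity() doubles the capacity and dal_vector_reserve() reallocates via krealloc(). The following user-space sketch reproduces the same append-and-double strategy with libc allocation; it is not the kernel API, just the mechanism.

/* User-space sketch of the dal_vector growth strategy: append into a byte
 * container of fixed struct_size elements, doubling capacity on demand.
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct vec {
	uint8_t *container;
	uint32_t struct_size;
	uint32_t count;
	uint32_t capacity;
};

static bool vec_reserve(struct vec *v, uint32_t capacity)
{
	void *p;

	if (capacity <= v->capacity)
		return true;
	p = realloc(v->container, (size_t)capacity * v->struct_size);
	if (!p)
		return false;
	v->container = p;
	v->capacity = capacity;
	return true;
}

static bool vec_append(struct vec *v, const void *item)
{
	if (v->count == v->capacity &&
	    !vec_reserve(v, v->capacity ? v->capacity * 2 : 1))
		return false;
	memcpy(v->container + (size_t)v->count * v->struct_size,
	       item, v->struct_size);
	v->count++;
	return true;
}

int main(void)
{
	struct vec v = { .struct_size = sizeof(int) };

	for (int i = 0; i < 10; i++)
		vec_append(&v, &i);
	printf("count=%u capacity=%u last=%d\n",
	       (unsigned)v.count, (unsigned)v.capacity,
	       ((int *)v.container)[9]);
	free(v.container);
	return 0;
}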
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "include/fixed31_32.h" static const struct fixed31_32 dc_fixpt_two_pi = { 26986075409LL }; static const struct fixed31_32 dc_fixpt_ln2 = { 2977044471LL }; static const struct fixed31_32 dc_fixpt_ln2_div_2 = { 1488522236LL }; static inline unsigned long long abs_i64( long long arg) { if (arg > 0) return (unsigned long long)arg; else return (unsigned long long)(-arg); } /* * @brief * result = dividend / divisor * *remainder = dividend % divisor */ static inline unsigned long long complete_integer_division_u64( unsigned long long dividend, unsigned long long divisor, unsigned long long *remainder) { unsigned long long result; ASSERT(divisor); result = div64_u64_rem(dividend, divisor, remainder); return result; } #define FRACTIONAL_PART_MASK \ ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1) #define GET_INTEGER_PART(x) \ ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART) #define GET_FRACTIONAL_PART(x) \ (FRACTIONAL_PART_MASK & (x)) struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator) { struct fixed31_32 res; bool arg1_negative = numerator < 0; bool arg2_negative = denominator < 0; unsigned long long arg1_value = arg1_negative ? -numerator : numerator; unsigned long long arg2_value = arg2_negative ? -denominator : denominator; unsigned long long remainder; /* determine integer part */ unsigned long long res_value = complete_integer_division_u64( arg1_value, arg2_value, &remainder); ASSERT(res_value <= LONG_MAX); /* determine fractional part */ { unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART; do { remainder <<= 1; res_value <<= 1; if (remainder >= arg2_value) { res_value |= 1; remainder -= arg2_value; } } while (--i != 0); } /* round up LSB */ { unsigned long long summand = (remainder << 1) >= arg2_value; ASSERT(res_value <= LLONG_MAX - summand); res_value += summand; } res.value = (long long)res_value; if (arg1_negative ^ arg2_negative) res.value = -res.value; return res; } struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2) { struct fixed31_32 res; bool arg1_negative = arg1.value < 0; bool arg2_negative = arg2.value < 0; unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value; unsigned long long arg2_value = arg2_negative ? 
-arg2.value : arg2.value; unsigned long long arg1_int = GET_INTEGER_PART(arg1_value); unsigned long long arg2_int = GET_INTEGER_PART(arg2_value); unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value); unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value); unsigned long long tmp; res.value = arg1_int * arg2_int; ASSERT(res.value <= LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg1_int * arg2_fra; ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg2_int * arg1_fra; ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg1_fra * arg2_fra; tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)dc_fixpt_half.value); ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; if (arg1_negative ^ arg2_negative) res.value = -res.value; return res; } struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg) { struct fixed31_32 res; unsigned long long arg_value = abs_i64(arg.value); unsigned long long arg_int = GET_INTEGER_PART(arg_value); unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value); unsigned long long tmp; res.value = arg_int * arg_int; ASSERT(res.value <= LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg_int * arg_fra; ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg_fra * arg_fra; tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)dc_fixpt_half.value); ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; return res; } struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg) { /* * @note * Good idea to use Newton's method */ ASSERT(arg.value); return dc_fixpt_from_fraction( dc_fixpt_one.value, arg.value); } struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg) { struct fixed31_32 square; struct fixed31_32 res = dc_fixpt_one; int n = 27; struct fixed31_32 arg_norm = arg; if (dc_fixpt_le( dc_fixpt_two_pi, dc_fixpt_abs(arg))) { arg_norm = dc_fixpt_sub( arg_norm, dc_fixpt_mul_int( dc_fixpt_two_pi, (int)div64_s64( arg_norm.value, dc_fixpt_two_pi.value))); } square = dc_fixpt_sqr(arg_norm); do { res = dc_fixpt_sub( dc_fixpt_one, dc_fixpt_div_int( dc_fixpt_mul( square, res), n * (n - 1))); n -= 2; } while (n > 2); if (arg.value != arg_norm.value) res = dc_fixpt_div( dc_fixpt_mul(res, arg_norm), arg); return res; } struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg) { return dc_fixpt_mul( arg, dc_fixpt_sinc(arg)); } struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg) { /* TODO implement argument normalization */ const struct fixed31_32 square = dc_fixpt_sqr(arg); struct fixed31_32 res = dc_fixpt_one; int n = 26; do { res = dc_fixpt_sub( dc_fixpt_one, dc_fixpt_div_int( dc_fixpt_mul( square, res), n * (n - 1))); n -= 2; } while (n != 0); return res; } /* * @brief * result = exp(arg), * where abs(arg) < 1 * * Calculated as Taylor series. 
*/ static struct fixed31_32 fixed31_32_exp_from_taylor_series(struct fixed31_32 arg) { unsigned int n = 9; struct fixed31_32 res = dc_fixpt_from_fraction( n + 2, n + 1); /* TODO find correct res */ ASSERT(dc_fixpt_lt(arg, dc_fixpt_one)); do res = dc_fixpt_add( dc_fixpt_one, dc_fixpt_div_int( dc_fixpt_mul( arg, res), n)); while (--n != 1); return dc_fixpt_add( dc_fixpt_one, dc_fixpt_mul( arg, res)); } struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg) { /* * @brief * Main equation is: * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r), * where m = round(x / ln(2)), r = x - m * ln(2) */ if (dc_fixpt_le( dc_fixpt_ln2_div_2, dc_fixpt_abs(arg))) { int m = dc_fixpt_round( dc_fixpt_div( arg, dc_fixpt_ln2)); struct fixed31_32 r = dc_fixpt_sub( arg, dc_fixpt_mul_int( dc_fixpt_ln2, m)); ASSERT(m != 0); ASSERT(dc_fixpt_lt( dc_fixpt_abs(r), dc_fixpt_one)); if (m > 0) return dc_fixpt_shl( fixed31_32_exp_from_taylor_series(r), (unsigned char)m); else return dc_fixpt_div_int( fixed31_32_exp_from_taylor_series(r), 1LL << -m); } else if (arg.value != 0) return fixed31_32_exp_from_taylor_series(arg); else return dc_fixpt_one; } struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg) { struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one); /* TODO improve 1st estimation */ struct fixed31_32 error; ASSERT(arg.value > 0); /* TODO if arg is negative, return NaN */ /* TODO if arg is zero, return -INF */ do { struct fixed31_32 res1 = dc_fixpt_add( dc_fixpt_sub( res, dc_fixpt_one), dc_fixpt_div( arg, dc_fixpt_exp(res))); error = dc_fixpt_sub( res, res1); res = res1; /* TODO determine max_allowed_error based on quality of exp() */ } while (abs_i64(error.value) > 100ULL); return res; } /* this function is a generic helper to translate fixed point value to * specified integer format that will consist of integer_bits integer part and * fractional_bits fractional part. For example it is used in * dc_fixpt_u2d19 to receive 2 bits integer part and 19 bits fractional * part in 32 bits. It is used in hw programming (scaler) */ static inline unsigned int ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits) { /* 1. create mask of integer part */ unsigned int result = (1 << integer_bits) - 1; /* 2. mask out fractional part */ unsigned int fractional_part = FRACTIONAL_PART_MASK & value; /* 3. shrink fixed point integer part to be of integer_bits width*/ result &= GET_INTEGER_PART(value); /* 4. make space for fractional part to be filled in after integer */ result <<= fractional_bits; /* 5. shrink fixed point fractional part to of fractional_bits width*/ fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits; /* 6. 
merge the result */ return result | fractional_part; } static inline unsigned int clamp_ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits, unsigned int min_clamp) { unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits); if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART))) return (1 << (integer_bits + fractional_bits)) - 1; else if (truncated_val > min_clamp) return truncated_val; else return min_clamp; } unsigned int dc_fixpt_u4d19(struct fixed31_32 arg) { return ux_dy(arg.value, 4, 19); } unsigned int dc_fixpt_u3d19(struct fixed31_32 arg) { return ux_dy(arg.value, 3, 19); } unsigned int dc_fixpt_u2d19(struct fixed31_32 arg) { return ux_dy(arg.value, 2, 19); } unsigned int dc_fixpt_u0d19(struct fixed31_32 arg) { return ux_dy(arg.value, 0, 19); } unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 14, 1); } unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 10, 1); } int dc_fixpt_s4d19(struct fixed31_32 arg) { if (arg.value < 0) return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19); else return ux_dy(arg.value, 4, 19); }
linux-master
drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
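dc_fixpt_exp() in fixpt31_32.c above uses the range-reduction identity exp(x) = 2^m * exp(r), with m = round(x / ln 2) and r = x - m * ln 2, so fixed31_32_exp_from_taylor_series() only ever evaluates its series for |r| <= ln(2)/2, roughly 0.347. The double-precision sketch below demonstrates the same reduction numerically; it illustrates the identity and is not the fixed-point kernel code (compile with -lm):

/* Double-precision sketch of the range reduction behind dc_fixpt_exp():
 * exp(x) = 2^m * exp(r), m = round(x / ln2), r = x - m * ln2.
 */
#include <stdio.h>
#include <math.h>

static const double ln2 = 0.69314718055994530942;

static double exp_taylor(double r)
{
	/* Short Taylor series; adequate because |r| <= ln(2)/2. */
	double term = 1.0, sum = 1.0;
	int n;

	for (n = 1; n <= 9; n++) {
		term *= r / n;
		sum += term;
	}
	return sum;
}

static double exp_reduced(double x)
{
	int m = (int)lround(x / ln2);
	double r = x - m * ln2;

	return ldexp(exp_taylor(r), m); /* 2^m * exp(r) */
}

int main(void)
{
	double x;

	for (x = -5.0; x <= 5.0; x += 2.5)
		printf("x=%5.2f  reduced=%.9f  libm=%.9f\n",
		       x, exp_reduced(x), exp(x));
	return 0;
}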
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "basics/conversion.h" #define DIVIDER 10000 /* S2D13 value in [-3.00...0.9999] */ #define S2D13_MIN (-3 * DIVIDER) #define S2D13_MAX (3 * DIVIDER) uint16_t fixed_point_to_int_frac( struct fixed31_32 arg, uint8_t integer_bits, uint8_t fractional_bits) { int32_t numerator; int32_t divisor = 1 << fractional_bits; uint16_t result; uint16_t d = (uint16_t)dc_fixpt_floor( dc_fixpt_abs( arg)); if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor)) numerator = (uint16_t)dc_fixpt_round( dc_fixpt_mul_int( arg, divisor)); else { numerator = dc_fixpt_floor( dc_fixpt_sub( dc_fixpt_from_int( 1LL << integer_bits), dc_fixpt_recip( dc_fixpt_from_int( divisor)))); } if (numerator >= 0) result = (uint16_t)numerator; else result = (uint16_t)( (1 << (integer_bits + fractional_bits + 1)) + numerator); if ((result != 0) && dc_fixpt_lt( arg, dc_fixpt_zero)) result |= 1 << (integer_bits + fractional_bits); return result; } /* * convert_float_matrix - This converts a double into HW register spec defined format S2D13. */ void convert_float_matrix( uint16_t *matrix, struct fixed31_32 *flt, uint32_t buffer_size) { const struct fixed31_32 min_2_13 = dc_fixpt_from_fraction(S2D13_MIN, DIVIDER); const struct fixed31_32 max_2_13 = dc_fixpt_from_fraction(S2D13_MAX, DIVIDER); uint32_t i; for (i = 0; i < buffer_size; ++i) { uint32_t reg_value = fixed_point_to_int_frac( dc_fixpt_clamp( flt[i], min_2_13, max_2_13), 2, 13); matrix[i] = (uint16_t)reg_value; } } static uint32_t find_gcd(uint32_t a, uint32_t b) { uint32_t remainder = 0; while (b != 0) { remainder = a % b; a = b; b = remainder; } return a; } void reduce_fraction(uint32_t num, uint32_t den, uint32_t *out_num, uint32_t *out_den) { uint32_t gcd = 0; gcd = find_gcd(num, den); *out_num = num / gcd; *out_den = den / gcd; }
linux-master
drivers/gpu/drm/amd/display/dc/basics/conversion.c
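convert_float_matrix() and fixed_point_to_int_frac() in conversion.c above pack each matrix coefficient into the 16-bit S2D13 register format: 13 fractional bits (1.0 is 0x2000), two integer bits, and bit 15 acting as the sign, with negative values stored as the 16-bit two's complement. The sketch below mirrors only the in-range path of that packing, with doubles standing in for fixed31_32 (compile with -lm):

/* Stand-alone sketch of S2D13 packing (2 integer bits, 13 fractional bits,
 * sign at bit 15), mirroring the in-range path of fixed_point_to_int_frac().
 */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

static uint16_t to_s2d13(double x)
{
	int32_t numerator = (int32_t)lround(x * 8192.0); /* 2^13 */
	uint16_t result;

	if (numerator >= 0)
		result = (uint16_t)numerator;
	else
		result = (uint16_t)(65536 + numerator); /* two's complement */

	if (result != 0 && x < 0.0)
		result |= 1 << 15; /* sign bit */
	return result;
}

int main(void)
{
	const double samples[] = { 1.0, 1.5, -1.0, 0.25, -2.0 };

	for (unsigned int i = 0; i < 5; i++)
		printf("% .2f -> 0x%04x\n", samples[i], to_s2d13(samples[i]));
	return 0;
}
/* Expected: 0x2000, 0x3000, 0xe000, 0x0800, 0xc000. */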
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "core_types.h" #include "dce80_hw_sequencer.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dce100/dce100_hw_sequencer.h" /* include DCE8 register header files */ #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" /******************************************************************************* * Private definitions ******************************************************************************/ /***************************PIPE_CONTROL***********************************/ void dce80_hw_sequencer_construct(struct dc *dc) { dce110_hw_sequencer_construct(dc); dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; }
linux-master
drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
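dce80_hw_sequencer_construct() above follows the driver's usual pattern: run the DCE 11.0 constructor to populate the whole table of function pointers, then overwrite only the hooks DCE8 needs to handle differently. A tiny stand-alone sketch of that construct-base-then-override pattern; the hook names are invented and do not correspond to the real hwss table.

/* Minimal sketch of "construct the base vtable, then override a few hooks",
 * in the spirit of dce80_hw_sequencer_construct(). Names are invented.
 */
#include <stdio.h>

struct hw_sequencer {
	void (*prepare_bandwidth)(void);
	void (*pipe_control_lock)(void);
};

static void base_prepare_bandwidth(void) { puts("base prepare_bandwidth"); }
static void base_pipe_control_lock(void) { puts("base pipe_control_lock"); }
static void dce8_prepare_bandwidth(void) { puts("dce8 prepare_bandwidth"); }

static void base_construct(struct hw_sequencer *hwss)
{
	hwss->prepare_bandwidth = base_prepare_bandwidth;
	hwss->pipe_control_lock = base_pipe_control_lock;
}

static void dce8_construct(struct hw_sequencer *hwss)
{
	base_construct(hwss);                             /* inherit everything */
	hwss->prepare_bandwidth = dce8_prepare_bandwidth; /* override one hook */
}

int main(void)
{
	struct hw_sequencer hwss;

	dce8_construct(&hwss);
	hwss.prepare_bandwidth(); /* dce8 override */
	hwss.pipe_control_lock(); /* inherited from base */
	return 0;
}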
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" /* include DCE8 register header files */ #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" #include "dc_types.h" #include "include/grph_object_id.h" #include "include/logger_interface.h" #include "../dce110/dce110_timing_generator.h" #include "dce80_timing_generator.h" #include "timing_generator.h" enum black_color_format { BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */ BLACK_COLOR_FORMAT_RGB_LIMITED, BLACK_COLOR_FORMAT_YUV_TV, BLACK_COLOR_FORMAT_YUV_CV, BLACK_COLOR_FORMAT_YUV_SUPER_AA, BLACK_COLOR_FORMAT_COUNT }; static const struct dce110_timing_generator_offsets reg_offsets[] = { { .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), }, { .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), } }; #define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10 #define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1) #define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASKhw + 1) #define CRTC_REG(reg) (reg + tg110->offsets.crtc) #define DCP_REG(reg) (reg + tg110->offsets.dcp) #define DMIF_REG(reg) (reg + tg110->offsets.dmif) static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz) { uint64_t pix_dur; uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 + DCE110TG_FROM_TG(tg)->offsets.dmif; uint32_t value = dm_read_reg(tg->ctx, addr); if (pix_clk_100hz == 0) return; pix_dur = div_u64(10000000000ull, pix_clk_100hz); set_reg_field_value( value, pix_dur, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION); dm_write_reg(tg->ctx, addr, value); } static void program_timing(struct timing_generator *tg, const struct dc_crtc_timing *timing, int vready_offset, int vstartup_start, int vupdate_offset, int vupdate_width, const 
enum signal_type signal, bool use_vbios) { if (!use_vbios) program_pix_dur(tg, timing->pix_clk_100hz); dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios); } static void dce80_timing_generator_enable_advanced_request( struct timing_generator *tg, bool enable, const struct dc_crtc_timing *timing) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL); uint32_t value = dm_read_reg(tg->ctx, addr); if (enable) { set_reg_field_value( value, 0, CRTC_START_LINE_CONTROL, CRTC_LEGACY_REQUESTOR_EN); } else { set_reg_field_value( value, 1, CRTC_START_LINE_CONTROL, CRTC_LEGACY_REQUESTOR_EN); } if ((timing->v_sync_width + timing->v_front_porch) <= 3) { set_reg_field_value( value, 3, CRTC_START_LINE_CONTROL, CRTC_ADVANCED_START_LINE_POSITION); set_reg_field_value( value, 0, CRTC_START_LINE_CONTROL, CRTC_PREFETCH_EN); } else { set_reg_field_value( value, 4, CRTC_START_LINE_CONTROL, CRTC_ADVANCED_START_LINE_POSITION); set_reg_field_value( value, 1, CRTC_START_LINE_CONTROL, CRTC_PREFETCH_EN); } set_reg_field_value( value, 1, CRTC_START_LINE_CONTROL, CRTC_PROGRESSIVE_START_LINE_EARLY); set_reg_field_value( value, 1, CRTC_START_LINE_CONTROL, CRTC_INTERLACE_START_LINE_EARLY); dm_write_reg(tg->ctx, addr, value); } static const struct timing_generator_funcs dce80_tg_funcs = { .validate_timing = dce110_tg_validate_timing, .program_timing = program_timing, .enable_crtc = dce110_timing_generator_enable_crtc, .disable_crtc = dce110_timing_generator_disable_crtc, .is_counter_moving = dce110_timing_generator_is_counter_moving, .get_position = dce110_timing_generator_get_position, .get_frame_count = dce110_timing_generator_get_vblank_counter, .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos, .set_early_control = dce110_timing_generator_set_early_control, .wait_for_state = dce110_tg_wait_for_state, .set_blank = dce110_tg_set_blank, .is_blanked = dce110_tg_is_blanked, .set_colors = dce110_tg_set_colors, .set_overscan_blank_color = dce110_timing_generator_set_overscan_color_black, .set_blank_color = dce110_timing_generator_program_blank_color, .disable_vga = dce110_timing_generator_disable_vga, .did_triggered_reset_occur = dce110_timing_generator_did_triggered_reset_occur, .setup_global_swap_lock = dce110_timing_generator_setup_global_swap_lock, .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger, .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger, .tear_down_global_swap_lock = dce110_timing_generator_tear_down_global_swap_lock, .set_drr = dce110_timing_generator_set_drr, .get_last_used_drr_vtotal = NULL, .set_static_screen_control = dce110_timing_generator_set_static_screen_control, .set_test_pattern = dce110_timing_generator_set_test_pattern, .arm_vert_intr = dce110_arm_vert_intr, /* DCE8.0 overrides */ .enable_advanced_request = dce80_timing_generator_enable_advanced_request, .configure_crc = dce110_configure_crc, .get_crc = dce110_get_crc, }; void dce80_timing_generator_construct( struct dce110_timing_generator *tg110, struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { tg110->controller_id = CONTROLLER_ID_D0 + instance; tg110->base.inst = instance; tg110->offsets = *offsets; tg110->derived_offsets = reg_offsets[instance]; tg110->base.funcs = &dce80_tg_funcs; tg110->base.ctx = ctx; tg110->base.bp = ctx->dc_bios; tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1; tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1; tg110->min_h_blank = 56; 
tg110->min_h_front_porch = 4; tg110->min_h_back_porch = 4; }
linux-master
drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
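A brief aside on the DCE 8.0 advanced-request programming above: dce80_timing_generator_enable_advanced_request() selects an advanced start-line position of 3 with prefetch disabled when the vertical sync width plus the vertical front porch is 3 lines or fewer, and 4 with prefetch enabled otherwise. The sketch below restates that decision in isolation; the struct and helper names are hypothetical and are not part of the driver.

/*
 * Illustrative sketch only: pick_start_line_cfg() and struct start_line_cfg
 * are hypothetical names. The logic mirrors the branch in
 * dce80_timing_generator_enable_advanced_request().
 */
struct start_line_cfg {
	unsigned int advanced_start_line; /* -> CRTC_ADVANCED_START_LINE_POSITION */
	unsigned int prefetch_en;         /* -> CRTC_PREFETCH_EN */
};

static struct start_line_cfg pick_start_line_cfg(unsigned int v_sync_width,
						 unsigned int v_front_porch)
{
	struct start_line_cfg cfg;

	if (v_sync_width + v_front_porch <= 3) {
		/* very short vertical blank: start earlier, no prefetch */
		cfg.advanced_start_line = 3;
		cfg.prefetch_en = 0;
	} else {
		cfg.advanced_start_line = 4;
		cfg.prefetch_en = 1;
	}
	return cfg;
}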
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "irq/dce80/irq_service_dce80.h" #include "dce110/dce110_timing_generator.h" #include "dce110/dce110_resource.h" #include "dce80/dce80_timing_generator.h" #include "dce/dce_mem_input.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "dce80/dce80_hw_sequencer.h" #include "dce100/dce100_resource.h" #include "dce/dce_panel_cntl.h" #include "reg_helper.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" /* TODO remove this include */ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" #include "gmc/gmc_7_1_sh_mask.h" #endif #include "dce80/dce80_resource.h" #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x1CCE #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE #endif #ifndef mmHPD_DC_HPD_CONTROL #define mmHPD_DC_HPD_CONTROL 0x189A #define mmHPD0_DC_HPD_CONTROL 0x189A #define mmHPD1_DC_HPD_CONTROL 0x18A2 #define mmHPD2_DC_HPD_CONTROL 0x18AA #define mmHPD3_DC_HPD_CONTROL 0x18B2 #define mmHPD4_DC_HPD_CONTROL 0x18BA #define mmHPD5_DC_HPD_CONTROL 0x18C2 #endif #define DCE11_DIG_FE_CNTL 0x4a00 #define DCE11_DIG_BE_CNTL 0x4a47 #define DCE11_DP_SEC 0x4ac3 static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - 
mmCRTC_CONTROL), .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name #define ipp_regs(id)\ [id] = {\ IPP_COMMON_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE80(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE80(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE80(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE80_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST_DCE_BASE(id),\ .AFMT_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), stream_enc_regs(6) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCE_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; #define opp_regs(id)\ [id] = {\ 
OPP_DCE_80_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_80(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) }; static const struct dce110_aux_registers_shift aux_shift = { DCE10_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCE10_AUX_MASK_SH_LIST(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_80(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, }; static const struct resource_caps res_cap_81 = { .num_timing_generator = 4, .num_audio = 7, .num_stream_encoder = 7, .num_pll = 3, .num_ddc = 6, }; static const struct resource_caps res_cap_83 = { .num_timing_generator = 2, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 2, .num_ddc = 2, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults = { .enable_legacy_fast_update = true, }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE80_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE80(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE80(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x1918 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static int map_transmitter_id_to_phy_instance( enum transmitter transmitter) { switch (transmitter) { case TRANSMITTER_UNIPHY_A: return 0; case TRANSMITTER_UNIPHY_B: return 1; case 
TRANSMITTER_UNIPHY_C: return 2; case TRANSMITTER_UNIPHY_D: return 3; case TRANSMITTER_UNIPHY_E: return 4; case TRANSMITTER_UNIPHY_F: return 5; case TRANSMITTER_UNIPHY_G: return 6; default: ASSERT(0); return 0; } } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce80_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce80_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct output_pixel_processor *dce80_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } static struct dce_aux *dce80_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static struct dce_i2c_hw *dce80_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct dce_i2c_sw *dce80_i2c_sw_create( struct dc_context *ctx) { struct dce_i2c_sw *dce_i2c_sw = kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL); if (!dce_i2c_sw) return NULL; dce_i2c_sw_construct(dce_i2c_sw, ctx); return dce_i2c_sw; } static struct stream_encoder *dce80_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE8_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE8_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE8_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce80_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = 
kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce80_stream_encoder_create, .create_hwseq = dce80_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE8_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE8_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE8_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce80_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 2; return &dce_mi->base; } static void dce80_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce80_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->prescaler_on = false; return &transform->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 297000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; static struct link_encoder *dce80_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; if (!enc110) return NULL; link_regs_id = map_transmitter_id_to_phy_instance(enc_init_data->transmitter); dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } static struct clock_source *dce80_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void dce80_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static struct 
input_pixel_processor *dce80_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static void dce80_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce80_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dce80_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.dp_clock_source != NULL) dce80_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } static bool dce80_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { int i; bool at_least_one_pipe = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].stream) at_least_one_pipe = true; } if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { context->bw_ctx.bw.dce.dispclk_khz = 0; context->bw_ctx.bw.dce.yclk_khz = 0; } return true; } static bool dce80_validate_surface_sets( struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 1) return false; if (context->stream_status[i].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; } return true; } static enum dc_status dce80_validate_global( struct dc *dc, struct dc_state *context) { if (!dce80_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static void dce80_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); dce80_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce80_res_pool_funcs = { .destroy = dce80_destroy_resource_pool, .link_enc_create = dce80_link_encoder_create, .panel_cntl_create = dce80_panel_cntl_create, .validate_bandwidth = dce80_validate_bandwidth, 
.validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, .validate_global = dce80_validate_global, .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; static bool dce80_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce80_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = res_cap.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.i2c_speed_in_khz_hdcp = 40; dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce80_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce80_timing_generator_create( ctx, i, &dce80_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce80_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = 
dce80_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce80_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce80_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce80_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce80_hw_sequencer_construct(dc); return true; res_create_fail: dce80_resource_destruct(pool); return false; } struct resource_pool *dce80_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce80_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } static bool dce81_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_81; pool->base.funcs = &dce80_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap_81.num_timing_generator; pool->base.timing_generator_count = res_cap_81.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.i2c_speed_in_khz_hdcp = 40; dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = 
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce80_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce80_timing_generator_create( ctx, i, &dce80_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce80_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce80_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce80_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce80_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce80_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce80_hw_sequencer_construct(dc); return true; res_create_fail: dce80_resource_destruct(pool); return false; } struct resource_pool *dce81_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce81_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } static bool dce83_construct( uint8_t num_virtual_links, struct dc *dc, struct 
dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_83; pool->base.funcs = &dce80_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap_83.num_timing_generator; pool->base.timing_generator_count = res_cap_83.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.i2c_speed_in_khz_hdcp = 40; dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->debug = debug_defaults; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); pool->base.clk_src_count = 1; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce80_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce80_timing_generator_create( ctx, i, &dce80_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce80_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce80_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce80_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce80_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < 
pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce80_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce80_hw_sequencer_construct(dc); return true; res_create_fail: dce80_resource_destruct(pool); return false; } struct resource_pool *dce83_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce83_construct(num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
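The dce80/dce81/dce83 construct routines above share one error-handling shape: every sub-block allocation is NULL-checked, a failure logs via dm_error(), hits BREAK_TO_DEBUGGER(), and jumps to a single res_create_fail label, and dce80_resource_destruct() is written to cope with a partially built pool. The sketch below shows that pattern in miniature under hypothetical names (my_pool, construct_blocks, destroy_blocks); it is illustrative only, not the driver's API.

/* Minimal sketch of the construct-or-unwind pattern, with hypothetical names. */
#include <linux/slab.h>   /* kzalloc, kfree, GFP_KERNEL */
#include <linux/types.h>  /* bool */

#define MY_PIPE_COUNT 6

struct my_pool {
	void *blocks[MY_PIPE_COUNT];
};

static void destroy_blocks(struct my_pool *pool)
{
	int i;

	/* Tolerates a partially constructed pool: only free what exists. */
	for (i = 0; i < MY_PIPE_COUNT; i++) {
		if (pool->blocks[i] != NULL) {
			kfree(pool->blocks[i]);
			pool->blocks[i] = NULL;
		}
	}
}

static bool construct_blocks(struct my_pool *pool)
{
	int i;

	for (i = 0; i < MY_PIPE_COUNT; i++) {
		pool->blocks[i] = kzalloc(64, GFP_KERNEL);
		if (pool->blocks[i] == NULL)
			goto create_fail; /* single cleanup path, as in dce80_construct() */
	}
	return true;

create_fail:
	destroy_blocks(pool); /* safe even though later entries were never created */
	return false;
}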
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dcn302_hwseq.h" #include "dce/dce_hwseq.h" #include "reg_helper.h" #include "dc.h" #define DC_LOGGER_INIT(logger) #define CTX \ hws->ctx #define REG(reg)\ hws->regs->reg #undef FN #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name void dcn302_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 0 : 2; if (hws->ctx->dc->debug.disable_dpp_power_gate) return; if (REG(DOMAIN1_PG_CONFIG) == 0) return; switch (dpp_inst) { case 0: /* DPP0 */ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_GATE, power_gate); REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN1_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 1: /* DPP1 */ REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_GATE, power_gate); REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN3_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 2: /* DPP2 */ REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_GATE, power_gate); REG_WAIT(DOMAIN5_PG_STATUS, DOMAIN5_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 3: /* DPP3 */ REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_GATE, power_gate); REG_WAIT(DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 4: /* DPP4 */ REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_GATE, power_gate); REG_WAIT(DOMAIN9_PG_STATUS, DOMAIN9_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; default: BREAK_TO_DEBUGGER(); break; } } void dcn302_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 
0 : 2; if (hws->ctx->dc->debug.disable_hubp_power_gate) return; if (REG(DOMAIN0_PG_CONFIG) == 0) return; switch (hubp_inst) { case 0: /* DCHUBP0 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_GATE, power_gate); REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 1: /* DCHUBP1 */ REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_GATE, power_gate); REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN2_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 2: /* DCHUBP2 */ REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_GATE, power_gate); REG_WAIT(DOMAIN4_PG_STATUS, DOMAIN4_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 3: /* DCHUBP3 */ REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_GATE, power_gate); REG_WAIT(DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 4: /* DCHUBP4 */ REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_GATE, power_gate); REG_WAIT(DOMAIN8_PG_STATUS, DOMAIN8_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; default: BREAK_TO_DEBUGGER(); break; } } void dcn302_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 0 : 2; uint32_t org_ip_request_cntl = 0; if (hws->ctx->dc->debug.disable_dsc_power_gate) return; if (REG(DOMAIN16_PG_CONFIG) == 0) return; REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); switch (dsc_inst) { case 0: /* DSC0 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_GATE, power_gate); REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 1: /* DSC1 */ REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_GATE, power_gate); REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 2: /* DSC2 */ REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_GATE, power_gate); REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 3: /* DSC3 */ REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_GATE, power_gate); REG_WAIT(DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 4: /* DSC4 */ REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_GATE, power_gate); REG_WAIT(DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; default: BREAK_TO_DEBUGGER(); break; } if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); }
linux-master
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_hwseq.c
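The three dcn302_*_pg_control() helpers above follow one template: power_on maps to a gate value of 0 and an expected PGFSM status of 0 (power-off maps to 1 and 2), a per-block debug flag can skip the whole operation, and the instance index selects a DOMAIN register pair. DPP n uses DOMAIN(2n+1), DCHUBP n uses DOMAIN(2n), and DSC n uses DOMAIN(16+n). The sketch below captures only that mapping; pg_write()/pg_wait() are hypothetical stand-ins for REG_UPDATE()/REG_WAIT(), not real DC helpers.

/* Hypothetical sketch of the dcn302 power-gate template. */
#include <linux/types.h>

/* Stand-ins for REG_UPDATE()/REG_WAIT() on the selected DOMAIN registers. */
void pg_write(unsigned int domain, unsigned int gate);
void pg_wait(unsigned int domain, unsigned int status,
	     unsigned int delay_us, unsigned int timeout_us);

/* Instance-to-domain mapping used by the dcn302 helpers above. */
static unsigned int dpp_domain(unsigned int inst)  { return 2 * inst + 1; } /* DPP0..4  -> DOMAIN1,3,5,7,9 */
static unsigned int hubp_domain(unsigned int inst) { return 2 * inst; }     /* HUBP0..4 -> DOMAIN0,2,4,6,8 */
static unsigned int dsc_domain(unsigned int inst)  { return 16 + inst; }    /* DSC0..4  -> DOMAIN16..20 */

static void pg_control(unsigned int domain, bool power_on)
{
	unsigned int power_gate = power_on ? 0 : 1; /* 0 = ungated, 1 = gated */
	unsigned int pwr_status = power_on ? 0 : 2; /* PGFSM status to wait for */

	pg_write(domain, power_gate);
	pg_wait(domain, pwr_status, 1, 1000); /* 1 us poll interval, 1000 us timeout */
}

/* Example: gate or ungate a given DPP instance. */
static void dpp_pg_example(unsigned int dpp_inst, bool power_on)
{
	pg_control(dpp_domain(dpp_inst), power_on);
}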
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dcn302_init.h" #include "dcn302_resource.h" #include "dcn302_dccg.h" #include "irq/dcn302/irq_service_dcn302.h" #include "dcn30/dcn30_dio_link_encoder.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_dpp.h" #include "dcn30/dcn30_hubbub.h" #include "dcn30/dcn30_hubp.h" #include "dcn30/dcn30_mmhubbub.h" #include "dcn30/dcn30_mpc.h" #include "dcn30/dcn30_opp.h" #include "dcn30/dcn30_optc.h" #include "dcn30/dcn30_resource.h" #include "dcn20/dcn20_dsc.h" #include "dcn20/dcn20_resource.h" #include "dml/dcn30/dcn30_fpu.h" #include "dcn10/dcn10_resource.h" #include "link.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" #include "dce/dce_aux.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce/dce_i2c_hw.h" #include "dce/dce_panel_cntl.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" #include "clk_mgr.h" #include "hw_sequencer_private.h" #include "reg_helper.h" #include "resource.h" #include "vm_helper.h" #include "dml/dcn302/dcn302_fpu.h" #include "dimgrey_cavefish_ip_offset.h" #include "dcn/dcn_3_0_2_offset.h" #include "dcn/dcn_3_0_2_sh_mask.h" #include "dpcs/dpcs_3_0_0_offset.h" #include "dpcs/dpcs_3_0_0_sh_mask.h" #include "nbio/nbio_7_4_offset.h" #include "amdgpu_socbb.h" #define DC_LOGGER_INIT(logger) static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, .max_downscale_src_width = 7680,/*upto 8K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, .enable_legacy_fast_update = false, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, .disallow_replay = false, }, }; enum dcn302_clk_src_array_id { DCN302_CLK_SRC_PLL0, DCN302_CLK_SRC_PLL1, DCN302_CLK_SRC_PLL2, DCN302_CLK_SRC_PLL3, DCN302_CLK_SRC_PLL4, DCN302_CLK_SRC_TOTAL }; static const struct resource_caps res_cap_dcn302 = { .num_timing_generator = 5, .num_opp = 5, .num_video_plane = 5, 
.num_audio = 5, .num_stream_encoder = 5, .num_dwb = 1, .num_ddc = 5, .num_vmid = 16, .num_mpc_3dlut = 2, .num_dsc = 5, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true, .p010 = true, .ayuv = false, }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 16000 }, /* 6:1 downscaling ratio: 1000/6 = 166.666 */ .max_downscale_factor = { .argb8888 = 167, .nv12 = 167, .fp16 = 167 }, 16, 16 }; /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* DCN */ #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name #define SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name #define SRI2(reg_name, block, id)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define DCCG_SRII(reg_name, block, id)\ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define VUPDATE_SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ mm ## reg_name ## _ ## block ## id #define SRII_DWB(reg_name, temp_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## temp_name #define SF_DWB2(reg_name, block, id, field_name, post_fix) \ .field_name = reg_name ## __ ## field_name ## post_fix #define SRII_MPC_RMU(reg_name, block, id)\ .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN30(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN30(_MASK) }; #define vmid_regs(id)\ [id] = { DCN20_VMID_REG_LIST(id) } static const struct dcn_vmid_registers vmid_regs[] = { vmid_regs(0), vmid_regs(1), vmid_regs(2), vmid_regs(3), vmid_regs(4), vmid_regs(5), vmid_regs(6), vmid_regs(7), vmid_regs(8), vmid_regs(9), vmid_regs(10), vmid_regs(11), vmid_regs(12), vmid_regs(13), vmid_regs(14), vmid_regs(15) }; static const struct dcn20_vmid_shift vmid_shifts = { DCN20_VMID_MASK_SH_LIST(__SHIFT) }; static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; static struct hubbub *dcn302_hubbub_create(struct dc_context *ctx) { int i; struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub3) return NULL; hubbub3_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); for (i = 0; i < res_cap_dcn302.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub3->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } return &hubbub3->base; } #define vpg_regs(id)\ [id] = { VPG_DCN3_REG_LIST(id) } static const struct dcn30_vpg_registers vpg_regs[] = { vpg_regs(0), 
vpg_regs(1), vpg_regs(2), vpg_regs(3), vpg_regs(4), vpg_regs(5) }; static const struct dcn30_vpg_shift vpg_shift = { DCN3_VPG_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_vpg_mask vpg_mask = { DCN3_VPG_MASK_SH_LIST(_MASK) }; static struct vpg *dcn302_vpg_create(struct dc_context *ctx, uint32_t inst) { struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); if (!vpg3) return NULL; vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); return &vpg3->base; } #define afmt_regs(id)\ [id] = { AFMT_DCN3_REG_LIST(id) } static const struct dcn30_afmt_registers afmt_regs[] = { afmt_regs(0), afmt_regs(1), afmt_regs(2), afmt_regs(3), afmt_regs(4), afmt_regs(5) }; static const struct dcn30_afmt_shift afmt_shift = { DCN3_AFMT_MASK_SH_LIST(__SHIFT) }; static const struct dcn30_afmt_mask afmt_mask = { DCN3_AFMT_MASK_SH_LIST(_MASK) }; static struct afmt *dcn302_afmt_create(struct dc_context *ctx, uint32_t inst) { struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); if (!afmt3) return NULL; afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); return &afmt3->base; } #define audio_regs(id)\ [id] = { AUD_COMMON_REG_LIST(id) } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6) }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; static struct audio *dcn302_create_audio(struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } #define stream_enc_regs(id)\ [id] = { SE_DCN3_REG_LIST(id) } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4) }; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static struct stream_encoder *dcn302_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; struct afmt *afmt; int vpg_inst; int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ if (eng_id <= ENGINE_ID_DIGE) { vpg_inst = eng_id; afmt_inst = eng_id; } else return NULL; enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn302_vpg_create(ctx, vpg_inst); afmt = dcn302_afmt_create(ctx, afmt_inst); if (!enc1 || !vpg || !afmt) { kfree(enc1); kfree(vpg); kfree(afmt); return NULL; } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } #define clk_src_regs(index, pllid)\ [index] = { CS_COMMON_REG_LIST_DCN3_02(index, pllid) } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct 
dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; static struct clock_source *dcn302_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN302_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN302_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN302_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dcn302_hwseq_create(struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } #define hubp_regs(id)\ [id] = { HUBP_REG_LIST_DCN30(id) } static const struct dcn_hubp2_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3), hubp_regs(4) }; static const struct dcn_hubp2_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_hubp2_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN30(_MASK) }; static struct hubp *dcn302_hubp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } #define dpp_regs(id)\ [id] = { DPP_REG_LIST_DCN30(id) } static const struct dcn3_dpp_registers dpp_regs[] = { dpp_regs(0), dpp_regs(1), dpp_regs(2), dpp_regs(3), dpp_regs(4) }; static const struct dcn3_dpp_shift tf_shift = { DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) }; static const struct dcn3_dpp_mask tf_mask = { DPP_REG_LIST_SH_MASK_DCN30(_MASK) }; static struct dpp *dcn302_dpp_create(struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); if (!dpp) return NULL; if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) return &dpp->base; BREAK_TO_DEBUGGER(); kfree(dpp); return NULL; } #define opp_regs(id)\ [id] = { OPP_REG_LIST_DCN30(id) } static const struct dcn20_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4) }; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; static struct output_pixel_processor *dcn302_opp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } #define optc_regs(id)\ [id] = { OPTC_COMMON_REG_LIST_DCN3_0(id) } static const struct dcn_optc_registers optc_regs[] = { optc_regs(0), optc_regs(1), optc_regs(2), optc_regs(3), optc_regs(4) }; static const struct dcn_optc_shift optc_shift = { OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn_optc_mask optc_mask = { OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static struct timing_generator *dcn302_timing_generator_create(struct dc_context *ctx, 
uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &optc_regs[instance]; tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; dcn30_timing_generator_init(tgn10); return &tgn10->base; } static const struct dcn30_mpc_registers mpc_regs = { MPC_REG_LIST_DCN3_0(0), MPC_REG_LIST_DCN3_0(1), MPC_REG_LIST_DCN3_0(2), MPC_REG_LIST_DCN3_0(3), MPC_REG_LIST_DCN3_0(4), MPC_OUT_MUX_REG_LIST_DCN3_0(0), MPC_OUT_MUX_REG_LIST_DCN3_0(1), MPC_OUT_MUX_REG_LIST_DCN3_0(2), MPC_OUT_MUX_REG_LIST_DCN3_0(3), MPC_OUT_MUX_REG_LIST_DCN3_0(4), MPC_RMU_GLOBAL_REG_LIST_DCN3AG, MPC_RMU_REG_LIST_DCN3AG(0), MPC_RMU_REG_LIST_DCN3AG(1), MPC_RMU_REG_LIST_DCN3AG(2), MPC_DWB_MUX_REG_LIST_DCN3_0(0), }; static const struct dcn30_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static struct mpc *dcn302_mpc_create(struct dc_context *ctx, int num_mpcc, int num_rmu) { struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); if (!mpc30) return NULL; dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); return &mpc30->base; } #define dsc_regsDCN20(id)\ [id] = { DSC_REG_LIST_DCN20(id) } static const struct dcn20_dsc_registers dsc_regs[] = { dsc_regsDCN20(0), dsc_regsDCN20(1), dsc_regsDCN20(2), dsc_regsDCN20(3), dsc_regsDCN20(4) }; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; static struct display_stream_compressor *dcn302_dsc_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); return NULL; } dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } #define dwbc_regs_dcn3(id)\ [id] = { DWBC_COMMON_REG_LIST_DCN30(id) } static const struct dcn30_dwbc_registers dwbc30_regs[] = { dwbc_regs_dcn3(0) }; static const struct dcn30_dwbc_shift dwbc30_shift = { DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_dwbc_mask dwbc30_mask = { DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static bool dcn302_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); if (!dwbc30) { dm_error("DC: failed to create dwbc30!\n"); return false; } dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); pool->dwbc[i] = &dwbc30->base; } return true; } #define mcif_wb_regs_dcn3(id)\ [id] = { MCIF_WB_COMMON_REG_LIST_DCN30(id) } static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { mcif_wb_regs_dcn3(0) }; static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static bool dcn302_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); if (!mcif_wb30) { dm_error("DC: failed to create mcif_wb30!\n"); return false; } dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], 
&mcif_wb30_shift, &mcif_wb30_mask, i); pool->mcif_wb[i] = &mcif_wb30->base; } return true; } #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST0(id), \ .AUXN_IMPCAL = 0, \ .AUXP_IMPCAL = 0, \ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4) }; static const struct dce110_aux_registers_shift aux_shift = { DCN_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCN_AUX_MASK_SH_LIST(_MASK) }; static struct dce_aux *dcn302_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5) }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; static struct dce_i2c_hw *dcn302_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; #define link_regs(id, phyid)\ [id] = {\ LE_DCN3_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ DPCS_DCN2_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0, A), link_regs(1, B), link_regs(2, C), link_regs(3, D), link_regs(4, E) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT), DPCS_DCN2_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK), DPCS_DCN2_MASK_SH_LIST(_MASK) }; #define aux_regs(id)\ [id] = { DCN2_AUX_REG_LIST(id) } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4) }; #define hpd_regs(id)\ [id] = { HPD_REG_LIST(id) } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4) }; static struct link_encoder *dcn302_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return 
&enc20->enc10.base; } static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCN_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; static struct panel_cntl *dcn302_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } static void read_dce_straps(struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn302_create_audio, .create_stream_encoder = dcn302_stream_encoder_create, .create_hwseq = dcn302_hwseq_create, }; static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; if (ASICREV_IS_DIMGREY_CAVEFISH_P(hw_internal_rev)) return true; return false; } static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) { struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_02_soc; struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_02_ip; DC_LOGGER_INIT(dc->ctx->logger); if (!is_soc_bounding_box_valid(dc)) { DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); return false; } loaded_ip->max_num_otg = pool->pipe_count; loaded_ip->max_num_dpp = pool->pipe_count; loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = { 0 }; if (dc->ctx->dc_bios->funcs->get_soc_bb_info( dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { DC_FP_START(); dcn302_fpu_init_soc_bounding_box(bb_info); DC_FP_END(); } } return true; } static void dcn302_resource_destruct(struct resource_pool *pool) { unsigned int i; for (i = 0; i < pool->stream_enc_count; i++) { if (pool->stream_enc[i] != NULL) { if (pool->stream_enc[i]->vpg != NULL) { kfree(DCN30_VPG_FROM_VPG(pool->stream_enc[i]->vpg)); pool->stream_enc[i]->vpg = NULL; } if (pool->stream_enc[i]->afmt != NULL) { kfree(DCN30_AFMT_FROM_AFMT(pool->stream_enc[i]->afmt)); pool->stream_enc[i]->afmt = NULL; } kfree(DCN10STRENC_FROM_STRENC(pool->stream_enc[i])); pool->stream_enc[i] = NULL; } } for (i = 0; i < pool->res_cap->num_dsc; i++) { if (pool->dscs[i] != NULL) dcn20_dsc_destroy(&pool->dscs[i]); } if (pool->mpc != NULL) { kfree(TO_DCN20_MPC(pool->mpc)); pool->mpc = NULL; } if (pool->hubbub != NULL) { kfree(pool->hubbub); pool->hubbub = NULL; } for (i = 0; i < pool->pipe_count; i++) { if (pool->dpps[i] != NULL) { kfree(TO_DCN20_DPP(pool->dpps[i])); pool->dpps[i] = NULL; } if (pool->hubps[i] != NULL) { kfree(TO_DCN20_HUBP(pool->hubps[i])); pool->hubps[i] = NULL; } if (pool->irqs != NULL) dal_irq_service_destroy(&pool->irqs); } for (i = 0; i < pool->res_cap->num_ddc; i++) { if (pool->engines[i] != NULL) dce110_engine_destroy(&pool->engines[i]); if (pool->hw_i2cs[i] != NULL) { kfree(pool->hw_i2cs[i]); pool->hw_i2cs[i] = NULL; } if (pool->sw_i2cs[i] != NULL) { kfree(pool->sw_i2cs[i]); pool->sw_i2cs[i] = NULL; } } for (i = 0; i < 
pool->res_cap->num_opp; i++) { if (pool->opps[i] != NULL) pool->opps[i]->funcs->opp_destroy(&pool->opps[i]); } for (i = 0; i < pool->res_cap->num_timing_generator; i++) { if (pool->timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->timing_generators[i])); pool->timing_generators[i] = NULL; } } for (i = 0; i < pool->res_cap->num_dwb; i++) { if (pool->dwbc[i] != NULL) { kfree(TO_DCN30_DWBC(pool->dwbc[i])); pool->dwbc[i] = NULL; } if (pool->mcif_wb[i] != NULL) { kfree(TO_DCN30_MMHUBBUB(pool->mcif_wb[i])); pool->mcif_wb[i] = NULL; } } for (i = 0; i < pool->audio_count; i++) { if (pool->audios[i]) dce_aud_destroy(&pool->audios[i]); } for (i = 0; i < pool->clk_src_count; i++) { if (pool->clock_sources[i] != NULL) dcn20_clock_source_destroy(&pool->clock_sources[i]); } if (pool->dp_clock_source != NULL) dcn20_clock_source_destroy(&pool->dp_clock_source); for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { if (pool->mpc_lut[i] != NULL) { dc_3dlut_func_release(pool->mpc_lut[i]); pool->mpc_lut[i] = NULL; } if (pool->mpc_shaper[i] != NULL) { dc_transfer_func_release(pool->mpc_shaper[i]); pool->mpc_shaper[i] = NULL; } } for (i = 0; i < pool->pipe_count; i++) { if (pool->multiple_abms[i] != NULL) dce_abm_destroy(&pool->multiple_abms[i]); } if (pool->psr != NULL) dmub_psr_destroy(&pool->psr); if (pool->dccg != NULL) dcn_dccg_destroy(&pool->dccg); if (pool->oem_device != NULL) { struct dc *dc = pool->oem_device->ctx->dc; dc->link_srv->destroy_ddc_service(&pool->oem_device); } } static void dcn302_destroy_resource_pool(struct resource_pool **pool) { dcn302_resource_destruct(*pool); kfree(*pool); *pool = NULL; } void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { DC_FP_START(); dcn302_fpu_update_bw_bounding_box(dc, bw_params); DC_FP_END(); } static void dcn302_get_panel_config_defaults(struct dc_panel_config *panel_config) { *panel_config = panel_config_defaults; } static struct resource_funcs dcn302_res_pool_funcs = { .destroy = dcn302_destroy_resource_pool, .link_enc_create = dcn302_link_encoder_create, .panel_cntl_create = dcn302_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, .set_mcif_arb_params = dcn30_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, .update_bw_bounding_box = dcn302_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn302_get_panel_config_defaults, }; static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN3_02() }; static const struct dccg_shift dccg_shift = { DCCG_MASK_SH_LIST_DCN3_02(__SHIFT) }; static const struct dccg_mask dccg_mask = { DCCG_MASK_SH_LIST_DCN3_02(_MASK) }; #define 
abm_regs(id)\ [id] = { ABM_DCN302_REG_LIST(id) } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), abm_regs(1), abm_regs(2), abm_regs(3), abm_regs(4) }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN30(_MASK) }; static bool dcn302_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct resource_pool *pool) { int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; struct ddc_service_init_data ddc_init_data = {0}; ctx->dc_bios->regs = &bios_regs; pool->res_cap = &res_cap_dcn302; pool->funcs = &dcn302_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->underlay_pipe_index = NO_UNDERLAY_PIPE; pool->pipe_count = pool->res_cap->num_timing_generator; pool->mpcc_count = pool->res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by derfault*/ dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; /* total size = mall per channel * num channels * 1024 * 1024 */ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.max_slave_planes = 2; dc->caps.max_slave_yuv_planes = 2; dc->caps.max_slave_rgb_planes = 2; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; dc->caps.color.dpp.input_lut_shared = 0; dc->caps.color.dpp.icsc = 1; dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr dc->caps.color.dpp.dgam_rom_caps.srgb = 1; dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; dc->caps.color.dpp.dgam_rom_caps.pq = 1; dc->caps.color.dpp.dgam_rom_caps.hlg = 1; dc->caps.color.dpp.post_csc = 1; dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; dc->caps.color.dpp.hw_3d_lut = 1; dc->caps.color.dpp.ogam_ram = 1; // no OGAM ROM on DCN3 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.dpp.ogam_rom_caps.pq = 0; dc->caps.color.dpp.ogam_rom_caps.hlg = 0; dc->caps.color.dpp.ocsc = 0; dc->caps.color.mpc.gamut_remap = 1; dc->caps.color.mpc.num_3dluts = pool->res_cap->num_mpc_3dlut; //3 dc->caps.color.mpc.ogam_ram = 1; dc->caps.color.mpc.ogam_rom_caps.srgb = 0; dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; /* read VBIOS LTTPR caps */ if (ctx->dc_bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; uint8_t is_vbios_lttpr_enable = 0; bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; } if (ctx->dc_bios->funcs->get_lttpr_interop) { enum bp_result bp_query_result; uint8_t is_vbios_interop_enabled = 0; 
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); /************************************************* * Create resources * *************************************************/ /* Clock Sources for Pixel Clock*/ pool->clock_sources[DCN302_CLK_SRC_PLL0] = dcn302_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->clock_sources[DCN302_CLK_SRC_PLL1] = dcn302_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->clock_sources[DCN302_CLK_SRC_PLL2] = dcn302_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->clock_sources[DCN302_CLK_SRC_PLL3] = dcn302_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->clock_sources[DCN302_CLK_SRC_PLL4] = dcn302_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->clk_src_count = DCN302_CLK_SRC_TOTAL; /* todo: not reuse phy_pll registers */ pool->dp_clock_source = dcn302_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->clk_src_count; i++) { if (pool->clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } } /* DCCG */ pool->dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); if (pool->dccg == NULL) { dm_error("DC: failed to create dccg!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* PP Lib and SMU interfaces */ init_soc_bounding_box(dc, pool); /* DML */ dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); /* IRQ */ init_data.ctx = dc->ctx; pool->irqs = dal_irq_service_dcn302_create(&init_data); if (!pool->irqs) goto create_fail; /* HUBBUB */ pool->hubbub = dcn302_hubbub_create(ctx); if (pool->hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto create_fail; } /* HUBPs, DPPs, OPPs and TGs */ for (i = 0; i < pool->pipe_count; i++) { pool->hubps[i] = dcn302_hubp_create(ctx, i); if (pool->hubps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubps!\n"); goto create_fail; } pool->dpps[i] = dcn302_dpp_create(ctx, i); if (pool->dpps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dpps!\n"); goto create_fail; } } for (i = 0; i < pool->res_cap->num_opp; i++) { pool->opps[i] = dcn302_opp_create(ctx, i); if (pool->opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto create_fail; } } for (i = 0; i < pool->res_cap->num_timing_generator; i++) { pool->timing_generators[i] = dcn302_timing_generator_create(ctx, i); if (pool->timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; } } pool->timing_generator_count = i; /* PSR */ pool->psr = dmub_psr_create(ctx); if (pool->psr == NULL) { dm_error("DC: failed to create psr!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* ABMs */ for (i = 0; i < pool->res_cap->num_timing_generator; i++) { pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); if (pool->multiple_abms[i] == NULL) { dm_error("DC: failed to create abm for pipe %d!\n", i); 
BREAK_TO_DEBUGGER(); goto create_fail; } } /* MPC and DSC */ pool->mpc = dcn302_mpc_create(ctx, pool->mpcc_count, pool->res_cap->num_mpc_3dlut); if (pool->mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto create_fail; } for (i = 0; i < pool->res_cap->num_dsc; i++) { pool->dscs[i] = dcn302_dsc_create(ctx, i); if (pool->dscs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create display stream compressor %d!\n", i); goto create_fail; } } /* DWB and MMHUBBUB */ if (!dcn302_dwbc_create(ctx, pool)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dwbc!\n"); goto create_fail; } if (!dcn302_mmhubbub_create(ctx, pool)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mcif_wb!\n"); goto create_fail; } /* AUX and I2C */ for (i = 0; i < pool->res_cap->num_ddc; i++) { pool->engines[i] = dcn302_aux_engine_create(ctx, i); if (pool->engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC:failed to create aux engine!!\n"); goto create_fail; } pool->hw_i2cs[i] = dcn302_i2c_hw_create(ctx, i); if (pool->hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC:failed to create hw i2c!!\n"); goto create_fail; } pool->sw_i2cs[i] = NULL; } /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ dcn302_hw_sequencer_construct(dc); dc->caps.max_planes = pool->pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { ddc_init_data.ctx = dc->ctx; ddc_init_data.link = NULL; ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); } else { pool->oem_device = NULL; } return true; create_fail: dcn302_resource_destruct(pool); return false; } struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) { struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dcn302_resource_construct(init_data->num_virtual_links, dc, pool)) return pool; BREAK_TO_DEBUGGER(); kfree(pool); return NULL; }
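/*
 * Illustrative sketch only (annotation, not part of the upstream file): how a
 * caller is expected to pair dcn302_create_resource_pool() with the pool's
 * destroy hook. dcn302_res_pool_funcs.destroy points at
 * dcn302_destroy_resource_pool(), which runs dcn302_resource_destruct() and
 * frees the pool. The wrapper name below is hypothetical.
 */
static void example_dcn302_pool_lifecycle(const struct dc_init_data *init_data,
					  struct dc *dc)
{
	struct resource_pool *pool = dcn302_create_resource_pool(init_data, dc);

	if (!pool)
		return;

	/* ... pool is used by the display core here ... */

	/* Teardown goes through the funcs table filled in by construct(). */
	pool->funcs->destroy(&pool);	/* dcn302_destroy_resource_pool() */
}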
linux-master
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dcn302_hwseq.h"

#include "dcn30/dcn30_init.h"

#include "dc.h"

#include "dcn302_init.h"

void dcn302_hw_sequencer_construct(struct dc *dc)
{
	dcn30_hw_sequencer_construct(dc);

	dc->hwseq->funcs.dpp_pg_control = dcn302_dpp_pg_control;
	dc->hwseq->funcs.hubp_pg_control = dcn302_hubp_pg_control;
	dc->hwseq->funcs.dsc_pg_control = dcn302_dsc_pg_control;
}
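/*
 * Annotation (not part of the upstream file): the DCN3.02 sequencer is the
 * DCN3.0 sequencer with only the power-gating hooks swapped out.
 * dcn30_hw_sequencer_construct() fills in dc->hwseq->funcs first, and the
 * three assignments above then override the DPP, HUBP and DSC power-gating
 * callbacks with the DCN3.02-specific implementations declared in
 * dcn302_hwseq.h.
 */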
linux-master
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dce/dce_11_2_d.h" #include "dce/dce_11_2_sh_mask.h" #include "gmc/gmc_8_1_sh_mask.h" #include "gmc/gmc_8_1_d.h" #include "include/logger_interface.h" #include "dce112_compressor.h" #define DC_LOGGER \ cp110->base.ctx->logger #define DCP_REG(reg)\ (reg + cp110->offsets.dcp_offset) #define DMIF_REG(reg)\ (reg + cp110->offsets.dmif_offset) static const struct dce112_compressor_reg_offsets reg_offsets[] = { { .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), .dmif_offset = (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL), }, { .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), .dmif_offset = (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL), }, { .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), .dmif_offset = (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL), } }; static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600; enum fbc_idle_force { /* Bit 0 - Display registers updated */ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001, /* Bit 2 - FBC_GRPH_COMP_EN register updated */ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002, /* Bit 3 - FBC_SRC_SEL register updated */ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004, /* Bit 4 - FBC_MIN_COMPRESSION register updated */ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008, /* Bit 5 - FBC_ALPHA_COMP_EN register updated */ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010, /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020, /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040, /* Bit 24 - Memory write to region 0 defined by MC registers. */ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000, /* Bit 25 - Memory write to region 1 defined by MC registers */ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000, /* Bit 26 - Memory write to region 2 defined by MC registers */ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000, /* Bit 27 - Memory write to region 3 defined by MC registers. 
*/ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000, /* Bit 28 - Memory write from any client other than MCIF */ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000, /* Bit 29 - CG statics screen signal is inactive */ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000, }; static uint32_t lpt_size_alignment(struct dce112_compressor *cp110) { /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */ return cp110->base.raw_size * cp110->base.banks_num * cp110->base.dram_channels_num; } static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110, uint32_t lpt_control) { /*LPT MC Config */ if (cp110->base.options.bits.LPT_MC_CONFIG == 1) { /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS): * 00 - 1 CHANNEL * 01 - 2 CHANNELS * 02 - 4 OR 6 CHANNELS * (Only for discrete GPU, N/A for CZ) * 03 - 8 OR 12 CHANNELS * (Only for discrete GPU, N/A for CZ) */ switch (cp110->base.dram_channels_num) { case 2: set_reg_field_value( lpt_control, 1, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_NUM_PIPES); break; case 1: set_reg_field_value( lpt_control, 0, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_NUM_PIPES); break; default: DC_LOG_WARNING( "%s: Invalid LPT NUM_PIPES!!!", __func__); break; } /* The mapping for LPT NUM_BANKS is in * GRPH_CONTROL.GRPH_NUM_BANKS register field * Specifies the number of memory banks for tiling * purposes. Only applies to 2D and 3D tiling modes. * POSSIBLE VALUES: * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */ switch (cp110->base.banks_num) { case 16: set_reg_field_value( lpt_control, 3, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_NUM_BANKS); break; case 8: set_reg_field_value( lpt_control, 2, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_NUM_BANKS); break; case 4: set_reg_field_value( lpt_control, 1, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_NUM_BANKS); break; case 2: set_reg_field_value( lpt_control, 0, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_NUM_BANKS); break; default: DC_LOG_WARNING( "%s: Invalid LPT NUM_BANKS!!!", __func__); break; } /* The mapping is in DMIF_ADDR_CALC. * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for * Carrizo specifies the memory interleave per pipe. * It effectively specifies the location of pipe bits in * the memory address. * POSSIBLE VALUES: * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte * interleave * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte * interleave */ switch (cp110->base.channel_interleave_size) { case 256: /*256B */ set_reg_field_value( lpt_control, 0, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE); break; case 512: /*512B */ set_reg_field_value( lpt_control, 1, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE); break; default: DC_LOG_WARNING( "%s: Invalid LPT INTERLEAVE_SIZE!!!", __func__); break; } /* The mapping for LOW_POWER_TILING_ROW_SIZE is in * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field * for Carrizo. Specifies the size of dram row in bytes. * This should match up with NOOFCOLS field in * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns). * This register DMIF_ADDR_CALC is not used by the * hardware as it is only used for addrlib assertions. 
* POSSIBLE VALUES: * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row * boundary * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row * boundary * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row * boundary */ switch (cp110->base.raw_size) { case 4096: /*4 KB */ set_reg_field_value( lpt_control, 2, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_ROW_SIZE); break; case 2048: set_reg_field_value( lpt_control, 1, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_ROW_SIZE); break; case 1024: set_reg_field_value( lpt_control, 0, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_ROW_SIZE); break; default: DC_LOG_WARNING( "%s: Invalid LPT ROW_SIZE!!!", __func__); break; } } else { DC_LOG_WARNING( "%s: LPT MC Configuration is not provided", __func__); } return lpt_control; } static bool is_source_bigger_than_epanel_size( struct dce112_compressor *cp110, uint32_t source_view_width, uint32_t source_view_height) { if (cp110->base.embedded_panel_h_size != 0 && cp110->base.embedded_panel_v_size != 0 && ((source_view_width * source_view_height) > (cp110->base.embedded_panel_h_size * cp110->base.embedded_panel_v_size))) return true; return false; } static uint32_t align_to_chunks_number_per_line( struct dce112_compressor *cp110, uint32_t pixels) { return 256 * ((pixels + 255) / 256); } static void wait_for_fbc_state_changed( struct dce112_compressor *cp110, bool enabled) { uint8_t counter = 0; uint32_t addr = mmFBC_STATUS; uint32_t value; while (counter < 10) { value = dm_read_reg(cp110->base.ctx, addr); if (get_reg_field_value( value, FBC_STATUS, FBC_ENABLE_STATUS) == enabled) break; udelay(10); counter++; } if (counter == 10) { DC_LOG_WARNING( "%s: wait counter exceeded, changes to HW not applied", __func__); } } void dce112_compressor_power_up_fbc(struct compressor *compressor) { uint32_t value; uint32_t addr; addr = mmFBC_CNTL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN); set_reg_field_value(value, 1, FBC_CNTL, FBC_EN); set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE); if (compressor->options.bits.CLK_GATING_DISABLED == 1) { /* HW needs to do power measurement comparison. 
*/ set_reg_field_value( value, 0, FBC_CNTL, FBC_COMP_CLK_GATE_EN); } dm_write_reg(compressor->ctx, addr, value); addr = mmFBC_COMP_MODE; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN); set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN); set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN); dm_write_reg(compressor->ctx, addr, value); addr = mmFBC_COMP_CNTL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN); dm_write_reg(compressor->ctx, addr, value); /*FBC_MIN_COMPRESSION 0 ==> 2:1 */ /* 1 ==> 4:1 */ /* 2 ==> 8:1 */ /* 0xF ==> 1:1 */ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION); dm_write_reg(compressor->ctx, addr, value); compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1; value = 0; dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value); value = 0xFFFFFF; dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value); } void dce112_compressor_enable_fbc( struct compressor *compressor, uint32_t paths_num, struct compr_addr_and_pitch_params *params) { struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor); if (compressor->options.bits.FBC_SUPPORT && (compressor->options.bits.DUMMY_BACKEND == 0) && (!dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) && (!is_source_bigger_than_epanel_size( cp110, params->source_view_width, params->source_view_height))) { uint32_t addr; uint32_t value; /* Before enabling FBC first need to enable LPT if applicable * LPT state should always be changed (enable/disable) while FBC * is disabled */ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) && (params->source_view_width * params->source_view_height <= dce11_one_lpt_channel_max_resolution)) { dce112_compressor_enable_lpt(compressor); } addr = mmFBC_CNTL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN); set_reg_field_value( value, params->inst, FBC_CNTL, FBC_SRC_SEL); dm_write_reg(compressor->ctx, addr, value); /* Keep track of enum controller_id FBC is attached to */ compressor->is_enabled = true; compressor->attached_inst = params->inst; cp110->offsets = reg_offsets[params->inst]; /*Toggle it as there is bug in HW */ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN); dm_write_reg(compressor->ctx, addr, value); set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN); dm_write_reg(compressor->ctx, addr, value); wait_for_fbc_state_changed(cp110, true); } } void dce112_compressor_disable_fbc(struct compressor *compressor) { struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor); if (compressor->options.bits.FBC_SUPPORT && dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) { uint32_t reg_data; /* Turn off compression */ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL); set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN); dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data); /* Reset enum controller_id to undefined */ compressor->attached_inst = 0; compressor->is_enabled = false; /* Whenever disabling FBC make sure LPT is disabled if LPT * supported */ if (compressor->options.bits.LPT_SUPPORT) dce112_compressor_disable_lpt(compressor); wait_for_fbc_state_changed(cp110, false); } } bool dce112_compressor_is_fbc_enabled_in_hw( struct compressor *compressor, uint32_t *inst) { /* Check the hardware register */ uint32_t value; value = dm_read_reg(compressor->ctx, mmFBC_STATUS); if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) { if (inst != 
NULL) *inst = compressor->attached_inst; return true; } value = dm_read_reg(compressor->ctx, mmFBC_MISC); if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) { value = dm_read_reg(compressor->ctx, mmFBC_CNTL); if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) { if (inst != NULL) *inst = compressor->attached_inst; return true; } } return false; } bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *compressor) { /* Check the hardware register */ uint32_t value = dm_read_reg(compressor->ctx, mmLOW_POWER_TILING_CONTROL); return get_reg_field_value( value, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_ENABLE); } void dce112_compressor_program_compressed_surface_address_and_pitch( struct compressor *compressor, struct compr_addr_and_pitch_params *params) { struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor); uint32_t value = 0; uint32_t fbc_pitch = 0; uint32_t compressed_surf_address_low_part = compressor->compr_surface_address.addr.low_part; /* Clear content first. */ dm_write_reg( compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH), 0); dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0); if (compressor->options.bits.LPT_SUPPORT) { uint32_t lpt_alignment = lpt_size_alignment(cp110); if (lpt_alignment != 0) { compressed_surf_address_low_part = ((compressed_surf_address_low_part + (lpt_alignment - 1)) / lpt_alignment) * lpt_alignment; } } /* Write address, HIGH has to be first. */ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH), compressor->compr_surface_address.addr.high_part); dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), compressed_surf_address_low_part); fbc_pitch = align_to_chunks_number_per_line( cp110, params->source_view_width); if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1) fbc_pitch = fbc_pitch / 8; else DC_LOG_WARNING( "%s: Unexpected DCE11 compression ratio", __func__); /* Clear content first. */ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0); /* Write FBC Pitch. 
*/ set_reg_field_value( value, fbc_pitch, GRPH_COMPRESS_PITCH, GRPH_COMPRESS_PITCH); dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value); } void dce112_compressor_disable_lpt(struct compressor *compressor) { struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor); uint32_t value; uint32_t addr; uint32_t inx; /* Disable all pipes LPT Stutter */ for (inx = 0; inx < 3; inx++) { value = dm_read_reg( compressor->ctx, DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH)); set_reg_field_value( value, 0, DPG_PIPE_STUTTER_CONTROL_NONLPTCH, STUTTER_ENABLE_NONLPTCH); dm_write_reg( compressor->ctx, DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value); } /* Disable Underlay pipe LPT Stutter */ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0, DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH, STUTTER_ENABLE_NONLPTCH); dm_write_reg(compressor->ctx, addr, value); /* Disable LPT */ addr = mmLOW_POWER_TILING_CONTROL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_ENABLE); dm_write_reg(compressor->ctx, addr, value); /* Clear selection of Channel(s) containing Compressed Surface */ addr = mmGMCON_LPT_TARGET; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0xFFFFFFFF, GMCON_LPT_TARGET, STCTRL_LPT_TARGET); dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value); } void dce112_compressor_enable_lpt(struct compressor *compressor) { struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor); uint32_t value; uint32_t addr; uint32_t value_control; uint32_t channels; /* Enable LPT Stutter from Display pipe */ value = dm_read_reg(compressor->ctx, DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH)); set_reg_field_value( value, 1, DPG_PIPE_STUTTER_CONTROL_NONLPTCH, STUTTER_ENABLE_NONLPTCH); dm_write_reg(compressor->ctx, DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value); /* Enable Underlay pipe LPT Stutter */ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 1, DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH, STUTTER_ENABLE_NONLPTCH); dm_write_reg(compressor->ctx, addr, value); /* Selection of Channel(s) containing Compressed Surface: 0xfffffff * will disable LPT. * STCTRL_LPT_TARGETn corresponds to channel n. 
*/ addr = mmLOW_POWER_TILING_CONTROL; value_control = dm_read_reg(compressor->ctx, addr); channels = get_reg_field_value(value_control, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_MODE); addr = mmGMCON_LPT_TARGET; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, channels + 1, /* not mentioned in programming guide, but follow DCE8.1 */ GMCON_LPT_TARGET, STCTRL_LPT_TARGET); dm_write_reg(compressor->ctx, addr, value); /* Enable LPT */ addr = mmLOW_POWER_TILING_CONTROL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 1, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_ENABLE); dm_write_reg(compressor->ctx, addr, value); } void dce112_compressor_program_lpt_control( struct compressor *compressor, struct compr_addr_and_pitch_params *params) { struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor); uint32_t rows_per_channel; uint32_t lpt_alignment; uint32_t source_view_width; uint32_t source_view_height; uint32_t lpt_control = 0; if (!compressor->options.bits.LPT_SUPPORT) return; lpt_control = dm_read_reg(compressor->ctx, mmLOW_POWER_TILING_CONTROL); /* POSSIBLE VALUES for Low Power Tiling Mode: * 00 - Use channel 0 * 01 - Use Channel 0 and 1 * 02 - Use Channel 0,1,2,3 * 03 - reserved */ switch (compressor->lpt_channels_num) { /* case 2: * Use Channel 0 & 1 / Not used for DCE 11 */ case 1: /*Use Channel 0 for LPT for DCE 11 */ set_reg_field_value( lpt_control, 0, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_MODE); break; default: DC_LOG_WARNING( "%s: Invalid selected DRAM channels for LPT!!!", __func__); break; } lpt_control = lpt_memory_control_config(cp110, lpt_control); /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on * FBC compressed surface pitch. * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height * * Surface Pitch) / (Row Size * Number of Channels * * Number of Banks)). */ rows_per_channel = 0; lpt_alignment = lpt_size_alignment(cp110); source_view_width = align_to_chunks_number_per_line( cp110, params->source_view_width); source_view_height = (params->source_view_height + 1) & (~0x1); if (lpt_alignment != 0) { rows_per_channel = source_view_width * source_view_height * 4; rows_per_channel = (rows_per_channel % lpt_alignment) ? (rows_per_channel / lpt_alignment + 1) : rows_per_channel / lpt_alignment; } set_reg_field_value( lpt_control, rows_per_channel, LOW_POWER_TILING_CONTROL, LOW_POWER_TILING_ROWS_PER_CHAN); dm_write_reg(compressor->ctx, mmLOW_POWER_TILING_CONTROL, lpt_control); } /* * DCE 11 Frame Buffer Compression Implementation */ void dce112_compressor_set_fbc_invalidation_triggers( struct compressor *compressor, uint32_t fbc_trigger) { /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19) * for DCE 11 regions cannot be used - does not work with S/G */ uint32_t addr = mmFBC_CLIENT_REGION_MASK; uint32_t value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0, FBC_CLIENT_REGION_MASK, FBC_MEMORY_REGION_MASK); dm_write_reg(compressor->ctx, addr, value); /* Setup events when to clear all CSM entries (effectively marking * current compressed data invalid) * For DCE 11 CSM metadata 11111 means - "Not Compressed" * Used as the initial value of the metadata sent to the compressor * after invalidation, to indicate that the compressor should attempt * to compress all chunks on the current pass. Also used when the chunk * is not successfully written to memory. * When this CSM value is detected, FBC reads from the uncompressed * buffer. 
Set events according to passed in value, these events are * valid for DCE11: * - bit 0 - display register updated * - bit 28 - memory write from any client except from MCIF * - bit 29 - CG static screen signal is inactive * In addition, DCE11.1 also needs to set new DCE11.1 specific events * that are used to trigger invalidation on certain register changes, * for example enabling of Alpha Compression may trigger invalidation of * FBC once bit is set. These events are as follows: * - Bit 2 - FBC_GRPH_COMP_EN register updated * - Bit 3 - FBC_SRC_SEL register updated * - Bit 4 - FBC_MIN_COMPRESSION register updated * - Bit 5 - FBC_ALPHA_COMP_EN register updated * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */ addr = mmFBC_IDLE_FORCE_CLEAR_MASK; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, fbc_trigger | FBC_IDLE_FORCE_GRPH_COMP_EN | FBC_IDLE_FORCE_SRC_SEL_CHANGE | FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE | FBC_IDLE_FORCE_ALPHA_COMP_EN | FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN | FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF, FBC_IDLE_FORCE_CLEAR_MASK, FBC_IDLE_FORCE_CLEAR_MASK); dm_write_reg(compressor->ctx, addr, value); } void dce112_compressor_construct(struct dce112_compressor *compressor, struct dc_context *ctx) { struct dc_bios *bp = ctx->dc_bios; struct embedded_panel_info panel_info; compressor->base.options.raw = 0; compressor->base.options.bits.FBC_SUPPORT = true; compressor->base.options.bits.LPT_SUPPORT = true; /* For DCE 11 always use one DRAM channel for LPT */ compressor->base.lpt_channels_num = 1; compressor->base.options.bits.DUMMY_BACKEND = false; /* Check if this system has more than 1 DRAM channel; if only 1 then LPT * should not be supported */ if (compressor->base.memory_bus_width == 64) compressor->base.options.bits.LPT_SUPPORT = false; compressor->base.options.bits.CLK_GATING_DISABLED = false; compressor->base.ctx = ctx; compressor->base.embedded_panel_h_size = 0; compressor->base.embedded_panel_v_size = 0; compressor->base.memory_bus_width = ctx->asic_id.vram_width; compressor->base.allocated_size = 0; compressor->base.preferred_requested_size = 0; compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID; compressor->base.banks_num = 0; compressor->base.raw_size = 0; compressor->base.channel_interleave_size = 0; compressor->base.dram_channels_num = 0; compressor->base.lpt_channels_num = 0; compressor->base.attached_inst = 0; compressor->base.is_enabled = false; if (BP_RESULT_OK == bp->funcs->get_embedded_panel_info(bp, &panel_info)) { compressor->base.embedded_panel_h_size = panel_info.lcd_timing.horizontal_addressable; compressor->base.embedded_panel_v_size = panel_info.lcd_timing.vertical_addressable; } } struct compressor *dce112_compressor_create(struct dc_context *ctx) { struct dce112_compressor *cp110 = kzalloc(sizeof(struct dce112_compressor), GFP_KERNEL); if (!cp110) return NULL; dce112_compressor_construct(cp110, ctx); return &cp110->base; } void dce112_compressor_destroy(struct compressor **compressor) { kfree(TO_DCE112_COMPRESSOR(*compressor)); *compressor = NULL; }
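/*
 * Worked example (annotation only; the memory-configuration values below are
 * assumptions, not read from hardware) for the LOW_POWER_TILING_ROWS_PER_CHAN
 * computation in dce112_compressor_program_lpt_control() above:
 *
 *   raw_size = 1024 B, banks_num = 8, dram_channels_num = 2
 *	-> lpt_size_alignment() = 1024 * 8 * 2 = 16384 B
 *   source_view_width  = 1920 -> align_to_chunks_number_per_line() = 2048
 *   source_view_height = 1080 (already even, left unchanged)
 *
 *   rows_per_channel = roundup((2048 * 1080 * 4) / 16384)
 *		      = roundup(8847360 / 16384)
 *		      = 540
 */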
linux-master
drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "core_types.h" #include "dce112_hw_sequencer.h" #include "dce110/dce110_hw_sequencer.h" /* include DCE11.2 register header files */ #include "dce/dce_11_2_d.h" #include "dce/dce_11_2_sh_mask.h" struct dce112_hw_seq_reg_offsets { uint32_t crtc; }; static const struct dce112_hw_seq_reg_offsets reg_offsets[] = { { .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), }, { .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL), } }; #define HW_REG_CRTC(reg, id)\ (reg + reg_offsets[id].crtc) /******************************************************************************* * Private definitions ******************************************************************************/ static void dce112_init_pte(struct dc_context *ctx) { uint32_t addr; uint32_t value = 0; uint32_t chunk_int = 0; uint32_t chunk_mul = 0; addr = mmDVMM_PTE_REQ; value = dm_read_reg(ctx, addr); chunk_int = get_reg_field_value( value, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_INT); chunk_mul = get_reg_field_value( value, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER); if (chunk_int != 0x4 || chunk_mul != 0x4) { set_reg_field_value( value, 255, DVMM_PTE_REQ, MAX_PTEREQ_TO_ISSUE); set_reg_field_value( value, 4, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_INT); set_reg_field_value( value, 4, DVMM_PTE_REQ, HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER); dm_write_reg(ctx, addr, value); } } static bool dce112_enable_display_power_gating( struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, enum pipe_gating_control power_gating) { enum bp_result bp_result = BP_RESULT_OK; enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) cntl = ASIC_PIPE_ENABLE; else cntl = ASIC_PIPE_DISABLE; if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) { bp_result = dcb->funcs->enable_disp_power_gating( dcb, controller_id + 1, cntl); /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2 * by default when command table is called */ dm_write_reg(ctx, HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id), 0); } if 
(power_gating != PIPE_GATING_CONTROL_ENABLE) dce112_init_pte(ctx); if (bp_result == BP_RESULT_OK) return true; else return false; } void dce112_hw_sequencer_construct(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and * structure */ dce110_hw_sequencer_construct(dc); dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating; }
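/*
 * Annotation (not part of the upstream file):
 * dce112_enable_display_power_gating() maps
 * PIPE_GATING_CONTROL_{INIT,ENABLE,DISABLE} onto the VBIOS command-table
 * actions ASIC_PIPE_{INIT,ENABLE,DISABLE}, clears CRTC_MASTER_UPDATE_MODE
 * back to 0 after the command table runs (the BIOS leaves it set to 2), and
 * re-programs the DVMM PTE request parameters via dce112_init_pte() whenever
 * the request is not a gating enable.
 */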
linux-master
drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dce110/dce110_resource.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce/dce_mem_input.h" #include "dce/dce_transform.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_audio.h" #include "dce/dce_opp.h" #include "dce/dce_ipp.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce112/dce112_hw_sequencer.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dce/dce_panel_cntl.h" #include "reg_helper.h" #include "dce/dce_11_2_d.h" #include "dce/dce_11_2_sh_mask.h" #include "dce100/dce100_resource.h" #include "dce112_resource.h" #define DC_LOGGER \ dc->ctx->logger #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif enum dce112_clk_src_array_id { DCE112_CLK_SRC_PLL0, DCE112_CLK_SRC_PLL1, DCE112_CLK_SRC_PLL2, DCE112_CLK_SRC_PLL3, DCE112_CLK_SRC_PLL4, 
DCE112_CLK_SRC_PLL5, DCE112_CLK_SRC_TOTAL }; static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce110_aux_registers_shift aux_shift = { DCE_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCE_AUX_MASK_SH_LIST(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; static const struct dce_panel_cntl_registers panel_cntl_regs[] = { { DCE_PANEL_CNTL_REG_LIST() } }; static const struct dce_panel_cntl_shift panel_cntl_shift = { DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) }; static const struct dce_panel_cntl_mask panel_cntl_mask = { DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE110_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { 
stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE112(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE112(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_112_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5) }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E), clk_src_regs(5, F) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps polaris_10_resource_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */ .num_ddc = 6, }; static const struct resource_caps polaris_11_resource_cap = { .num_timing_generator = 5, .num_audio = 5, .num_stream_encoder = 5, .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? 
*/ .num_ddc = 5, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 }, 64, 64 }; static const struct dc_debug_options debug_defaults = { .enable_legacy_fast_update = true, }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x4819 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static int map_transmitter_id_to_phy_instance( enum transmitter transmitter) { switch (transmitter) { case TRANSMITTER_UNIPHY_A: return 0; case TRANSMITTER_UNIPHY_B: return 1; case TRANSMITTER_UNIPHY_C: return 2; case TRANSMITTER_UNIPHY_D: return 3; case TRANSMITTER_UNIPHY_E: return 4; case TRANSMITTER_UNIPHY_F: return 5; case TRANSMITTER_UNIPHY_G: return 6; default: ASSERT(0); return 0; } } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce112_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce110_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct stream_encoder *dce112_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE112_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE112_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE112_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce112_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce112_stream_encoder_create, .create_hwseq = dce112_hwseq_create, }; #define mi_inst_regs(id) { MI_DCE11_2_REG_LIST(id) } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE11_2_MASK_SH_LIST(__SHIFT) }; static const struct dce_mem_input_mask mi_masks = { MI_DCE11_2_MASK_SH_LIST(_MASK) }; 
static struct mem_input *dce112_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); return &dce_mi->base; } static void dce112_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce112_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->lb_memory_size = 0x1404; /*5124*/ return &transform->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; static struct link_encoder *dce112_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; if (!enc110) return NULL; link_regs_id = map_transmitter_id_to_phy_instance(enc_init_data->transmitter); dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], &panel_cntl_shift, &panel_cntl_mask); return &panel_cntl->base; } static struct input_pixel_processor *dce112_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static struct output_pixel_processor *dce112_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } static struct dce_aux *dce112_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; static 
struct dce_i2c_hw *dce112_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct clock_source *dce112_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void dce112_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static void dce112_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce112_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dce112_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.dp_clock_source != NULL) dce112_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } static struct clock_source *find_matching_pll( struct resource_context *res_ctx, const struct resource_pool *pool, const struct dc_stream_state *const stream) { switch (stream->link->link_enc->transmitter) { case TRANSMITTER_UNIPHY_A: return pool->clock_sources[DCE112_CLK_SRC_PLL0]; case TRANSMITTER_UNIPHY_B: return pool->clock_sources[DCE112_CLK_SRC_PLL1]; case TRANSMITTER_UNIPHY_C: return pool->clock_sources[DCE112_CLK_SRC_PLL2]; case TRANSMITTER_UNIPHY_D: return pool->clock_sources[DCE112_CLK_SRC_PLL3]; case TRANSMITTER_UNIPHY_E: return pool->clock_sources[DCE112_CLK_SRC_PLL4]; case TRANSMITTER_UNIPHY_F: return pool->clock_sources[DCE112_CLK_SRC_PLL5]; default: return NULL; } return NULL; } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if 
(!pipe_ctx) return DC_ERROR_UNEXPECTED; dce110_resource_build_pipe_hw_param(pipe_ctx); resource_build_info_frame(pipe_ctx); return DC_OK; } bool dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { bool result = false; DC_LOG_BANDWIDTH_CALCS( "%s: start", __func__); if (bw_calcs( dc->ctx, dc->bw_dceip, dc->bw_vbios, context->res_ctx.pipe_ctx, dc->res_pool->pipe_count, &context->bw_ctx.bw.dce)) result = true; if (!result) DC_LOG_BANDWIDTH_VALIDATION( "%s: Bandwidth validation failed!", __func__); if (memcmp(&dc->current_state->bw_ctx.bw.dce, &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { DC_LOG_BANDWIDTH_CALCS( "%s: finish,\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" , __func__, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_mode_enable, context->bw_ctx.bw.dce.cpuc_state_change_enable, context->bw_ctx.bw.dce.cpup_state_change_enable, context->bw_ctx.bw.dce.nbp_state_change_enable, context->bw_ctx.bw.dce.all_displays_in_sync, context->bw_ctx.bw.dce.dispclk_khz, context->bw_ctx.bw.dce.sclk_khz, context->bw_ctx.bw.dce.sclk_deep_sleep_khz, context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } return result; } enum dc_status resource_map_phy_clock_resources( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { /* acquire new resources */ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->clock_source = dc->res_pool->dp_clock_source; else { if (stream && stream->link && stream->link->link_enc) pipe_ctx->clock_source = find_matching_pll( &context->res_ctx, dc->res_pool, stream); } if (pipe_ctx->clock_source == NULL) return DC_NO_CLOCK_SOURCE_RESOURCE; resource_reference_clock_source( &context->res_ctx, dc->res_pool, pipe_ctx->clock_source); return DC_OK; } static bool dce112_validate_surface_sets( struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 1) return 
false; if (context->stream_status[i].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; } return true; } enum dc_status dce112_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static enum dc_status dce112_validate_global( struct dc *dc, struct dc_state *context) { if (!dce112_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static void dce112_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); dce112_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce112_res_pool_funcs = { .destroy = dce112_destroy_resource_pool, .link_enc_create = dce112_link_encoder_create, .panel_cntl_create = dce112_panel_cntl_create, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, .validate_global = dce112_validate_global, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels_with_latency eng_clks = {0}; struct dm_pp_clock_levels_with_latency mem_clks = {0}; struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; struct dm_pp_clock_levels clks = {0}; int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) memory_type_multiplier = MEMORY_TYPE_HBM; /*do system clock TODO PPLIB: after PPLIB implement, * then remove old way */ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &eng_clks)) { /* This is only for temporary */ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &clks); /* convert all the clock fro kHz to fix point mHz */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels/8], 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*2/8], 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*3/8], 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*4/8], 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*5/8], 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*6/8], 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); /*do memory clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( clks.clocks_in_khz[0] * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier, 1000); return; } /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( 
eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( eng_clks.data[0].clocks_in_khz, 1000); /*do memory clock*/ dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &mem_clks); /* we don't need to call PPLIB for validation clock since they * also give us the highest sclk and highest mclk (UMA clock). * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): * YCLK = UMACLK*m_memoryTypeMultiplier */ dc->bw_vbios->low_yclk = bw_frc_to_fixed( mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select * depending on DPM state they are in. And update BW MGR GFX Engine and * Memory clock member variables for Watermarks calculations for each * Watermark Set */ clk_ranges.num_wm_sets = 4; clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; /* Notify PP Lib/SMU which Watermarks to 
use for which clock ranges */ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); } static const struct resource_caps *dce112_resource_cap( struct hw_asic_id *asic_id) { if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) || ASIC_REV_IS_POLARIS12_V(asic_id->hw_internal_rev)) return &polaris_11_resource_cap; else return &polaris_10_resource_cap; } static bool dce112_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = dce112_resource_cap(&ctx->asic_id); pool->base.funcs = &dce112_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = pool->base.res_cap->num_timing_generator; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCE112_CLK_SRC_PLL0] = dce112_clock_source_create( ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCE112_CLK_SRC_PLL1] = dce112_clock_source_create( ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCE112_CLK_SRC_PLL2] = dce112_clock_source_create( ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCE112_CLK_SRC_PLL3] = dce112_clock_source_create( ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->base.clock_sources[DCE112_CLK_SRC_PLL4] = dce112_clock_source_create( ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->base.clock_sources[DCE112_CLK_SRC_PLL5] = dce112_clock_source_create( ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL5, &clk_src_regs[5], false); pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL; pool->base.dp_clock_source = dce112_clock_source_create( ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce112_timing_generator_create( ctx, i, &dce112_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = 
dce112_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce112_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce112_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce112_opp_create( ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce112_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce112_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; /* Create hardware sequencer */ dce112_hw_sequencer_construct(dc); bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; res_create_fail: dce112_resource_destruct(pool); return false; } struct resource_pool *dce112_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce112_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
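/* Hypothetical standalone sketch (not driver code): illustrates how
 * bw_calcs_data_update_from_pplib() in dce112_resource.c above picks the
 * low/mid1..mid6/high sclk levels at indices 0, n/8, 2n/8, ... 6n/8, n-1
 * of the PPLib clock table, and how YCLK is derived from the UMA/memory
 * clock via the memory type multiplier. The sample clock table, the plain
 * floating-point conversion and the multiplier value are illustrative
 * assumptions; the real code converts kHz to fixed-point MHz with
 * bw_frc_to_fixed(khz, 1000). */
#include <stdio.h>

int main(void)
{
	unsigned int clocks_in_khz[8] = {
		300000, 400000, 500000, 600000, 700000, 800000, 900000, 1000000
	};
	unsigned int n = 8;
	unsigned int idx[8] = { 0, n / 8, n * 2 / 8, n * 3 / 8,
				n * 4 / 8, n * 5 / 8, n * 6 / 8, n - 1 };
	const char *name[8] = { "low", "mid1", "mid2", "mid3",
				"mid4", "mid5", "mid6", "high" };
	unsigned int memory_type_multiplier = 4; /* illustrative value only */

	for (unsigned int i = 0; i < 8; i++)
		printf("%s_sclk: level %u -> %.1f MHz\n",
		       name[i], idx[i], clocks_in_khz[idx[i]] / 1000.0);

	/* YCLK is derived from the memory clock reported by PPLib:
	 * YCLK = UMACLK * memory_type_multiplier (per the comment above). */
	printf("low_yclk: %.1f MHz\n",
	       clocks_in_khz[0] * memory_type_multiplier / 1000.0);
	return 0;
}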
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "core_types.h" #include "link_encoder.h" #include "dcn31_dio_link_encoder.h" #include "stream_encoder.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" #include "link_enc_cfg.h" #include "dc_dmub_srv.h" #include "dal_asic_id.h" #include "link.h" #define CTX \ enc10->base.ctx #define DC_LOGGER \ enc10->base.ctx->logger #define REG(reg)\ (enc10->link_regs->reg) #undef FN #define FN(reg_name, field_name) \ enc10->link_shift->field_name, enc10->link_mask->field_name #define IND_REG(index) \ (enc10->link_regs->index) #define AUX_REG(reg)\ (enc10->aux_regs->reg) #define AUX_REG_READ(reg_name) \ dm_read_reg(CTX, AUX_REG(reg_name)) #define AUX_REG_WRITE(reg_name, val) \ dm_write_reg(CTX, AUX_REG(reg_name), val) #ifndef MIN #define MIN(X, Y) ((X) < (Y) ? 
(X) : (Y)) #endif static uint8_t phy_id_from_transmitter(enum transmitter t) { uint8_t phy_id; switch (t) { case TRANSMITTER_UNIPHY_A: phy_id = 0; break; case TRANSMITTER_UNIPHY_B: phy_id = 1; break; case TRANSMITTER_UNIPHY_C: phy_id = 2; break; case TRANSMITTER_UNIPHY_D: phy_id = 3; break; case TRANSMITTER_UNIPHY_E: phy_id = 4; break; case TRANSMITTER_UNIPHY_F: phy_id = 5; break; case TRANSMITTER_UNIPHY_G: phy_id = 6; break; default: phy_id = 0; break; } return phy_id; } static bool has_query_dp_alt(struct link_encoder *enc) { struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; if (enc->ctx->dce_version >= DCN_VERSION_3_15) return true; /* Supports development firmware and firmware >= 4.0.11 */ return dc_dmub_srv && !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) && dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10)); } static bool query_dp_alt_from_dmub(struct link_encoder *enc, union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); memset(cmd, 0, sizeof(*cmd)); cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; cmd->query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return false; return true; } void dcn31_link_encoder_set_dio_phy_mux( struct link_encoder *enc, enum encoder_type_select sel, uint32_t hpo_inst) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); switch (enc->transmitter) { case TRANSMITTER_UNIPHY_A: if (sel == ENCODER_TYPE_HDMI_FRL) REG_UPDATE(DIO_LINKA_CNTL, HPO_HDMI_ENC_SEL, hpo_inst); else if (sel == ENCODER_TYPE_DP_128B132B) REG_UPDATE(DIO_LINKA_CNTL, HPO_DP_ENC_SEL, hpo_inst); REG_UPDATE(DIO_LINKA_CNTL, ENC_TYPE_SEL, sel); break; case TRANSMITTER_UNIPHY_B: if (sel == ENCODER_TYPE_HDMI_FRL) REG_UPDATE(DIO_LINKB_CNTL, HPO_HDMI_ENC_SEL, hpo_inst); else if (sel == ENCODER_TYPE_DP_128B132B) REG_UPDATE(DIO_LINKB_CNTL, HPO_DP_ENC_SEL, hpo_inst); REG_UPDATE(DIO_LINKB_CNTL, ENC_TYPE_SEL, sel); break; case TRANSMITTER_UNIPHY_C: if (sel == ENCODER_TYPE_HDMI_FRL) REG_UPDATE(DIO_LINKC_CNTL, HPO_HDMI_ENC_SEL, hpo_inst); else if (sel == ENCODER_TYPE_DP_128B132B) REG_UPDATE(DIO_LINKC_CNTL, HPO_DP_ENC_SEL, hpo_inst); REG_UPDATE(DIO_LINKC_CNTL, ENC_TYPE_SEL, sel); break; case TRANSMITTER_UNIPHY_D: if (sel == ENCODER_TYPE_HDMI_FRL) REG_UPDATE(DIO_LINKD_CNTL, HPO_HDMI_ENC_SEL, hpo_inst); else if (sel == ENCODER_TYPE_DP_128B132B) REG_UPDATE(DIO_LINKD_CNTL, HPO_DP_ENC_SEL, hpo_inst); REG_UPDATE(DIO_LINKD_CNTL, ENC_TYPE_SEL, sel); break; case TRANSMITTER_UNIPHY_E: if (sel == ENCODER_TYPE_HDMI_FRL) REG_UPDATE(DIO_LINKE_CNTL, HPO_HDMI_ENC_SEL, hpo_inst); else if (sel == ENCODER_TYPE_DP_128B132B) REG_UPDATE(DIO_LINKE_CNTL, HPO_DP_ENC_SEL, hpo_inst); REG_UPDATE(DIO_LINKE_CNTL, ENC_TYPE_SEL, sel); break; case TRANSMITTER_UNIPHY_F: if (sel == ENCODER_TYPE_HDMI_FRL) REG_UPDATE(DIO_LINKF_CNTL, HPO_HDMI_ENC_SEL, hpo_inst); else if (sel == ENCODER_TYPE_DP_128B132B) REG_UPDATE(DIO_LINKF_CNTL, HPO_DP_ENC_SEL, hpo_inst); REG_UPDATE(DIO_LINKF_CNTL, ENC_TYPE_SEL, sel); break; default: /* Do nothing */ break; } } static void enc31_hw_init(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); /* 00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2 01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4 02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8 03 - 
DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16 04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32 05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64 06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128 07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256 */ /* AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0, AUX_RX_START_WINDOW = 1 [6:4] AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8] AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1 AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1 AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0 AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1 AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1 AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 AUX_RX_DETECTION_THRESHOLD [30:28] = 1 */ // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk // 27MHz -> 0xd // 100MHz -> 0x32 // 48MHz -> 0x18 // Set TMDS_CTL0 to 1. This is a legacy setting. REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1); dcn10_aux_initialize(enc10); } static const struct link_encoder_funcs dcn31_link_enc_funcs = { .read_state = link_enc2_read_state, .validate_output_with_stream = dcn30_link_encoder_validate_output_with_stream, .hw_init = enc31_hw_init, .setup = dcn10_link_encoder_setup, .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, .enable_dp_output = dcn31_link_encoder_enable_dp_output, .enable_dp_mst_output = dcn31_link_encoder_enable_dp_mst_output, .disable_output = dcn31_link_encoder_disable_output, .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, .update_mst_stream_allocation_table = dcn10_link_encoder_update_mst_stream_allocation_table, .psr_program_dp_dphy_fast_training = dcn10_psr_program_dp_dphy_fast_training, .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, .enable_hpd = dcn10_link_encoder_enable_hpd, .disable_hpd = dcn10_link_encoder_disable_hpd, .is_dig_enabled = dcn10_is_dig_enabled, .destroy = dcn10_link_encoder_destroy, .fec_set_enable = enc2_fec_set_enable, .fec_set_ready = enc2_fec_set_ready, .fec_is_active = enc2_fec_is_active, .get_dig_frontend = dcn10_get_dig_frontend, .get_dig_mode = dcn10_get_dig_mode, .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode, .get_max_link_cap = dcn31_link_encoder_get_max_link_cap, .set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux, }; void dcn31_link_encoder_construct( struct dcn20_link_encoder *enc20, const struct encoder_init_data *init_data, const struct encoder_feature_support *enc_features, const struct dcn10_link_enc_registers *link_regs, const struct dcn10_link_enc_aux_registers *aux_regs, const struct dcn10_link_enc_hpd_registers *hpd_regs, const struct dcn10_link_enc_shift *link_shift, const struct dcn10_link_enc_mask *link_mask) { struct bp_encoder_cap_info bp_cap_info = {0}; const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; enum bp_result result = BP_RESULT_OK; struct dcn10_link_encoder *enc10 = &enc20->enc10; enc10->base.funcs = &dcn31_link_enc_funcs; enc10->base.ctx = init_data->ctx; enc10->base.id = init_data->encoder; enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; enc10->base.features = *enc_features; enc10->base.transmitter = 
init_data->transmitter; /* set the flag to indicate whether driver poll the I2C data pin * while doing the DP sink detect */ /* if (dal_adapter_service_is_feature_supported(as, FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) enc10->base.features.flags.bits. DP_SINK_DETECT_POLL_DATA_PIN = true;*/ enc10->base.output_signals = SIGNAL_TYPE_DVI_SINGLE_LINK | SIGNAL_TYPE_DVI_DUAL_LINK | SIGNAL_TYPE_LVDS | SIGNAL_TYPE_DISPLAY_PORT | SIGNAL_TYPE_DISPLAY_PORT_MST | SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. * Prefer DIG assignment is decided by board design. * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design * and VBIOS will filter out 7 UNIPHY for DCE 8.0. * By this, adding DIGG should not hurt DCE 8.0. * This will let DCE 8.1 share DCE 8.0 as much as possible */ enc10->link_regs = link_regs; enc10->aux_regs = aux_regs; enc10->hpd_regs = hpd_regs; enc10->link_shift = link_shift; enc10->link_mask = link_mask; switch (enc10->base.transmitter) { case TRANSMITTER_UNIPHY_A: enc10->base.preferred_engine = ENGINE_ID_DIGA; break; case TRANSMITTER_UNIPHY_B: enc10->base.preferred_engine = ENGINE_ID_DIGB; break; case TRANSMITTER_UNIPHY_C: enc10->base.preferred_engine = ENGINE_ID_DIGC; break; case TRANSMITTER_UNIPHY_D: enc10->base.preferred_engine = ENGINE_ID_DIGD; break; case TRANSMITTER_UNIPHY_E: enc10->base.preferred_engine = ENGINE_ID_DIGE; break; case TRANSMITTER_UNIPHY_F: enc10->base.preferred_engine = ENGINE_ID_DIGF; break; default: ASSERT_CRITICAL(false); enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; } /* default to one to mirror Windows behavior */ enc10->base.features.flags.bits.HDMI_6GB_EN = 1; result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, enc10->base.id, &bp_cap_info); /* Override features with DCE-specific values */ if (result == BP_RESULT_OK) { enc10->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc10->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE; enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN; enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN; enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN; enc10->base.features.flags.bits.DP_IS_USB_C = bp_cap_info.DP_IS_USB_C; } else { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); } if (enc10->base.ctx->dc->debug.hdmi20_disable) { enc10->base.features.flags.bits.HDMI_6GB_EN = 0; } } void dcn31_link_encoder_construct_minimal( struct dcn20_link_encoder *enc20, struct dc_context *ctx, const struct encoder_feature_support *enc_features, const struct dcn10_link_enc_registers *link_regs, enum engine_id eng_id) { struct dcn10_link_encoder *enc10 = &enc20->enc10; enc10->base.funcs = &dcn31_link_enc_funcs; enc10->base.ctx = ctx; enc10->base.id.type = OBJECT_TYPE_ENCODER; enc10->base.hpd_source = HPD_SOURCEID_UNKNOWN; enc10->base.connector.type = OBJECT_TYPE_CONNECTOR; enc10->base.preferred_engine = eng_id; enc10->base.features = *enc_features; enc10->base.transmitter = TRANSMITTER_UNKNOWN; enc10->link_regs = link_regs; 
enc10->base.output_signals = SIGNAL_TYPE_DISPLAY_PORT | SIGNAL_TYPE_DISPLAY_PORT_MST | SIGNAL_TYPE_EDP; } /* DPIA equivalent of link_transmitter_control. */ static bool link_dpia_control(struct dc_context *dc_ctx, struct dmub_cmd_dig_dpia_control_data *dpia_control) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA; cmd.dig1_dpia_control.header.sub_type = DMUB_CMD__DPIA_DIG1_DPIA_CONTROL; cmd.dig1_dpia_control.header.payload_bytes = sizeof(cmd.dig1_dpia_control) - sizeof(cmd.dig1_dpia_control.header); cmd.dig1_dpia_control.dpia_control = *dpia_control; dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } static void link_encoder_disable(struct dcn10_link_encoder *enc10) { /* reset training complete */ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0); } void dcn31_link_encoder_enable_dp_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); /* Enable transmitter and encoder. */ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine); dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source); } else { struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; struct dc_link *link; link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); enc1_configure_encoder(enc10, link_settings); dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE; dpia_control.enc_id = enc->preferred_engine; dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */ dpia_control.lanenum = (uint8_t)link_settings->lane_count; dpia_control.symclk_10khz = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ / 10; /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin * unused by DPIA. */ dpia_control.hpdsel = 6; if (link) { dpia_control.dpia_id = link->ddc_hw_inst; dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link); } else { DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); BREAK_TO_DEBUGGER(); return; } DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id); link_dpia_control(enc->ctx, &dpia_control); } } void dcn31_link_encoder_enable_dp_mst_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); /* Enable transmitter and encoder. */ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine); dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); } else { struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; struct dc_link *link; link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); enc1_configure_encoder(enc10, link_settings); dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE; dpia_control.enc_id = enc->preferred_engine; dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */ dpia_control.lanenum = (uint8_t)link_settings->lane_count; dpia_control.symclk_10khz = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ / 10; /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin * unused by DPIA. 
*/ dpia_control.hpdsel = 6; if (link) { dpia_control.dpia_id = link->ddc_hw_inst; dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link); } else { DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); BREAK_TO_DEBUGGER(); return; } DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id); link_dpia_control(enc->ctx, &dpia_control); } } void dcn31_link_encoder_disable_output( struct link_encoder *enc, enum signal_type signal) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); /* Disable transmitter and encoder. */ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine); dcn10_link_encoder_disable_output(enc, signal); } else { struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; struct dc_link *link; if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) return; link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE; dpia_control.enc_id = enc->preferred_engine; if (signal == SIGNAL_TYPE_DISPLAY_PORT) { dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */ } else if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */ } else { DC_LOG_ERROR("%s: USB4 DPIA only supports DisplayPort.\n", __func__); BREAK_TO_DEBUGGER(); } if (link) { dpia_control.dpia_id = link->ddc_hw_inst; } else { DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); BREAK_TO_DEBUGGER(); return; } DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id); link_dpia_control(enc->ctx, &dpia_control); link_encoder_disable(enc10); } } bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); union dmub_rb_cmd cmd; uint32_t dp_alt_mode_disable; /* Only applicable to USB-C PHY. */ if (!enc->features.flags.bits.DP_IS_USB_C) return false; /* * Use the new interface from DMCUB if available. * Avoids hanging the RDCPSPIPE if DMCUB wasn't already running. */ if (has_query_dp_alt(enc)) { if (!query_dp_alt_from_dmub(enc, &cmd)) return false; return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); } /* Legacy path, avoid if possible. */ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); } else { /* * B0 phys use a new set of registers to check whether alt mode is disabled. * if value == 1 alt mode is disabled, otherwise it is enabled. */ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); } else { REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); } } return (dp_alt_mode_disable == 0); } void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); union dmub_rb_cmd cmd; uint32_t is_in_usb_c_dp4_mode = 0; dcn10_link_encoder_get_max_link_cap(enc, link_settings); /* Take the link cap directly if not USB */ if (!enc->features.flags.bits.DP_IS_USB_C) return; /* * Use the new interface from DMCUB if available. * Avoids hanging the RDCPSPIPE if DMCUB wasn't already running. 
*/ if (has_query_dp_alt(enc)) { if (!query_dp_alt_from_dmub(enc, &cmd)) return; if (cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4 == 0) link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); return; } /* Legacy path, avoid if possible. */ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); } else { if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); } else { REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); } } if (!is_in_usb_c_dp4_mode) link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
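/* Hypothetical standalone sketch (not driver code): works through the
 * symclk value that dcn31_link_encoder_enable_dp_output() above hands to
 * the DPIA DMUB command. It assumes LINK_RATE_REF_FREQ_IN_KHZ is the DP
 * reference of 27000 kHz and that link_rate carries the raw DPCD link
 * rate code (e.g. 0x14 for HBR2); both are assumptions made here for
 * illustration, not taken from this file. */
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000 /* assumed DP reference frequency */

static unsigned int dpia_symclk_10khz(unsigned int link_rate_code)
{
	/* Mirrors: dpia_control.symclk_10khz =
	 *   link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ / 10 */
	return link_rate_code * LINK_RATE_REF_FREQ_IN_KHZ / 10;
}

int main(void)
{
	unsigned int hbr2 = 0x14; /* 5.4 Gbps per lane */
	unsigned int sym = dpia_symclk_10khz(hbr2);

	/* 20 * 27000 / 10 = 54000 units of 10 kHz, i.e. a 540 MHz symbol clock */
	printf("HBR2 symclk: %u x 10 kHz (%.0f MHz)\n", sym, sym / 100.0);
	return 0;
}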
/* * Copyright 2012-20 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dce_calcs.h" #include "reg_helper.h" #include "basics/conversion.h" #include "dcn31_hubp.h" #define REG(reg)\ hubp2->hubp_regs->reg #define CTX \ hubp2->base.ctx #undef FN #define FN(reg_name, field_name) \ hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable); REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable); } void hubp31_soft_reset(struct hubp *hubp, bool reset) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset); } static void hubp31_program_extended_blank(struct hubp *hubp, unsigned int min_dst_y_next_start_optimized) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); REG_UPDATE(BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized); } static struct hubp_funcs dcn31_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr, .hubp_program_surface_config = hubp3_program_surface_config, .hubp_is_flip_pending = hubp2_is_flip_pending, .hubp_setup = hubp3_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings, .set_blank = hubp2_set_blank, .dcc_control = hubp3_dcc_control, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, .set_cursor_position = hubp2_cursor_set_position, .hubp_clk_cntl = hubp2_clk_cntl, .hubp_vtg_sel = hubp2_vtg_sel, .dmdata_set_attributes = hubp3_dmdata_set_attributes, .dmdata_load = hubp2_dmdata_load, .dmdata_status_done = hubp2_dmdata_status_done, .hubp_read_state = hubp3_read_state, .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp3_init, .set_unbounded_requesting = hubp31_set_unbounded_requesting, .hubp_soft_reset = hubp31_soft_reset, .hubp_set_flip_int = hubp1_set_flip_int, .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank, }; bool hubp31_construct( struct dcn20_hubp *hubp2, struct dc_context *ctx, uint32_t inst, const struct dcn_hubp2_registers *hubp_regs, const struct 
dcn_hubp2_shift *hubp_shift, const struct dcn_hubp2_mask *hubp_mask) { hubp2->base.funcs = &dcn31_hubp_funcs; hubp2->base.ctx = ctx; hubp2->hubp_regs = hubp_regs; hubp2->hubp_shift = hubp_shift; hubp2->hubp_mask = hubp_mask; hubp2->base.inst = inst; hubp2->base.opp_id = OPP_ID_INVALID; hubp2->base.mpcc_id = 0xf; return true; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
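/* Hypothetical standalone sketch (not driver code): mirrors the pattern
 * hubp31_construct() above uses - a constructor that only wires a shared,
 * statically allocated ops table plus per-instance register pointers into
 * the object, so every HUBP instance shares one vtable. All type and
 * function names below are made up for illustration. */
#include <stdio.h>

struct demo_regs { unsigned int cntl; };

struct demo_hubp;

struct demo_hubp_funcs {
	void (*soft_reset)(struct demo_hubp *hubp, int reset);
};

struct demo_hubp {
	const struct demo_hubp_funcs *funcs; /* shared vtable */
	const struct demo_regs *regs;        /* per-instance registers */
	unsigned int inst;
};

static void demo_soft_reset(struct demo_hubp *hubp, int reset)
{
	printf("hubp%u: soft reset = %d (cntl reg 0x%x)\n",
	       hubp->inst, reset, hubp->regs->cntl);
}

static const struct demo_hubp_funcs demo_funcs = {
	.soft_reset = demo_soft_reset,
};

static void demo_hubp_construct(struct demo_hubp *hubp, unsigned int inst,
				const struct demo_regs *regs)
{
	hubp->funcs = &demo_funcs; /* like hubp2->base.funcs = &dcn31_hubp_funcs */
	hubp->regs = regs;
	hubp->inst = inst;
}

int main(void)
{
	static const struct demo_regs regs0 = { .cntl = 0x100 };
	struct demo_hubp hubp0;

	demo_hubp_construct(&hubp0, 0, &regs0);
	hubp0.funcs->soft_reset(&hubp0, 1);
	return 0;
}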
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dm_helpers.h" #include "core_types.h" #include "resource.h" #include "dccg.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" #include "reg_helper.h" #include "abm.h" #include "hubp.h" #include "dchubbub.h" #include "timing_generator.h" #include "opp.h" #include "ipp.h" #include "mpc.h" #include "mcif_wb.h" #include "dc_dmub_srv.h" #include "dcn31_hwseq.h" #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" #include "link.h" #include "dcn10/dcn10_hw_sequencer.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" #include "dce/dce_i2c_hw.h" #define DC_LOGGER_INIT(logger) #define CTX \ hws->ctx #define REG(reg)\ hws->regs->reg #define DC_LOGGER \ dc->ctx->logger #undef FN #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name static void enable_memory_low_power(struct dc *dc) { struct dce_hwseq *hws = dc->hwseq; int i; if (dc->debug.enable_mem_low_power.bits.dmcu) { // Force ERAM to shutdown if DMCU is not enabled if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); } } // Set default OPTC memory power states if (dc->debug.enable_mem_low_power.bits.optc) { // Shutdown when unassigned and light sleep in VBLANK REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); } if (dc->debug.enable_mem_low_power.bits.vga) { // Power down VGA memory REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); } if (dc->debug.enable_mem_low_power.bits.mpc && dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode) dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc); if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { // Power down VPGs for (i = 0; i < dc->res_pool->stream_enc_count; i++) dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); #if defined(CONFIG_DRM_AMD_DC_FP) for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); #endif } } void dcn31_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; int i; if (dc->clk_mgr && 
dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); if (hws->funcs.disable_vga) hws->funcs.disable_vga(dc->hwseq); } // Initialize the dccg if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); enable_memory_low_power(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; if (res_pool->dccg && res_pool->hubbub) { (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, &res_pool->ref_clocks.dccg_ref_clock_inKhz); (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, res_pool->ref_clocks.dccg_ref_clock_inKhz, &res_pool->ref_clocks.dchub_ref_clock_inKhz); } else { // Not all ASICs have DCCG sw component res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); for (i = 0; i < dc->link_count; i++) { /* Power up AND update implementation according to the * required signal (which may be different from the * default signal on connector). */ struct dc_link *link = dc->links[i]; if (link->ep_type != DISPLAY_ENDPOINT_PHY) continue; link->link_enc->funcs->hw_init(link->link_enc); /* Check for enabled DIG to identify enabled display */ if (link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { link->link_status.link_active = true; if (link->link_enc->funcs->fec_is_active && link->link_enc->funcs->fec_is_active(link->link_enc)) link->fec_state = dc_link_fec_enabled; } } /* we want to turn off all dp displays before doing detection */ dc->link_srv->blank_all_dp_displays(dc); if (hws->funcs.enable_power_gating_plane) hws->funcs.enable_power_gating_plane(dc->hwseq, true); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. * Otherwise, if taking control is not possible, we need to power * everything down. 
*/ if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { // we want to turn off edp displays if odm is enabled and no seamless boot if (!dc->caps.seamless_odm) { for (i = 0; i < dc->res_pool->timing_generator_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; uint32_t num_opps, opp_id_src0, opp_id_src1; num_opps = 1; if (tg) { if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) { tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); } } if (num_opps > 1) { dc->link_srv->blank_all_edp_displays(dc); break; } } } hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; audio->funcs->hw_init(audio); } for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; if (link->panel_cntl) backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL) abms[i]->funcs->abm_init(abms[i], backlight); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ REG_WRITE(DIO_MEM_PWR_CTRL, 0); // Set i2c to light sleep until engine is setup if (dc->debug.enable_mem_low_power.bits.i2c) REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); if (hws->funcs.setup_hpo_hw_control) hws->funcs.setup_hpo_hw_control(hws, false); if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); if (dc->res_pool->hubbub->funcs->force_pstate_change_control) dc->res_pool->hubbub->funcs->force_pstate_change_control( dc->res_pool->hubbub, false, false); #if defined(CONFIG_DRM_AMD_DC_FP) if (dc->res_pool->hubbub->funcs->init_crb) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); #endif // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } void dcn31_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 
0 : 2; uint32_t org_ip_request_cntl = 0; if (hws->ctx->dc->debug.disable_dsc_power_gate) return; if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc && hws->ctx->dc->res_pool->dccg->funcs->enable_dsc && power_on) hws->ctx->dc->res_pool->dccg->funcs->enable_dsc( hws->ctx->dc->res_pool->dccg, dsc_inst); REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); switch (dsc_inst) { case 0: /* DSC0 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 1: /* DSC1 */ REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 2: /* DSC2 */ REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; default: BREAK_TO_DEBUGGER(); break; } if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) { if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) hws->ctx->dc->res_pool->dccg->funcs->disable_dsc( hws->ctx->dc->res_pool->dccg, dsc_inst); } } void dcn31_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { bool force_on = true; /* disable power gating */ uint32_t org_ip_request_cntl = 0; if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate) force_on = false; REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); /* DCHUBP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); /* DPP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); force_on = true; /* disable power gating */ if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate) force_on = false; /* DCS0/1/2/3/4/5 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) { bool is_hdmi_tmds; bool is_dp; ASSERT(pipe_ctx->stream); if (pipe_ctx->stream_res.stream_enc == NULL) return; /* this is not root pipe */ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal); is_dp = dc_is_dp_signal(pipe_ctx->stream->signal); if (!is_hdmi_tmds && !is_dp) return; if (is_hdmi_tmds) pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); else if (pipe_ctx->stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets( pipe_ctx->stream_res.hpo_dp_stream_enc, &pipe_ctx->stream_res.encoder_info_frame); return; } else { if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num) pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); } } void 
dcn31_z10_save_init(struct dc *dc) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_z10_restore(const struct dc *dc) { union dmub_rb_cmd cmd; /* * DMUB notifies whether restore is required. * Optimization to avoid sending commands when not required. */ if (!dc_dmub_srv_is_restore_required(dc->ctx->dmub_srv)) return; memset(&cmd, 0, sizeof(cmd)); cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 0 : 2; uint32_t org_ip_request_cntl; if (hws->ctx->dc->debug.disable_hubp_power_gate) return; if (REG(DOMAIN0_PG_CONFIG) == 0) return; REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); switch (hubp_inst) { case 0: REG_SET(DOMAIN0_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 1: REG_SET(DOMAIN1_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 2: REG_SET(DOMAIN2_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; case 3: REG_SET(DOMAIN3_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); break; default: BREAK_TO_DEBUGGER(); break; } if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { struct dcn_hubbub_phys_addr_config config; config.system_aperture.fb_top = pa_config->system_aperture.fb_top; config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; config.system_aperture.fb_base = pa_config->system_aperture.fb_base; config.system_aperture.agp_top = pa_config->system_aperture.agp_top; config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; config.system_aperture.agp_base = pa_config->system_aperture.agp_base; config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; if (pa_config->gart_config.base_addr_is_mc_addr) { /* Convert from MC address to offset into FB */ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr - pa_config->system_aperture.fb_base + pa_config->system_aperture.fb_offset; } else config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); } static void dcn31_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { struct dc_link *link; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { pipe_ctx->stream = NULL; return; } ASSERT(!pipe_ctx->top_pipe); dc->hwss.set_abm_immediate_disable(pipe_ctx); pipe_ctx->stream_res.tg->funcs->set_dsc_config( pipe_ctx->stream_res.tg, OPTC_DSC_DISABLED, 0, 0); 
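/* With DSC disengaged from the OPTC, tear down the timing generator below:
 * disable the CRTC, gate the OTG clock, restore ODM bypass, clear the OTG
 * symclk reference count and reset DRR before the link and audio resources
 * are released.
 */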
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; if (pipe_ctx->stream_res.tg->funcs->set_drr) pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); link = pipe_ctx->stream->link; /* DPMS may already disable or */ /* dpms_off status is incorrect due to fastboot * feature. When system resume from S4 with second * screen only, the dpms_off would be true but * VBIOS lit up eDP, so check link status too. */ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) dc->link_srv->set_dpms_off(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); /* free acquired resources */ if (pipe_ctx->stream_res.audio) { /*disable az_endpoint*/ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); /*free audio*/ if (dc->caps.dynamic_audio == true) { /*we have to dynamic arbitrate the audio endpoints*/ /*we free the resource, need reset is_audio_acquired*/ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); pipe_ctx->stream_res.audio = NULL; } } pipe_ctx->stream = NULL; DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } void dcn31_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { int i; struct dce_hwseq *hws = dc->hwseq; /* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (!pipe_ctx_old->stream) continue; if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) continue; if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { struct clock_source *old_clk = pipe_ctx_old->clock_source; dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } } /* New dc_state in the process of being applied to hardware. */ link_enc_cfg_set_transient_mode(dc, dc->current_state, context); } void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) { if (hws->ctx->dc->debug.hpo_optimization) REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); }
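/* Note: these sequencer entry points are not called directly by dc core;
 * they are registered through the hw_sequencer_funcs table in dcn31_init.c.
 * A rough, abbreviated sketch (see dcn31_init.c for the authoritative table):
 *
 *   static const struct hw_sequencer_funcs dcn31_funcs = {
 *       .init_hw           = dcn31_init_hw,
 *       .z10_restore       = dcn31_z10_restore,
 *       .z10_save_init     = dcn31_z10_save_init,
 *       .update_info_frame = dcn31_update_info_frame,
 *       ...
 *   };
 */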
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dc.h" #include "dcn31/dcn31_init.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn31_resource.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" #include "dml/dcn30/dcn30_fpu.h" #include "dcn10/dcn10_ipp.h" #include "dcn30/dcn30_hubbub.h" #include "dcn31/dcn31_hubbub.h" #include "dcn30/dcn30_mpc.h" #include "dcn31/dcn31_hubp.h" #include "irq/dcn31/irq_service_dcn31.h" #include "dcn30/dcn30_dpp.h" #include "dcn31/dcn31_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn30/dcn30_opp.h" #include "dcn20/dcn20_dsc.h" #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn31/dcn31_vpg.h" #include "dcn31/dcn31_afmt.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "clk_mgr.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dml/dcn31/dcn31_fpu.h" #include "dcn31/dcn31_dccg.h" #include "dcn10/dcn10_resource.h" #include "dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_mmhubbub.h" // TODO: change include headers /amd/include/asic_reg after upstream #include "yellow_carp_offset.h" #include "dcn/dcn_3_1_2_offset.h" #include "dcn/dcn_3_1_2_sh_mask.h" #include "nbio/nbio_7_2_0_offset.h" #include "dpcs/dpcs_4_2_0_offset.h" #include "dpcs/dpcs_4_2_0_sh_mask.h" #include "mmhub/mmhub_2_3_0_offset.h" #include "mmhub/mmhub_2_3_0_sh_mask.h" #define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 #define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 #define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 #define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dce/dmub_replay.h" #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) enum dcn31_clk_src_array_id { DCN31_CLK_SRC_PLL0, DCN31_CLK_SRC_PLL1, DCN31_CLK_SRC_PLL2, DCN31_CLK_SRC_PLL3, DCN31_CLK_SRC_PLL4, 
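/* Sentinel entry: DCN30_CLK_SRC_TOTAL doubles as the number of PLL clock
 * sources and is assigned to pool->base.clk_src_count during construction.
 */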
DCN30_CLK_SRC_TOTAL }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRI2(reg_name, block, id)\ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ reg ## reg_name #define SRIR(var_name, reg_name, block, id)\ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_MPC_RMU(reg_name, block, id)\ .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define SRII_DWB(reg_name, temp_name, block, id)\ .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## temp_name #define SF_DWB2(reg_name, block, id, field_name, post_fix) \ .field_name = reg_name ## __ ## field_name ## post_fix #define DCCG_SRII(reg_name, block, id)\ .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## id ## _ ## reg_name #define VUPDATE_SRII(reg_name, block, id)\ .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ reg ## reg_name ## _ ## block ## id /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ regBIF_BX1_ ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* CLOCK */ #define CLK_BASE_INNER(seg) \ CLK_BASE__INST0_SEG ## seg #define CLK_BASE(seg) \ CLK_BASE_INNER(seg) #define CLK_SRI(reg_name, block, inst)\ .reg_name = CLK_BASE(reg ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ reg ## block ## _ ## inst ## _ ## reg_name static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E) }; /*pll_id being rempped in dmub, in driver it is logical instance*/ static const struct dce110_clk_src_regs clk_src_regs_b0[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, F), clk_src_regs(3, G), clk_src_regs(4, E) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define abm_regs(id)\ [id] = {\ ABM_DCN302_REG_LIST(id)\ } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), abm_regs(1), abm_regs(2), abm_regs(3), }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN30(_MASK) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers 
audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6) }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define vpg_regs(id)\ [id] = {\ VPG_DCN31_REG_LIST(id)\ } static const struct dcn31_vpg_registers vpg_regs[] = { vpg_regs(0), vpg_regs(1), vpg_regs(2), vpg_regs(3), vpg_regs(4), vpg_regs(5), vpg_regs(6), vpg_regs(7), vpg_regs(8), vpg_regs(9), }; static const struct dcn31_vpg_shift vpg_shift = { DCN31_VPG_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_vpg_mask vpg_mask = { DCN31_VPG_MASK_SH_LIST(_MASK) }; #define afmt_regs(id)\ [id] = {\ AFMT_DCN31_REG_LIST(id)\ } static const struct dcn31_afmt_registers afmt_regs[] = { afmt_regs(0), afmt_regs(1), afmt_regs(2), afmt_regs(3), afmt_regs(4), afmt_regs(5) }; static const struct dcn31_afmt_shift afmt_shift = { DCN31_AFMT_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_afmt_mask afmt_mask = { DCN31_AFMT_MASK_SH_LIST(_MASK) }; #define apg_regs(id)\ [id] = {\ APG_DCN31_REG_LIST(id)\ } static const struct dcn31_apg_registers apg_regs[] = { apg_regs(0), apg_regs(1), apg_regs(2), apg_regs(3) }; static const struct dcn31_apg_shift apg_shift = { DCN31_APG_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_apg_mask apg_mask = { DCN31_APG_MASK_SH_LIST(_MASK) }; #define stream_enc_regs(id)\ [id] = {\ SE_DCN3_REG_LIST(id)\ } /* Some encoders won't be initialized here - but they're logical, not physical. 
*/ static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4) }; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define aux_regs(id)\ [id] = {\ DCN2_AUX_REG_LIST(id)\ } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4) }; #define link_regs(id, phyid)\ [id] = {\ LE_DCN31_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ DPCS_DCN31_REG_LIST(id), \ } static const struct dce110_aux_registers_shift aux_shift = { DCN_AUX_MASK_SH_LIST(__SHIFT) }; static const struct dce110_aux_registers_mask aux_mask = { DCN_AUX_MASK_SH_LIST(_MASK) }; static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0, A), link_regs(1, B), link_regs(2, C), link_regs(3, D), link_regs(4, E) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ DPCS_DCN31_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ DPCS_DCN31_MASK_SH_LIST(_MASK) }; #define hpo_dp_stream_encoder_reg_list(id)\ [id] = {\ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ } static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { hpo_dp_stream_encoder_reg_list(0), hpo_dp_stream_encoder_reg_list(1), hpo_dp_stream_encoder_reg_list(2), hpo_dp_stream_encoder_reg_list(3), }; static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) }; #define hpo_dp_link_encoder_reg_list(id)\ [id] = {\ DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ DCN3_1_RDPCSTX_REG_LIST(0),\ DCN3_1_RDPCSTX_REG_LIST(1),\ DCN3_1_RDPCSTX_REG_LIST(2),\ DCN3_1_RDPCSTX_REG_LIST(3),\ DCN3_1_RDPCSTX_REG_LIST(4)\ } static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { hpo_dp_link_encoder_reg_list(0), hpo_dp_link_encoder_reg_list(1), }; static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) }; static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) }; #define dpp_regs(id)\ [id] = {\ DPP_REG_LIST_DCN30(id),\ } static const struct dcn3_dpp_registers dpp_regs[] = { dpp_regs(0), dpp_regs(1), dpp_regs(2), dpp_regs(3) }; static const struct dcn3_dpp_shift tf_shift = { DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) }; static const struct dcn3_dpp_mask tf_mask = { DPP_REG_LIST_SH_MASK_DCN30(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_REG_LIST_DCN30(id),\ } static const struct dcn20_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3) }; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST0(id), \ .AUXN_IMPCAL = 0, \ .AUXP_IMPCAL = 0, \ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ } static const struct 
dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4) }; #define dwbc_regs_dcn3(id)\ [id] = {\ DWBC_COMMON_REG_LIST_DCN30(id),\ } static const struct dcn30_dwbc_registers dwbc30_regs[] = { dwbc_regs_dcn3(0), }; static const struct dcn30_dwbc_shift dwbc30_shift = { DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_dwbc_mask dwbc30_mask = { DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define mcif_wb_regs_dcn3(id)\ [id] = {\ MCIF_WB_COMMON_REG_LIST_DCN30(id),\ } static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { mcif_wb_regs_dcn3(0) }; static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ } static const struct dcn20_dsc_registers dsc_regs[] = { dsc_regsDCN20(0), dsc_regsDCN20(1), dsc_regsDCN20(2) }; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; static const struct dcn30_mpc_registers mpc_regs = { MPC_REG_LIST_DCN3_0(0), MPC_REG_LIST_DCN3_0(1), MPC_REG_LIST_DCN3_0(2), MPC_REG_LIST_DCN3_0(3), MPC_OUT_MUX_REG_LIST_DCN3_0(0), MPC_OUT_MUX_REG_LIST_DCN3_0(1), MPC_OUT_MUX_REG_LIST_DCN3_0(2), MPC_OUT_MUX_REG_LIST_DCN3_0(3), MPC_RMU_GLOBAL_REG_LIST_DCN3AG, MPC_RMU_REG_LIST_DCN3AG(0), MPC_RMU_REG_LIST_DCN3AG(1), //MPC_RMU_REG_LIST_DCN3AG(2), MPC_DWB_MUX_REG_LIST_DCN3_0(0), }; static const struct dcn30_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dcn30_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) }; #define optc_regs(id)\ [id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} static const struct dcn_optc_registers optc_regs[] = { optc_regs(0), optc_regs(1), optc_regs(2), optc_regs(3) }; static const struct dcn_optc_shift optc_shift = { OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) }; static const struct dcn_optc_mask optc_mask = { OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) }; #define hubp_regs(id)\ [id] = {\ HUBP_REG_LIST_DCN30(id)\ } static const struct dcn_hubp2_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3) }; static const struct dcn_hubp2_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN31(__SHIFT) }; static const struct dcn_hubp2_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN31(_MASK) }; static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN31(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN31(_MASK) }; static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN31() }; static const struct dccg_shift dccg_shift = { DCCG_MASK_SH_LIST_DCN31(__SHIFT) }; static const struct dccg_mask dccg_mask = { DCCG_MASK_SH_LIST_DCN31(_MASK) }; #define SRII2(reg_name_pre, reg_name_post, id)\ .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ ## id ## _ ## reg_name_post ## _BASE_IDX) + \ reg ## reg_name_pre ## id ## _ ## reg_name_post #define HWSEQ_DCN31_REG_LIST()\ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ SR(DIO_MEM_PWR_CTRL), \ SR(ODM_MEM_PWR_CTRL3), \ SR(DMU_MEM_PWR_CNTL), \ SR(MMHUBBUB_MEM_PWR_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL2), \ SR(DCFCLK_CNTL),\ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ 
SRII(PIXEL_RATE_CNTL, OTG, 0), \ SRII(PIXEL_RATE_CNTL, OTG, 1),\ SRII(PIXEL_RATE_CNTL, OTG, 2),\ SRII(PIXEL_RATE_CNTL, OTG, 3),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ SR(MICROSECOND_TIME_BASE_DIV), \ SR(MILLISECOND_TIME_BASE_DIV), \ SR(DISPCLK_FREQ_CHANGE_CNTL), \ SR(RBBMIF_TIMEOUT_DIS), \ SR(RBBMIF_TIMEOUT_DIS_2), \ SR(DCHUBBUB_CRC_CTRL), \ SR(DPP_TOP0_DPP_CRC_CTRL), \ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ SR(MPC_CRC_CTRL), \ SR(MPC_CRC_RESULT_GB), \ SR(MPC_CRC_RESULT_C), \ SR(MPC_CRC_RESULT_AR), \ SR(DOMAIN0_PG_CONFIG), \ SR(DOMAIN1_PG_CONFIG), \ SR(DOMAIN2_PG_CONFIG), \ SR(DOMAIN3_PG_CONFIG), \ SR(DOMAIN16_PG_CONFIG), \ SR(DOMAIN17_PG_CONFIG), \ SR(DOMAIN18_PG_CONFIG), \ SR(DOMAIN0_PG_STATUS), \ SR(DOMAIN1_PG_STATUS), \ SR(DOMAIN2_PG_STATUS), \ SR(DOMAIN3_PG_STATUS), \ SR(DOMAIN16_PG_STATUS), \ SR(DOMAIN17_PG_STATUS), \ SR(DOMAIN18_PG_STATUS), \ SR(D1VGA_CONTROL), \ SR(D2VGA_CONTROL), \ SR(D3VGA_CONTROL), \ SR(D4VGA_CONTROL), \ SR(D5VGA_CONTROL), \ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ SR(AZALIA_AUDIO_DTO), \ SR(AZALIA_CONTROLLER_CLOCK_GATING), \ SR(HPO_TOP_HW_CONTROL) static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN31_REG_LIST() }; #define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) }; static const struct 
dce_hwseq_mask hwseq_mask = { HWSEQ_DCN31_MASK_SH_LIST(_MASK) }; #define vmid_regs(id)\ [id] = {\ DCN20_VMID_REG_LIST(id)\ } static const struct dcn_vmid_registers vmid_regs[] = { vmid_regs(0), vmid_regs(1), vmid_regs(2), vmid_regs(3), vmid_regs(4), vmid_regs(5), vmid_regs(6), vmid_regs(7), vmid_regs(8), vmid_regs(9), vmid_regs(10), vmid_regs(11), vmid_regs(12), vmid_regs(13), vmid_regs(14), vmid_regs(15) }; static const struct dcn20_vmid_shift vmid_shifts = { DCN20_VMID_MASK_SH_LIST(__SHIFT) }; static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; static const struct resource_caps res_cap_dcn31 = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, .num_audio = 5, .num_stream_encoder = 5, .num_dig_link_enc = 5, .num_hpo_dp_stream_encoder = 4, .num_hpo_dp_link_encoder = 2, .num_pll = 5, .num_dwb = 1, .num_ddc = 5, .num_vmid = 16, .num_mpc_3dlut = 2, .num_dsc = 3, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true, .p010 = true, .ayuv = false, }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 16000 }, // 6:1 downscaling ratio: 1000/6 = 166.666 .max_downscale_factor = { .argb8888 = 167, .nv12 = 167, .fp16 = 167 }, 64, 64 }; static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .pstate_enabled = true, .use_max_lb = true, .enable_mem_low_power = { .bits = { .vga = true, .i2c = true, .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled .dscl = true, .cm = true, .mpc = true, .optc = true, .vpg = true, .afmt = true, } }, .disable_z10 = true, .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, }, }; static void dcn31_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; } static struct dpp *dcn31_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); if (!dpp) return NULL; if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) return &dpp->base; BREAK_TO_DEBUGGER(); kfree(dpp); return NULL; } static struct output_pixel_processor *dcn31_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } static struct dce_aux *dcn31_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; 
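/* Construct the AUX engine with the software timeout period scaled by
 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER, and honor the extended AUX timeout
 * support advertised in dc->caps.
 */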
dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) }; static struct dce_i2c_hw *dcn31_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct mpc *dcn31_mpc_create( struct dc_context *ctx, int num_mpcc, int num_rmu) { struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); if (!mpc30) return NULL; dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); return &mpc30->base; } static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) { int i; struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub3) return NULL; hubbub31_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask, dcn3_1_ip.det_buffer_size_kbytes, dcn3_1_ip.pixel_chunk_size_kbytes, dcn3_1_ip.config_return_buffer_size_in_kbytes); for (i = 0; i < res_cap_dcn31.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub3->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } return &hubbub3->base; } static struct timing_generator *dcn31_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &optc_regs[instance]; tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; dcn31_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; static struct link_encoder *dcn31_link_encoder_create( struct dc_context *ctx, const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; dcn31_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc20->enc10.base; } /* Create a minimal link encoder object not associated with a particular * physical connector. 
* resource_funcs.link_enc_create_minimal */ static struct link_encoder *dcn31_link_enc_create_minimal( struct dc_context *ctx, enum engine_id eng_id) { struct dcn20_link_encoder *enc20; if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) return NULL; enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; dcn31_link_encoder_construct_minimal( enc20, ctx, &link_enc_feature, &link_enc_regs[eng_id - ENGINE_ID_DIGA], eng_id); return &enc20->enc10.base; } static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dcn31_panel_cntl *panel_cntl = kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); if (!panel_cntl) return NULL; dcn31_panel_cntl_construct(panel_cntl, init_data); return &panel_cntl->base; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *dcn31_create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct vpg *dcn31_vpg_create( struct dc_context *ctx, uint32_t inst) { struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); if (!vpg31) return NULL; vpg31_construct(vpg31, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); return &vpg31->base; } static struct afmt *dcn31_afmt_create( struct dc_context *ctx, uint32_t inst) { struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); if (!afmt31) return NULL; afmt31_construct(afmt31, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); // Light sleep by default, no need to power down here return &afmt31->base; } static struct apg *dcn31_apg_create( struct dc_context *ctx, uint32_t inst) { struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); if (!apg31) return NULL; apg31_construct(apg31, ctx, inst, &apg_regs[inst], &apg_shift, &apg_mask); return &apg31->base; } static struct stream_encoder *dcn31_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; struct afmt *afmt; int vpg_inst; int afmt_inst; /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ if (eng_id <= ENGINE_ID_DIGF) { vpg_inst = eng_id; afmt_inst = eng_id; } else return NULL; enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); vpg = dcn31_vpg_create(ctx, vpg_inst); afmt = dcn31_afmt_create(ctx, afmt_inst); if (!enc1 || !vpg || !afmt) { kfree(enc1); kfree(vpg); kfree(afmt); return NULL; } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; struct vpg *vpg; struct apg *apg; uint32_t hpo_dp_inst; uint32_t vpg_inst; uint32_t apg_inst; ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; /* Mapping of VPG register blocks to HPO DP block instance: * VPG[6] -> HPO_DP[0] * VPG[7] -> HPO_DP[1] * VPG[8] -> HPO_DP[2] * VPG[9] -> HPO_DP[3] */ vpg_inst = hpo_dp_inst + 6; /* Mapping of APG register blocks to HPO DP block instance: * APG[0] -> HPO_DP[0] * APG[1] -> HPO_DP[1] * APG[2] -> HPO_DP[2] * APG[3] -> 
HPO_DP[3] */ apg_inst = hpo_dp_inst; /* allocate HPO stream encoder and create VPG sub-block */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); vpg = dcn31_vpg_create(ctx, vpg_inst); apg = dcn31_apg_create(ctx, apg_inst); if (!hpo_dp_enc31 || !vpg || !apg) { kfree(hpo_dp_enc31); kfree(vpg); kfree(apg); return NULL; } dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, hpo_dp_inst, eng_id, vpg, apg, &hpo_dp_stream_enc_regs[hpo_dp_inst], &hpo_dp_se_shift, &hpo_dp_se_mask); return &hpo_dp_enc31->base; } static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( uint8_t inst, struct dc_context *ctx) { struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], &hpo_dp_le_shift, &hpo_dp_le_mask); return &hpo_dp_enc31->base; } static struct dce_hwseq *dcn31_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn31_create_audio, .create_stream_encoder = dcn31_stream_encoder_create, .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, .create_hwseq = dcn31_hwseq_create, }; static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { if (pool->base.stream_enc[i]->vpg != NULL) { kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); pool->base.stream_enc[i]->vpg = NULL; } if (pool->base.stream_enc[i]->afmt != NULL) { kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); pool->base.stream_enc[i]->afmt = NULL; } kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); pool->base.stream_enc[i] = NULL; } } for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { if (pool->base.hpo_dp_stream_enc[i] != NULL) { if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); pool->base.hpo_dp_stream_enc[i]->vpg = NULL; } if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); pool->base.hpo_dp_stream_enc[i]->apg = NULL; } kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); pool->base.hpo_dp_stream_enc[i] = NULL; } } for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { if (pool->base.hpo_dp_link_enc[i] != NULL) { kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); pool->base.hpo_dp_link_enc[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(pool->base.hubbub); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.dpps[i] != NULL) dcn31_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN20_HUBP(pool->base.hubps[i])); 
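/* Clear the freed HUBP pointer, then release the remaining per-pool objects
 * below: IRQ service, AUX/I2C engines, OPPs, timing generators, DWB and
 * MMHUBBUB, audio endpoints, clock sources, MPC 3D LUTs/shapers, ABMs,
 * PSR/replay helpers and the DCCG.
 */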
pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_opp; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dwb; i++) { if (pool->base.dwbc[i] != NULL) { kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); pool->base.dwbc[i] = NULL; } if (pool->base.mcif_wb[i] != NULL) { kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); pool->base.mcif_wb[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn20_clock_source_destroy(&pool->base.clock_sources[i]); pool->base.clock_sources[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { if (pool->base.mpc_lut[i] != NULL) { dc_3dlut_func_release(pool->base.mpc_lut[i]); pool->base.mpc_lut[i] = NULL; } if (pool->base.mpc_shaper[i] != NULL) { dc_transfer_func_release(pool->base.mpc_shaper[i]); pool->base.mpc_shaper[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn20_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { if (pool->base.multiple_abms[i] != NULL) dce_abm_destroy(&pool->base.multiple_abms[i]); } if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); if (pool->base.replay != NULL) dmub_replay_destroy(&pool->base.replay); if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); } static struct hubp *dcn31_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; if (hubp31_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); if (!dwbc30) { dm_error("DC: failed to create dwbc30!\n"); return false; } dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); pool->dwbc[i] = &dwbc30->base; } return true; } static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; for (i = 0; i < pipe_count; i++) { struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); if (!mcif_wb30) { dm_error("DC: failed to create mcif_wb30!\n"); return false; } dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); pool->mcif_wb[i] = &mcif_wb30->base; } return true; } static struct display_stream_compressor *dcn31_dsc_create( struct dc_context *ctx, 
uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); return NULL; } dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } static void dcn31_destroy_resource_pool(struct resource_pool **pool) { struct dcn31_resource_pool *dcn31_pool = TO_DCN31_RES_POOL(*pool); dcn31_resource_destruct(dcn31_pool); kfree(dcn31_pool); *pool = NULL; } static struct clock_source *dcn31_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static bool is_dual_plane(enum surface_pixel_format format) { return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); for (i = 0; i < pipe_cnt; i++) { pipes[i].pipe.src.gpuvm = 1; if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE) { //pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled; } else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE) pipes[i].pipe.src.hostvm = false; else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE) pipes[i].pipe.src.hostvm = true; } return pipe_cnt; } int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; bool upscaled = false; DC_FP_START(); dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; if (!res_ctx->pipe_ctx[i].stream) continue; pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; if (pipe->plane_state && (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width)) upscaled = true; /* * Immediate flip can be set dynamically after enabling the plane. * We need to require support for immediate flip or underflow can be * intermittently experienced depending on peak b/w requirements. 
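 * Hence every pipe populated for DML below has immediate_flip forced true.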
*/ pipes[pipe_cnt].pipe.src.immediate_flip = true; pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.src.gpuvm = true; pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; pipes[pipe_cnt].pipe.src.dcc_rate = 3; pipes[pipe_cnt].dout.dsc_input_bpc = 0; DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); DC_FP_END(); if (pipes[pipe_cnt].dout.dsc_enable) { switch (timing->display_color_depth) { case COLOR_DEPTH_888: pipes[pipe_cnt].dout.dsc_input_bpc = 8; break; case COLOR_DEPTH_101010: pipes[pipe_cnt].dout.dsc_input_bpc = 10; break; case COLOR_DEPTH_121212: pipes[pipe_cnt].dout.dsc_input_bpc = 12; break; default: ASSERT(0); break; } } pipe_cnt++; } context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE; dc->config.enable_4to1MPC = false; if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { if (is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { dc->config.enable_4to1MPC = true; } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; pipes[0].pipe.src.unbounded_req_mode = true; } } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64; } else if (context->stream_count >= 3 && upscaled) { context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; } return pipe_cnt; } void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel) { DC_FP_START(); dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); DC_FP_END(); } void dcn31_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) { DC_FP_START(); dcn30_populate_dml_writeback_from_context(dc, res_ctx, pipes); DC_FP_END(); } void dcn31_set_mcif_arb_params(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt) { DC_FP_START(); dcn30_set_mcif_arb_params(dc, context, pipes, pipe_cnt); DC_FP_END(); } bool dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool out = false; BW_VAL_TRACE_SETUP(); int vlevel = 0; int pipe_cnt = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg if (pipe_cnt == 0) fast_validate = false; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); if (fast_validate) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } if (dc->res_pool->funcs->calculate_wm_and_dlg) dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; validate_fail: DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); BW_VAL_TRACE_SKIP(fail); out = false; validate_out: kfree(pipes); BW_VAL_TRACE_FINISH(); return out; } static void 
dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config) { *panel_config = panel_config_defaults; } static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; static struct resource_funcs dcn31_res_pool_funcs = { .destroy = dcn31_destroy_resource_pool, .link_enc_create = dcn31_link_encoder_create, .link_enc_create_minimal = dcn31_link_enc_create_minimal, .link_encs_assign = link_enc_cfg_link_encs_assign, .link_enc_unassign = link_enc_cfg_link_enc_unassign, .panel_cntl_create = dcn31_panel_cntl_create, .validate_bandwidth = dcn31_validate_bandwidth, .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context, .set_mcif_arb_params = dcn31_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, .update_bw_bounding_box = dcn31_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn31_get_panel_config_defaults, }; static struct clock_source *dcn30_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn31_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } static bool dcn31_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn31_resource_pool *pool) { int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_dcn31; pool->base.funcs = &dcn31_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = pool->base.res_cap->num_timing_generator; pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.max_slave_planes = 2; dc->caps.max_slave_yuv_planes = 2; dc->caps.max_slave_rgb_planes = 2; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->config.forceHBR2CP2520) dc->caps.force_dp_tps4_for_cp2520 = false; dc->caps.dp_hpo = true; dc->caps.dp_hdmi21_pcon_support = true; dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.is_apu = true; dc->caps.zstate_support = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; dc->caps.color.dpp.input_lut_shared = 0; dc->caps.color.dpp.icsc = 1; 
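/* Remaining color-pipeline capabilities for DCN3.1: the DPP has no degamma
 * RAM (ROM curves plus gamma_corr are used instead), provides post-CSC,
 * a 3D LUT and OGAM RAM, while the MPC supplies gamut remap, OGAM RAM,
 * output CSC and the shared pair of 3D LUTs.
 */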
dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr dc->caps.color.dpp.dgam_rom_caps.srgb = 1; dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; dc->caps.color.dpp.dgam_rom_caps.pq = 1; dc->caps.color.dpp.dgam_rom_caps.hlg = 1; dc->caps.color.dpp.post_csc = 1; dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; dc->caps.color.dpp.hw_3d_lut = 1; dc->caps.color.dpp.ogam_ram = 1; // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.dpp.ogam_rom_caps.pq = 0; dc->caps.color.dpp.ogam_rom_caps.hlg = 0; dc->caps.color.dpp.ocsc = 0; dc->caps.color.mpc.gamut_remap = 1; dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 dc->caps.color.mpc.ogam_ram = 1; dc->caps.color.mpc.ogam_rom_caps.srgb = 0; dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; dc->config.use_old_fixed_vs_sequence = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; uint8_t is_vbios_lttpr_enable = 0; bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; } /* interop bit is implicit */ { dc->caps.vbios_lttpr_aware = true; } } if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); /************************************************* * Create resources * *************************************************/ /* Clock Sources for Pixel Clock*/ pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); /*move phypllx_pixclk_resync to dmub next*/ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs_b0[2], false); pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs_b0[3], false); } else { pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); } pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; /* todo: not reuse phy_pll registers */ pool->base.dp_clock_source = dcn31_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } } /* TODO: DCCG */ pool->base.dccg = 
dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); if (pool->base.dccg == NULL) { dm_error("DC: failed to create dccg!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* TODO: IRQ */ init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dcn31_create(&init_data); if (!pool->base.irqs) goto create_fail; /* HUBBUB */ pool->base.hubbub = dcn31_hubbub_create(ctx); if (pool->base.hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto create_fail; } /* HUBPs, DPPs, OPPs and TGs */ for (i = 0; i < pool->base.pipe_count; i++) { pool->base.hubps[i] = dcn31_hubp_create(ctx, i); if (pool->base.hubps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create hubps!\n"); goto create_fail; } pool->base.dpps[i] = dcn31_dpp_create(ctx, i); if (pool->base.dpps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpps!\n"); goto create_fail; } } for (i = 0; i < pool->base.res_cap->num_opp; i++) { pool->base.opps[i] = dcn31_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto create_fail; } } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.timing_generators[i] = dcn31_timing_generator_create( ctx, i); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; } } pool->base.timing_generator_count = i; /* PSR */ pool->base.psr = dmub_psr_create(ctx); if (pool->base.psr == NULL) { dm_error("DC: failed to create psr obj!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* Replay */ pool->base.replay = dmub_replay_create(ctx); if (pool->base.replay == NULL) { dm_error("DC: failed to create replay obj!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); if (pool->base.multiple_abms[i] == NULL) { dm_error("DC: failed to create abm for pipe %d!\n", i); BREAK_TO_DEBUGGER(); goto create_fail; } } /* MPC and DSC */ pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); if (pool->base.mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto create_fail; } for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn31_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create display stream compressor %d!\n", i); goto create_fail; } } /* DWB and MMHUBBUB */ if (!dcn31_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dwbc!\n"); goto create_fail; } if (!dcn31_mmhubbub_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mcif_wb!\n"); goto create_fail; } /* AUX and I2C */ for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto create_fail; } pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create hw i2c!!\n"); goto create_fail; } pool->base.sw_i2cs[i] = NULL; } if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && !dc->debug.dpia_debug.bits.disable_dpia) { /* YELLOW CARP B0 has 4 DPIA's */ pool->base.usb4_dpia_count = 4; } if 
(dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1) pool->base.usb4_dpia_count = 4; /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; return true; create_fail: dcn31_resource_destruct(pool); return false; } struct resource_pool *dcn31_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc) { struct dcn31_resource_pool *pool = kzalloc(sizeof(struct dcn31_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dcn31_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); kfree(pool); return NULL; }
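/*
 * Illustrative sketch only, not part of the driver: a standalone helper that
 * mirrors the DET buffer sizing policy applied in
 * dcn31_populate_dml_pipes_from_context above. The struct and function names
 * here are hypothetical; the thresholds (1920x1080 dual-plane limit, 5120-wide
 * single-plane limit, 64 KB CRB segment granularity, 192 KB bump) come from the
 * code in this file, and the kernel's bool type is assumed.
 */
struct dcn31_det_policy_input {
	int pipe_cnt;
	int stream_count;
	bool single_pipe_has_plane;	/* the lone pipe has a plane_state */
	bool disable_z9_mpc;		/* dc->debug.disable_z9_mpc */
	bool dual_plane;		/* is_dual_plane(plane format) */
	int src_width;
	int src_height;
	bool upscaled;
	int crb_alloc_policy;		/* dc->debug.crb_alloc_policy, in 64 KB segments */
	int crb_alloc_policy_min_disp_count;
	int default_det_kbytes;		/* e.g. DCN3_1_DEFAULT_DET_SIZE */
};

static int dcn31_det_size_kbytes_sketch(const struct dcn31_det_policy_input *in,
					bool *enable_4to1_mpc, bool *unbounded_req)
{
	int det_kbytes = in->default_det_kbytes;

	*enable_4to1_mpc = false;
	*unbounded_req = false;

	if (in->pipe_cnt == 1 && in->single_pipe_has_plane && !in->disable_z9_mpc) {
		if (in->dual_plane && in->src_width <= 1920 && in->src_height <= 1080) {
			/* small dual-plane content: allow 4-to-1 MPC combine */
			*enable_4to1_mpc = true;
		} else if (!in->dual_plane && in->src_width <= 5120) {
			/* single plane up to 5K: larger DET plus unbounded requests
			 * avoids a forced pipe split for the swath */
			det_kbytes = 192;
			*unbounded_req = true;
		}
	} else if (in->stream_count >= in->crb_alloc_policy_min_disp_count &&
			in->crb_alloc_policy > 0) {
		/* debug override: policy is expressed in 64 KB CRB segments */
		det_kbytes = in->crb_alloc_policy * 64;
	} else if (in->stream_count >= 3 && in->upscaled) {
		det_kbytes = 192;
	}

	return det_kbytes;
}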
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "dcn31_hpo_dp_stream_encoder.h" #include "reg_helper.h" #include "dc.h" #define DC_LOGGER \ enc3->base.ctx->logger #define REG(reg)\ (enc3->regs->reg) #undef FN #define FN(reg_name, field_name) \ enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name #define CTX \ enc3->base.ctx enum dp2_pixel_encoding { DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444, DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422, DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420, DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY }; enum dp2_uncompressed_component_depth { DP_SYM32_ENC_COMPONENT_DEPTH_6BPC, DP_SYM32_ENC_COMPONENT_DEPTH_8BPC, DP_SYM32_ENC_COMPONENT_DEPTH_10BPC, DP_SYM32_ENC_COMPONENT_DEPTH_12BPC }; static void dcn31_hpo_dp_stream_enc_enable_stream( struct hpo_dp_stream_encoder *enc) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); /* Enable all clocks in the DP_STREAM_ENC */ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, 1); /* Assert reset to the DP_SYM32_ENC logic */ REG_UPDATE(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, 1); /* Wait for reset to complete (to assert) */ REG_WAIT(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, 1, 1, 10); /* De-assert reset to the DP_SYM32_ENC logic */ REG_UPDATE(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, 0); /* Wait for reset to de-assert */ REG_WAIT(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, 0, 1, 10); /* Enable idle pattern generation */ REG_UPDATE(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, 1); } static void dcn31_hpo_dp_stream_enc_dp_unblank( struct hpo_dp_stream_encoder *enc, uint32_t stream_source) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); /* Set the input mux for video stream source */ REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source); /* Enable video transmission in main framer */ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, 1); /* Reset and Enable Pixel to Symbol FIFO */ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, 1); REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1, 1, 10); REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, 0); REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, /* Disable Clock Ramp Adjuster FIFO */ PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0, 1, 10); REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, 
PIXEL_TO_SYMBOL_FIFO_ENABLE, 1); /* Reset and Enable Clock Ramp Adjuster FIFO */ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, 1); REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, 1, 1, 10); REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, 0); REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, 0, 1, 10); /* For Debug -- Enable CRC */ REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, 1, CRC_CONT_MODE_ENABLE, 1); REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, 1); } static void dcn31_hpo_dp_stream_enc_dp_blank( struct hpo_dp_stream_encoder *enc) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); /* Disable video transmission */ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, 0); /* Wait for video stream transmission disabled * Larger delay to wait until VBLANK - use max retry of * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode + * a little more because we may not trust delay accuracy. */ REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, 0, 10, 5000); /* Disable SDP transmission */ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 0); /* Disable Pixel to Symbol FIFO */ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, 0); /* Disable Clock Ramp Adjuster FIFO */ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, 0); } static void dcn31_hpo_dp_stream_enc_disable( struct hpo_dp_stream_encoder *enc) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); /* Disable DP_SYM32_ENC */ REG_UPDATE(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, 0); /* Disable clocks in the DP_STREAM_ENC */ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, 0); } static void dcn31_hpo_dp_stream_enc_set_stream_attribute( struct hpo_dp_stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, bool use_vsc_sdp_for_colorimetry, bool compressed_format, bool double_buffer_en) { enum dp2_pixel_encoding pixel_encoding; enum dp2_uncompressed_component_depth component_depth; uint32_t h_active_start; uint32_t v_active_start; uint32_t h_blank; uint32_t h_back_porch; uint32_t h_width; uint32_t v_height; uint64_t v_freq; uint8_t misc0 = 0; uint8_t misc1 = 0; uint8_t hsp; uint8_t vsp; struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); struct dc_crtc_timing hw_crtc_timing = *crtc_timing; /* MISC0[0] = 0 video and link clocks are asynchronous * MISC1[0] = 0 interlace not supported * MISC1[2:1] = 0 stereo field is handled by hardware * MISC1[5:3] = 0 Reserved */ /* Interlaced not supported */ if (hw_crtc_timing.flags.INTERLACE) { BREAK_TO_DEBUGGER(); } /* Double buffer enable for MSA and pixel format registers * Only double buffer for changing stream attributes for active streams * Do not double buffer when initially enabling a stream */ REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en); REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en); /* Pixel Encoding */ switch (hw_crtc_timing.pixel_encoding) { case PIXEL_ENCODING_YCBCR422: pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422; misc0 = misc0 | 0x2; // MISC0[2:1] = 01 break; case PIXEL_ENCODING_YCBCR444: pixel_encoding = 
DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; misc0 = misc0 | 0x4; // MISC0[2:1] = 10 if (hw_crtc_timing.flags.Y_ONLY) { pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY; if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) { /* HW testing only, no use case yet. * Color depth of Y-only could be * 8, 10, 12, 16 bits */ misc1 = misc1 | 0x80; // MISC1[7] = 1 } } break; case PIXEL_ENCODING_YCBCR420: pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420; misc1 = misc1 | 0x40; // MISC1[6] = 1 break; case PIXEL_ENCODING_RGB: default: pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; break; } /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used. * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). */ if (use_vsc_sdp_for_colorimetry) misc1 = misc1 | 0x40; else misc1 = misc1 & ~0x40; /* Color depth */ switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; // MISC0[7:5] = 000 break; case COLOR_DEPTH_888: component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC; misc0 = misc0 | 0x20; // MISC0[7:5] = 001 break; case COLOR_DEPTH_101010: component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC; misc0 = misc0 | 0x40; // MISC0[7:5] = 010 break; case COLOR_DEPTH_121212: component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC; misc0 = misc0 | 0x60; // MISC0[7:5] = 011 break; default: component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; break; } REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, compressed_format, UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding, UNCOMPRESSED_COMPONENT_DEPTH, component_depth); switch (output_color_space) { case COLOR_SPACE_SRGB: misc1 = misc1 & ~0x80; /* bit7 = 0*/ break; case COLOR_SPACE_SRGB_LIMITED: misc0 = misc0 | 0x8; /* bit3=1 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_2020_RGB_LIMITEDRANGE: case COLOR_SPACE_2020_RGB_FULLRANGE: case COLOR_SPACE_2020_YCBCR: case COLOR_SPACE_XR_RGB: case COLOR_SPACE_MSREF_SCRGB: case COLOR_SPACE_ADOBERGB: case COLOR_SPACE_DCIP3: case COLOR_SPACE_XV_YCC_709: case COLOR_SPACE_XV_YCC_601: case COLOR_SPACE_DISPLAYNATIVE: case COLOR_SPACE_DOLBYVISION: case COLOR_SPACE_APPCTRL: case COLOR_SPACE_CUSTOMPOINTS: case COLOR_SPACE_UNKNOWN: case COLOR_SPACE_YCBCR709_BLACK: /* do nothing */ break; } /* calculate from vesa timing parameters * h_active_start related to leading edge of sync */ h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; h_back_porch = h_blank - hw_crtc_timing.h_front_porch - hw_crtc_timing.h_sync_width; /* start at beginning of left border */ 
h_active_start = hw_crtc_timing.h_sync_width + h_back_porch; v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - hw_crtc_timing.v_front_porch; h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right; v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80; vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80; v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100; /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 * * Lane 0 Lane 1 Lane 2 Lane 3 * MSA[0] = { 0, 0, 0, VFREQ[47:40]} * MSA[1] = { 0, 0, 0, VFREQ[39:32]} * MSA[2] = { 0, 0, 0, VFREQ[31:24]} * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]} * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]} * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]} * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]} * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]} * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0} */ REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0, MSA_DATA_LANE_0, 0, MSA_DATA_LANE_1, 0, MSA_DATA_LANE_2, 0, MSA_DATA_LANE_3, v_freq >> 40); REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0, MSA_DATA_LANE_0, 0, MSA_DATA_LANE_1, 0, MSA_DATA_LANE_2, 0, MSA_DATA_LANE_3, (v_freq >> 32) & 0xff); REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0, MSA_DATA_LANE_0, 0, MSA_DATA_LANE_1, 0, MSA_DATA_LANE_2, 0, MSA_DATA_LANE_3, (v_freq >> 24) & 0xff); REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0, MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8, MSA_DATA_LANE_1, h_active_start >> 8, MSA_DATA_LANE_2, h_width >> 8, MSA_DATA_LANE_3, (v_freq >> 16) & 0xff); REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0, MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff, MSA_DATA_LANE_1, h_active_start & 0xff, MSA_DATA_LANE_2, h_width & 0xff, MSA_DATA_LANE_3, (v_freq >> 8) & 0xff); REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0, MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8, MSA_DATA_LANE_1, v_active_start >> 8, MSA_DATA_LANE_2, v_height >> 8, MSA_DATA_LANE_3, v_freq & 0xff); REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0, MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff, MSA_DATA_LANE_1, v_active_start & 0xff, MSA_DATA_LANE_2, v_height & 0xff, MSA_DATA_LANE_3, misc0); REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0, MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8), MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8), MSA_DATA_LANE_2, 0, MSA_DATA_LANE_3, misc1); REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0, MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff, MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff, MSA_DATA_LANE_2, 0, MSA_DATA_LANE_3, 0); } static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num( struct hpo_dp_stream_encoder *enc, struct encoder_info_frame *info_frame) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); if (info_frame->adaptive_sync.valid == true && info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, info_frame->sdp_line_num.adaptive_sync_line_num); } } static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( struct hpo_dp_stream_encoder *enc, const struct encoder_info_frame *info_frame) { struct dcn31_hpo_dp_stream_encoder *enc3 = 
DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); uint32_t dmdata_packet_enabled = 0; if (info_frame->vsc.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, 0, /* packetIndex */ &info_frame->vsc, true); if (info_frame->spd.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, 2, /* packetIndex */ &info_frame->spd, true); if (info_frame->hdrsmd.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, 3, /* packetIndex */ &info_frame->hdrsmd, true); if (info_frame->adaptive_sync.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, 5, /* packetIndex */ &info_frame->adaptive_sync, true); /* enable/disable transmission of packet(s). * If enabled, packet transmission begins on the next frame */ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid); /* check if dynamic metadata packet transmission is enabled */ REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, &dmdata_packet_enabled); /* Enable secondary data path */ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 1); } static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets( struct hpo_dp_stream_encoder *enc) { /* stop generic packets on DP */ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); uint32_t asp_enable = 0; uint32_t atp_enable = 0; uint32_t aip_enable = 0; uint32_t acm_enable = 0; REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); /* Disable secondary data path if audio is also disabled */ REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, &asp_enable, ATP_ENABLE, &atp_enable, AIP_ENABLE, &aip_enable, ACM_ENABLE, &acm_enable); if (!(asp_enable || atp_enable || aip_enable || acm_enable)) REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 0); } static uint32_t hpo_dp_is_gsp_enabled( struct hpo_dp_stream_encoder *enc) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); uint32_t gsp0_enabled = 0; uint32_t gsp2_enabled = 0; uint32_t gsp3_enabled = 0; uint32_t gsp11_enabled = 0; REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled); REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled); REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled); REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled); return (gsp0_enabled || gsp2_enabled || gsp3_enabled || gsp11_enabled); } static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet( struct hpo_dp_stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps, bool immediate_update) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); if (enable) { struct dc_info_packet pps_sdp; int i; /* Configure for PPS packet size (128 bytes) */ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_PAYLOAD_SIZE, 3); /* Load PPS into infoframe (SDP) registers */ 
pps_sdp.valid = true; pps_sdp.hb0 = 0; pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS; pps_sdp.hb2 = 127; pps_sdp.hb3 = 0; for (i = 0; i < 4; i++) { memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32); enc3->base.vpg->funcs->update_generic_info_packet( enc3->base.vpg, 11 + i, &pps_sdp, immediate_update); } /* SW should make sure VBID[6] update line number is bigger * than PPS transmit line number */ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_TRANSMISSION_LINE_NUMBER, 2); REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3); /* Send PPS data at the line number specified above. */ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1); REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 1); } else { /* Disable Generic Stream Packet 11 (GSP) transmission */ REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0, GSP_PAYLOAD_SIZE, 0); } } static void dcn31_hpo_dp_stream_enc_map_stream_to_link( struct hpo_dp_stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); ASSERT(stream_enc_inst < 4 && link_enc_inst < 2); switch (stream_enc_inst) { case 0: REG_UPDATE(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, link_enc_inst); break; case 1: REG_UPDATE(DP_STREAM_MAPPER_CONTROL1, DP_STREAM_LINK_TARGET, link_enc_inst); break; case 2: REG_UPDATE(DP_STREAM_MAPPER_CONTROL2, DP_STREAM_LINK_TARGET, link_enc_inst); break; case 3: REG_UPDATE(DP_STREAM_MAPPER_CONTROL3, DP_STREAM_LINK_TARGET, link_enc_inst); break; } } static void dcn31_hpo_dp_stream_enc_audio_setup( struct hpo_dp_stream_encoder *enc, unsigned int az_inst, struct audio_info *info) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); /* Set the input mux for video stream source */ REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst); ASSERT(enc->apg); enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info); } static void dcn31_hpo_dp_stream_enc_audio_enable( struct hpo_dp_stream_encoder *enc) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); /* Enable Audio packets */ REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1); /* Program the ATP and AIP next */ REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, 1, AIP_ENABLE, 1); /* Enable secondary data path */ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 1); /* Enable APG block */ enc->apg->funcs->enable_apg(enc->apg); } static void dcn31_hpo_dp_stream_enc_audio_disable( struct hpo_dp_stream_encoder *enc) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); /* Disable Audio packets */ REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 0, ATP_ENABLE, 0, AIP_ENABLE, 0, ACM_ENABLE, 0); /* Disable STP Stream Enable if other SDP GSP are also disabled */ if (!(hpo_dp_is_gsp_enabled(enc))) REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 0); /* Disable APG block */ enc->apg->funcs->disable_apg(enc->apg); } static void dcn31_hpo_dp_stream_enc_read_state( struct hpo_dp_stream_encoder *enc, struct hpo_dp_stream_encoder_state *s) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); REG_GET(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled); REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL, 
VID_STREAM_ENABLE, &s->vid_stream_enabled); REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst); REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, &s->compressed_format, UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding, UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth); REG_GET(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, &s->sdp_enabled); switch (enc->inst) { case 0: REG_GET(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); break; case 1: REG_GET(DP_STREAM_MAPPER_CONTROL1, DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); break; case 2: REG_GET(DP_STREAM_MAPPER_CONTROL2, DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); break; case 3: REG_GET(DP_STREAM_MAPPER_CONTROL3, DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); break; } } static void dcn31_set_hblank_min_symbol_width( struct hpo_dp_stream_encoder *enc, uint16_t width) { struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); REG_SET(DP_SYM32_ENC_HBLANK_CONTROL, 0, HBLANK_MINIMUM_SYMBOL_WIDTH, width); } static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream, .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank, .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank, .disable = dcn31_hpo_dp_stream_enc_disable, .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute, .update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num, .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets, .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets, .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet, .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link, .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup, .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable, .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, .read_state = dcn31_hpo_dp_stream_enc_read_state, .set_hblank_min_symbol_width = dcn31_set_hblank_min_symbol_width, }; void dcn31_hpo_dp_stream_encoder_construct( struct dcn31_hpo_dp_stream_encoder *enc3, struct dc_context *ctx, struct dc_bios *bp, uint32_t inst, enum engine_id eng_id, struct vpg *vpg, struct apg *apg, const struct dcn31_hpo_dp_stream_encoder_registers *regs, const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask) { enc3->base.funcs = &dcn30_str_enc_funcs; enc3->base.ctx = ctx; enc3->base.inst = inst; enc3->base.id = eng_id; enc3->base.bp = bp; enc3->base.vpg = vpg; enc3->base.apg = apg; enc3->regs = regs; enc3->hpo_se_shift = hpo_se_shift; enc3->hpo_se_mask = hpo_se_mask; }
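/*
 * Illustrative sketch only, not part of the driver: shows how the MSA MISC0 and
 * MISC1 bytes written by dcn31_hpo_dp_stream_enc_set_stream_attribute above are
 * assembled for the common RGB/YCbCr cases. The enum and function names are
 * hypothetical; the bit positions follow the comments in that function
 * (MISC0[2:1] = pixel encoding, MISC0[7:5] = component depth, MISC1[6] set when
 * a VSC SDP carries the pixel encoding/colorimetry instead of MISC0/MISC1).
 */
enum msa_sketch_encoding {
	MSA_SKETCH_RGB,
	MSA_SKETCH_YCBCR444,
	MSA_SKETCH_YCBCR422,
	MSA_SKETCH_YCBCR420
};

static void dcn31_msa_misc_sketch(enum msa_sketch_encoding encoding, int bpc,
				  int use_vsc_sdp_for_colorimetry,
				  unsigned char *misc0, unsigned char *misc1)
{
	*misc0 = 0;
	*misc1 = 0;

	/* MISC0[2:1]: 00 = RGB, 01 = YCbCr 4:2:2, 10 = YCbCr 4:4:4 */
	if (encoding == MSA_SKETCH_YCBCR422)
		*misc0 |= 0x2;
	else if (encoding == MSA_SKETCH_YCBCR444)
		*misc0 |= 0x4;
	else if (encoding == MSA_SKETCH_YCBCR420)
		*misc1 |= 0x40;	/* MISC1[6]: format described by the VSC SDP */

	/* MISC0[7:5]: 000 = 6 bpc, 001 = 8 bpc, 010 = 10 bpc, 011 = 12 bpc */
	if (bpc == 8)
		*misc0 |= 0x20;
	else if (bpc == 10)
		*misc0 |= 0x40;
	else if (bpc == 12)
		*misc0 |= 0x60;

	/* When the VSC SDP carries colorimetry, MISC1[6] is set and the sink
	 * treats MISC1[7] and MISC0[7:1] as don't-care. */
	if (use_vsc_sdp_for_colorimetry)
		*misc1 |= 0x40;
	else
		*misc1 &= ~0x40;
}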
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dcn30/dcn30_hubbub.h" #include "dcn31_hubbub.h" #include "dm_services.h" #include "reg_helper.h" #define CTX \ hubbub2->base.ctx #define DC_LOGGER \ hubbub2->base.ctx->logger #define REG(reg)\ hubbub2->regs->reg #undef FN #define FN(reg_name, field_name) \ hubbub2->shifts->field_name, hubbub2->masks->field_name #ifdef NUM_VMID #undef NUM_VMID #endif #define NUM_VMID 16 #define DCN31_CRB_SEGMENT_SIZE_KB 64 static void dcn31_init_crb(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &hubbub2->det0_size); REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &hubbub2->det1_size); REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &hubbub2->det2_size); REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &hubbub2->det3_size); REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, &hubbub2->compbuf_size_segments); REG_SET_2(COMPBUF_RESERVED_SPACE, 0, COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x17F); } static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB; switch (hubp_inst) { case 0: REG_UPDATE(DCHUBBUB_DET0_CTRL, DET0_SIZE, det_size_segments); hubbub2->det0_size = det_size_segments; break; case 1: REG_UPDATE(DCHUBBUB_DET1_CTRL, DET1_SIZE, det_size_segments); hubbub2->det1_size = det_size_segments; break; case 2: REG_UPDATE(DCHUBBUB_DET2_CTRL, DET2_SIZE, det_size_segments); hubbub2->det2_size = det_size_segments; break; case 3: REG_UPDATE(DCHUBBUB_DET3_CTRL, DET3_SIZE, det_size_segments); hubbub2->det3_size = det_size_segments; break; default: break; } DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments); /* Should never be hit, if it is we have an erroneous hw config*/ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); } static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); unsigned int compbuf_size_segments = (compbuf_size_kb + 
DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB; if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { if (compbuf_size_segments > hubbub2->compbuf_size_segments) { REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); } /* Should never be hit, if it is we have an erroneous hw config*/ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); hubbub2->compbuf_size_segments = compbuf_size_segments; ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); } } static uint32_t convert_and_clamp( uint32_t wm_ns, uint32_t refclk_mhz, uint32_t clamp_value) { uint32_t ret_val = 0; ret_val = wm_ns * refclk_mhz; ret_val /= 1000; if (ret_val > clamp_value) { /* clamping WMs is abnormal, unexpected and may lead to underflow*/ ASSERT(0); ret_val = clamp_value; } return ret_val; } static bool hubbub31_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; bool wm_pending = false; /* Repeat for water mark set A, B, C and D. */ /* clock state A */ if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.urgent_ns, prog_wm_value); } else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->a.frac_urg_bw_flip > hubbub2->watermarks.a.frac_urg_bw_flip) { hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); } else if (watermarks->a.frac_urg_bw_flip < hubbub2->watermarks.a.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->a.frac_urg_bw_nom > hubbub2->watermarks.a.frac_urg_bw_nom) { hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); } else if (watermarks->a.frac_urg_bw_nom < hubbub2->watermarks.a.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > 
hubbub2->watermarks.b.urgent_ns) { hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.urgent_ns, prog_wm_value); } else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->b.frac_urg_bw_flip > hubbub2->watermarks.b.frac_urg_bw_flip) { hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip); } else if (watermarks->b.frac_urg_bw_flip < hubbub2->watermarks.b.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->b.frac_urg_bw_nom > hubbub2->watermarks.b.frac_urg_bw_nom) { hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom); } else if (watermarks->b.frac_urg_bw_nom < hubbub2->watermarks.b.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.urgent_ns, prog_wm_value); } else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->c.frac_urg_bw_flip > hubbub2->watermarks.c.frac_urg_bw_flip) { hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip); } else if (watermarks->c.frac_urg_bw_flip < hubbub2->watermarks.c.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->c.frac_urg_bw_nom > hubbub2->watermarks.c.frac_urg_bw_nom) { hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom); } else if (watermarks->c.frac_urg_bw_nom < hubbub2->watermarks.c.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, 
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.urgent_ns, prog_wm_value); } else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->d.frac_urg_bw_flip > hubbub2->watermarks.d.frac_urg_bw_flip) { hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip); } else if (watermarks->d.frac_urg_bw_flip < hubbub2->watermarks.d.frac_urg_bw_flip) wm_pending = true; if (safe_to_lower || watermarks->d.frac_urg_bw_nom > hubbub2->watermarks.d.frac_urg_bw_nom) { hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom); } else if (watermarks->d.frac_urg_bw_nom < hubbub2->watermarks.d.frac_urg_bw_nom) wm_pending = true; if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) wm_pending = true; return wm_pending; } static bool hubbub31_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; bool wm_pending = false; /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns = watermarks->a.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, 
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) { hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) wm_pending = true; if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns > hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) { hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns = watermarks->a.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns < hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns = watermarks->b.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns > 
hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) { hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) wm_pending = true; if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns > hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) { hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns = watermarks->b.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns < hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns = watermarks->c.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) { hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C 
calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) wm_pending = true; if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns > hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) { hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns = watermarks->c.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.cstate_exit_z8_ns < hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns > hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) { hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns = watermarks->d.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.cstate_exit_ns < hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) wm_pending = true; if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) { hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) wm_pending = true; if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns > hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) { hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns = 
watermarks->d.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_z8_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns < hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) wm_pending = true; return wm_pending; } static bool hubbub31_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; bool wm_pending = false; /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns > hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.a.cstate_pstate.pstate_change_ns = watermarks->a.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->a.cstate_pstate.pstate_change_ns < hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns > hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.b.cstate_pstate.pstate_change_ns = watermarks->b.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->b.cstate_pstate.pstate_change_ns < hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns > hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.c.cstate_pstate.pstate_change_ns = watermarks->c.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->c.cstate_pstate.pstate_change_ns < hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns > hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) { hubbub2->watermarks.d.cstate_pstate.pstate_change_ns = watermarks->d.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); } else if (watermarks->d.cstate_pstate.pstate_change_ns < hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) wm_pending = true; return wm_pending; } static bool hubbub31_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { bool wm_pending = false; if (hubbub31_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; if (hubbub31_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; if (hubbub31_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; /* * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. * If the memory controller is fully utilized and the DCHub requestors are * well ahead of their amortized schedule, then it is safe to prevent the next winner * from being committed and sent to the fabric. * The utilization of the memory controller is approximated by ensuring that * the number of outstanding requests is greater than a threshold specified * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. * * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF) * to turn off it for now. */ /*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); return wm_pending; } static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, unsigned int bytes_per_element) { /* copied from DML. might want to refactor DML to leverage from DML */ /* DML : get_blk256_size */ if (bytes_per_element == 1) { *blk256_width = 16; *blk256_height = 16; } else if (bytes_per_element == 2) { *blk256_width = 16; *blk256_height = 8; } else if (bytes_per_element == 4) { *blk256_width = 8; *blk256_height = 8; } else if (bytes_per_element == 8) { *blk256_width = 8; *blk256_height = 4; } } static void hubbub31_det_request_size( unsigned int detile_buf_size, unsigned int height, unsigned int width, unsigned int bpe, bool *req128_horz_wc, bool *req128_vert_wc) { unsigned int blk256_height = 0; unsigned int blk256_width = 0; unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe); swath_bytes_horz_wc = width * blk256_height * bpe; swath_bytes_vert_wc = height * blk256_width * bpe; *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? false : /* full 256B request */ true; /* half 128b request */ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? 
false : /* full 256B request */ true; /* half 128b request */ } static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { struct dc *dc = hubbub->ctx->dc; enum dcc_control dcc_control; unsigned int bpe; enum segment_order segment_order_horz, segment_order_vert; bool req128_horz_wc, req128_vert_wc; memset(output, 0, sizeof(*output)); if (dc->debug.disable_dcc == DCC_DISABLE) return false; if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe)) return false; if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, &segment_order_horz, &segment_order_vert)) return false; hubbub31_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, input->surface_size.height, input->surface_size.width, bpe, &req128_horz_wc, &req128_vert_wc); if (!req128_horz_wc && !req128_vert_wc) { dcc_control = dcc_control__256_256_xxx; } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { if (!req128_horz_wc) dcc_control = dcc_control__256_256_xxx; else if (segment_order_horz == segment_order__contiguous) dcc_control = dcc_control__128_128_xxx; else dcc_control = dcc_control__256_64_64; } else if (input->scan == SCAN_DIRECTION_VERTICAL) { if (!req128_vert_wc) dcc_control = dcc_control__256_256_xxx; else if (segment_order_vert == segment_order__contiguous) dcc_control = dcc_control__128_128_xxx; else dcc_control = dcc_control__256_64_64; } else { if ((req128_horz_wc && segment_order_horz == segment_order__non_contiguous) || (req128_vert_wc && segment_order_vert == segment_order__non_contiguous)) /* access_dir not known, must use most constraining */ dcc_control = dcc_control__256_64_64; else /* reg128 is true for either horz and vert * but segment_order is contiguous */ dcc_control = dcc_control__128_128_xxx; } /* Exception for 64KB_R_X */ if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) dcc_control = dcc_control__128_128_xxx; if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && dcc_control != dcc_control__256_256_xxx) return false; switch (dcc_control) { case dcc_control__256_256_xxx: output->grph.rgb.max_uncompressed_blk_size = 256; output->grph.rgb.max_compressed_blk_size = 256; output->grph.rgb.independent_64b_blks = false; output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1; output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; break; case dcc_control__128_128_xxx: output->grph.rgb.max_uncompressed_blk_size = 128; output->grph.rgb.max_compressed_blk_size = 128; output->grph.rgb.independent_64b_blks = false; output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1; output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; break; case dcc_control__256_64_64: output->grph.rgb.max_uncompressed_blk_size = 256; output->grph.rgb.max_compressed_blk_size = 64; output->grph.rgb.independent_64b_blks = true; output->grph.rgb.dcc_controls.dcc_256_64_64 = 1; break; case dcc_control__256_128_128: output->grph.rgb.max_uncompressed_blk_size = 256; output->grph.rgb.max_compressed_blk_size = 128; output->grph.rgb.independent_64b_blks = false; output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; break; } output->capable = true; output->const_color_support = true; return true; } int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, FB_TOP, 
pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, AGP_BASE, pa_config->system_aperture.agp_base >> 24); if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; phys_config.depth = 0; phys_config.block_size = 0; // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config); dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config); } dcn21_dchvm_init(hubbub); return NUM_VMID; } static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub, unsigned int dccg_ref_freq_inKhz, unsigned int *dchub_ref_freq_inKhz) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); uint32_t ref_div = 0; uint32_t ref_en = 0; unsigned int dc_refclk_khz = 24000; REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); if (ref_en) { if (ref_div == 2) *dchub_ref_freq_inKhz = dc_refclk_khz / 2; else *dchub_ref_freq_inKhz = dc_refclk_khz; /* * The external Reference Clock may change based on the board or * platform requirements and the programmable integer divide must * be programmed to provide a suitable DLG RefClk frequency between * a minimum of 20MHz and maximum of 50MHz */ if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000) ASSERT_CRITICAL(false); return; } else { *dchub_ref_freq_inKhz = dc_refclk_khz; // HUBBUB global timer must be enabled. ASSERT_CRITICAL(false); return; } } static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); /* * Pstate latency is ~20us so if we wait over 40us and pstate allow * still not asserted, we are probably stuck and going to hang */ const unsigned int pstate_wait_timeout_us = 100; const unsigned int pstate_wait_expected_timeout_us = 40; static unsigned int max_sampled_pstate_wait_us; /* data collection */ static bool forced_pstate_allow; /* help with revert wa */ unsigned int debug_data = 0; unsigned int i; if (forced_pstate_allow) { /* we hacked to force pstate allow to prevent hang last time * we verify_allow_pstate_change_high. so disable force * here so we can check status */ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); forced_pstate_allow = false; } REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate); for (i = 0; i < pstate_wait_timeout_us; i++) { debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); /* Debug bit is specific to ASIC. 
*/ if (debug_data & (1 << 26)) { if (i > pstate_wait_expected_timeout_us) DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i); return true; } if (max_sampled_pstate_wait_us < i) max_sampled_pstate_wait_us = i; udelay(1); } /* force pstate allow to prevent system hang * and break to debugger to investigate */ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); forced_pstate_allow = true; DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", debug_data); return false; } void hubbub31_init(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); /*Enable clock gate*/ if (hubbub->ctx->dc->debug.disable_clock_gate) { /*done in hwseq*/ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, 1, DCFCLK_R_DCHUBBUB_GATE_DIS, 1); } /* only the DCN will determine when to connect the SDP port */ REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1); } static const struct hubbub_funcs hubbub31_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub3_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub31_get_dcc_compression_cap, .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub31_get_dchub_ref_freq, .program_watermarks = hubbub31_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, .program_det_size = dcn31_program_det_size, .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, .hubbub_read_state = hubbub2_read_state, }; void hubbub31_construct(struct dcn20_hubbub *hubbub31, struct dc_context *ctx, const struct dcn_hubbub_registers *hubbub_regs, const struct dcn_hubbub_shift *hubbub_shift, const struct dcn_hubbub_mask *hubbub_mask, int det_size_kb, int pixel_chunk_size_kb, int config_return_buffer_size_kb) { hubbub3_construct(hubbub31, ctx, hubbub_regs, hubbub_shift, hubbub_mask); hubbub31->base.funcs = &hubbub31_funcs; hubbub31->detile_buf_size = det_size_kb * 1024; hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024; hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB; hubbub31->debug_test_index_pstate = 0x6; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
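Illustrative note (not part of the kernel source above): each watermark write in dcn31_hubbub.c first converts a nanosecond value into DLG refclk cycles through convert_and_clamp() before programming the register. A minimal sketch of that conversion, assuming the helper multiplies by the refclk in MHz, divides by 1000, and clamps to the register field width; this is a hedged approximation, not the kernel implementation.

#include <stdint.h>

/* Hedged sketch of the assumed convert_and_clamp() behaviour:
 * ns * refclk_mhz / 1000, clamped to the destination field (e.g. 0xffff). */
static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
                                       uint32_t clamp_value)
{
	uint32_t cycles = (wm_ns * refclk_mhz) / 1000; /* ns -> refclk cycles */

	return cycles > clamp_value ? clamp_value : cycles;
}

/* Example: a 40960 ns stutter-exit watermark at a 48 MHz DLG refclk programs
 * 40960 * 48 / 1000 = 1966 cycles, well under the 0xffff clamp. */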
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce110/dce110_hw_sequencer.h" #include "dcn10/dcn10_hw_sequencer.h" #include "dcn20/dcn20_hwseq.h" #include "dcn21/dcn21_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dcn301/dcn301_hwseq.h" #include "dcn31/dcn31_hwseq.h" #include "dcn31_init.h" static const struct hw_sequencer_funcs dcn31_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn31_init_hw, .power_down_on_boot = dcn10_power_down_on_boot, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, .update_plane_addr = dcn20_update_plane_addr, .update_dchub = dcn10_update_dchub, .update_pending_status = dcn10_update_pending_status, .program_output_csc = dcn20_program_output_csc, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dcn10_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, .update_info_frame = dcn31_update_info_frame, .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, .enable_stream = dcn20_enable_stream, .disable_stream = dce110_disable_stream, .unblank_stream = dcn20_unblank_stream, .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, .set_static_screen_control = dcn30_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_T12 = dce110_edp_wait_for_T12, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position 
= dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, .program_triplebuffer = dcn20_program_triple_buffer, .enable_writeback = dcn30_enable_writeback, .disable_writeback = dcn30_disable_writeback, .update_writeback = dcn30_update_writeback, .mmhubbub_warmup = dcn30_mmhubbub_warmup, .dmdata_status_done = dcn20_dmdata_status_done, .program_dmdata_engine = dcn30_program_dmdata_engine, .set_dmdata_attributes = dcn20_set_dmdata_attributes, .init_sys_ctx = dcn31_init_sys_ctx, .init_vm_ctx = dcn20_init_vm_ctx, .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, .disable_link_output = dce110_disable_link_output, .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn31_private_funcs = { .init_pipes = dcn10_init_pipes, .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn30_set_input_transfer_func, .set_output_transfer_func = dcn30_set_output_transfer_func, .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, .enable_stream_timing = dcn20_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .disable_stream_gating = dcn20_disable_stream_gating, .enable_stream_gating = dcn20_enable_stream_gating, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn20_init_blank, .disable_vga = dcn20_disable_vga, .bios_golden_init = dcn10_bios_golden_init, .plane_atomic_disable = dcn20_plane_atomic_disable, .plane_atomic_power_down = dcn10_plane_atomic_power_down, .enable_power_gating_plane = dcn31_enable_power_gating_plane, .hubp_pg_control = dcn31_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn20_update_odm, .dsc_pg_control = dcn31_dsc_pg_control, .set_hdr_multiplier = dcn10_set_hdr_multiplier, .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, .wait_for_blank_complete = dcn20_wait_for_blank_complete, .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn30_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, }; void dcn31_hw_sequencer_construct(struct dc *dc) { dc->hwss = dcn31_funcs; dc->hwseq->funcs = dcn31_private_funcs; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
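Illustrative note (not part of the kernel source above): dcn31_init.c only wires DCN 3.1 implementations into the hardware-sequencer function tables. A hedged usage sketch of how core DC code is expected to dispatch through that table after construction; the wrapper function name here is hypothetical.

/* Hedged usage sketch: after dcn31_hw_sequencer_construct(dc) installs
 * dcn31_funcs, callers go through the table instead of calling DCN3.1
 * functions directly. */
static void example_init_display_hw(struct dc *dc)
{
	dc->hwss.init_hw(dc); /* dispatches to dcn31_init_hw on DCN 3.1 */
}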
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "dcn31_hpo_dp_link_encoder.h" #include "reg_helper.h" #include "stream_encoder.h" #define DC_LOGGER \ enc3->base.ctx->logger #define REG(reg)\ (enc3->regs->reg) #undef FN #define FN(reg_name, field_name) \ enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name #define CTX \ enc3->base.ctx enum { DP_SAT_UPDATE_MAX_RETRY = 200 }; void dcn31_hpo_dp_link_enc_enable( struct hpo_dp_link_encoder *enc, enum dc_lane_count num_lanes) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); uint32_t dp_link_enabled; /* get current status of link enabled */ REG_GET(DP_DPHY_SYM32_STATUS, STATUS, &dp_link_enabled); /* Enable clocks first */ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1); /* Reset DPHY. Only reset if going from disable to enable */ if (!dp_link_enabled) { REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1); REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0); } /* Configure DPHY settings */ REG_UPDATE_3(DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, 1, PRECODER_ENABLE, 1, NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 
1 : 3); } void dcn31_hpo_dp_link_enc_disable( struct hpo_dp_link_encoder *enc) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); /* Configure DPHY settings */ REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, 0); /* Shut down clock last */ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0); } void dcn31_hpo_dp_link_enc_set_link_test_pattern( struct hpo_dp_link_encoder *enc, struct encoder_set_dp_phy_pattern_param *tp_params) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); uint32_t tp_custom; switch (tp_params->dp_phy_pattern) { case DP_TEST_PATTERN_VIDEO_MODE: REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_LINK_ACTIVE); break; case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE: REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_LINK_TRAINING_TPS1); break; case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE: REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_LINK_TRAINING_TPS2); break; case DP_TEST_PATTERN_128b_132b_TPS1: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_TPS1, TP_SELECT1, DP_DPHY_TP_SELECT_TPS1, TP_SELECT2, DP_DPHY_TP_SELECT_TPS1, TP_SELECT3, DP_DPHY_TP_SELECT_TPS1); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_128b_132b_TPS2: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_TPS2, TP_SELECT1, DP_DPHY_TP_SELECT_TPS2, TP_SELECT2, DP_DPHY_TP_SELECT_TPS2, TP_SELECT3, DP_DPHY_TP_SELECT_TPS2); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_PRBS7: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, DP_DPHY_TP_PRBS7, TP_PRBS_SEL1, DP_DPHY_TP_PRBS7, TP_PRBS_SEL2, DP_DPHY_TP_PRBS7, TP_PRBS_SEL3, DP_DPHY_TP_PRBS7); REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_PRBS9: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, DP_DPHY_TP_PRBS9, TP_PRBS_SEL1, DP_DPHY_TP_PRBS9, TP_PRBS_SEL2, DP_DPHY_TP_PRBS9, TP_PRBS_SEL3, DP_DPHY_TP_PRBS9); REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_PRBS11: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, DP_DPHY_TP_PRBS11, TP_PRBS_SEL1, DP_DPHY_TP_PRBS11, TP_PRBS_SEL2, DP_DPHY_TP_PRBS11, TP_PRBS_SEL3, DP_DPHY_TP_PRBS11); REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_PRBS15: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, DP_DPHY_TP_PRBS15, TP_PRBS_SEL1, DP_DPHY_TP_PRBS15, TP_PRBS_SEL2, DP_DPHY_TP_PRBS15, TP_PRBS_SEL3, DP_DPHY_TP_PRBS15); REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_PRBS23: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, DP_DPHY_TP_PRBS23, TP_PRBS_SEL1, DP_DPHY_TP_PRBS23, TP_PRBS_SEL2, DP_DPHY_TP_PRBS23, TP_PRBS_SEL3, DP_DPHY_TP_PRBS23); 
REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_PRBS31: REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, DP_DPHY_TP_PRBS31, TP_PRBS_SEL1, DP_DPHY_TP_PRBS31, TP_PRBS_SEL2, DP_DPHY_TP_PRBS31, TP_PRBS_SEL3, DP_DPHY_TP_PRBS31); REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_264BIT_CUSTOM: tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] << 8) | tp_params->custom_pattern[12]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom); tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30]; REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom); REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM, TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM, TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM, TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; case DP_TEST_PATTERN_SQUARE: case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0, TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]); REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE, TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE, TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE, TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE); REG_UPDATE(DP_DPHY_SYM32_CONTROL, MODE, DP2_TEST_PATTERN); break; default: break; } } static void 
fill_stream_allocation_row_info( const struct link_mst_stream_allocation *stream_allocation, uint32_t *src, uint32_t *slots) { const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc; if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) { *src = stream_enc->id - ENGINE_ID_HPO_DP_0; *slots = stream_allocation->slot_count; } else { *src = 0; *slots = 0; } } /* programs DP VC payload allocation */ void dcn31_hpo_dp_link_enc_update_stream_allocation_table( struct hpo_dp_link_encoder *enc, const struct link_mst_stream_allocation_table *table) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); uint32_t slots = 0; uint32_t src = 0; /* --- Set MSE Stream Attribute - * Setup VC Payload Table on Tx Side, * Issue allocation change trigger * to commit payload on both tx and rx side */ /* we should clean-up table each time */ if (table->stream_count >= 1) { fill_stream_allocation_row_info( &table->stream_allocations[0], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, src, SAT_SLOT_COUNT, slots); if (table->stream_count >= 2) { fill_stream_allocation_row_info( &table->stream_allocations[1], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1, SAT_STREAM_SOURCE, src, SAT_SLOT_COUNT, slots); if (table->stream_count >= 3) { fill_stream_allocation_row_info( &table->stream_allocations[2], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2, SAT_STREAM_SOURCE, src, SAT_SLOT_COUNT, slots); if (table->stream_count >= 4) { fill_stream_allocation_row_info( &table->stream_allocations[3], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3, SAT_STREAM_SOURCE, src, SAT_SLOT_COUNT, slots); /* --- wait for transaction finish */ /* send allocation change trigger (ACT) * this step first sends the ACT, * then double buffers the SAT into the hardware * making the new allocation active on the DP MST mode link */ /* SAT_UPDATE: * 0 - No Action * 1 - Update SAT with trigger * 2 - Update SAT without trigger */ REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, 1); /* wait for update to complete * (i.e. SAT_UPDATE_PENDING field is set to 0) * No need for HW to enforce keepout. 
*/ /* Best case and worst case wait time for SAT_UPDATE_PENDING * best: 109 us * worst: 868 us */ REG_WAIT(DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, 0, 10, DP_SAT_UPDATE_MAX_RETRY); } void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( struct hpo_dp_link_encoder *enc, uint32_t stream_encoder_inst, struct fixed31_32 avg_time_slots_per_mtp) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); uint32_t x = dc_fixpt_floor( avg_time_slots_per_mtp); uint32_t y = dc_fixpt_ceil( dc_fixpt_shl( dc_fixpt_sub_int( avg_time_slots_per_mtp, x), 25)); switch (stream_encoder_inst) { case 0: REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0, STREAM_VC_RATE_X, x, STREAM_VC_RATE_Y, y); break; case 1: REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0, STREAM_VC_RATE_X, x, STREAM_VC_RATE_Y, y); break; case 2: REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0, STREAM_VC_RATE_X, x, STREAM_VC_RATE_Y, y); break; case 3: REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0, STREAM_VC_RATE_X, x, STREAM_VC_RATE_Y, y); break; default: ASSERT(0); } /* Best case and worst case wait time for RATE_UPDATE_PENDING * best: 116 ns * worst: 903 ns */ /* wait for update to be completed on the link */ REG_WAIT(DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, 0, 1, 10); } static bool dcn31_hpo_dp_link_enc_is_in_alt_mode( struct hpo_dp_link_encoder *enc) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); uint32_t dp_alt_mode_disable = 0; ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E)); /* if value == 1 alt mode is disabled, otherwise it is enabled */ REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); return (dp_alt_mode_disable == 0); } void dcn31_hpo_dp_link_enc_read_state( struct hpo_dp_link_encoder *enc, struct hpo_dp_link_enc_state *state) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); ASSERT(state); REG_GET(DP_DPHY_SYM32_STATUS, STATUS, &state->link_enc_enabled); REG_GET(DP_DPHY_SYM32_CONTROL, NUM_LANES, &state->lane_count); REG_GET(DP_DPHY_SYM32_CONTROL, MODE, (uint32_t *)&state->link_mode); REG_GET_2(DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, &state->stream_src[0], SAT_SLOT_COUNT, &state->slot_count[0]); REG_GET_2(DP_DPHY_SYM32_SAT_VC1, SAT_STREAM_SOURCE, &state->stream_src[1], SAT_SLOT_COUNT, &state->slot_count[1]); REG_GET_2(DP_DPHY_SYM32_SAT_VC2, SAT_STREAM_SOURCE, &state->stream_src[2], SAT_SLOT_COUNT, &state->slot_count[2]); REG_GET_2(DP_DPHY_SYM32_SAT_VC3, SAT_STREAM_SOURCE, &state->stream_src[3], SAT_SLOT_COUNT, &state->slot_count[3]); REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, &state->vc_rate_x[0], STREAM_VC_RATE_Y, &state->vc_rate_y[0]); REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, STREAM_VC_RATE_X, &state->vc_rate_x[1], STREAM_VC_RATE_Y, &state->vc_rate_y[1]); REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, STREAM_VC_RATE_X, &state->vc_rate_x[2], STREAM_VC_RATE_Y, &state->vc_rate_y[2]); REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, STREAM_VC_RATE_X, &state->vc_rate_x[3], STREAM_VC_RATE_Y, &state->vc_rate_y[3]); } static enum bp_result link_transmitter_control( struct dcn31_hpo_dp_link_encoder *enc3, struct bp_transmitter_control *cntl) { enum bp_result result; struct dc_bios *bp = enc3->base.ctx->dc_bios; result = bp->funcs->transmitter_control(bp, cntl); return result; } /* enables DP PHY output for 128b132b encoding */ void dcn31_hpo_dp_link_enc_enable_dp_output( struct hpo_dp_link_encoder *enc, const struct dc_link_settings *link_settings, enum transmitter 
transmitter, enum hpd_source_id hpd_source) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* Set the transmitter */ enc3->base.transmitter = transmitter; /* Set the hpd source */ enc3->base.hpd_source = hpd_source; /* Enable the PHY */ cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = ENGINE_ID_UNKNOWN; cntl.transmitter = enc3->base.transmitter; //cntl.pll_id = clock_source; cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc3->base.hpd_source; cntl.pixel_clock = link_settings->link_rate * 1000; cntl.color_depth = COLOR_DEPTH_UNDEFINED; cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0; result = link_transmitter_control(enc3, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); } } void dcn31_hpo_dp_link_enc_disable_output( struct hpo_dp_link_encoder *enc, enum signal_type signal) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* disable transmitter */ cntl.action = TRANSMITTER_CONTROL_DISABLE; cntl.transmitter = enc3->base.transmitter; cntl.hpd_sel = enc3->base.hpd_source; cntl.signal = signal; result = link_transmitter_control(enc3, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); return; } /* disable encoder */ dcn31_hpo_dp_link_enc_disable(enc); } void dcn31_hpo_dp_link_enc_set_ffe( struct hpo_dp_link_encoder *enc, const struct dc_link_settings *link_settings, uint8_t ffe_preset) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* disable transmitter */ cntl.transmitter = enc3->base.transmitter; cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; cntl.lanes_number = link_settings->lane_count; cntl.pixel_clock = link_settings->link_rate * 1000; cntl.lane_settings = ffe_preset; result = link_transmitter_control(enc3, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); return; } } static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = { .enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output, .disable_link_phy = dcn31_hpo_dp_link_enc_disable_output, .link_enable = dcn31_hpo_dp_link_enc_enable, .link_disable = dcn31_hpo_dp_link_enc_disable, .set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern, .update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table, .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size, .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode, .read_state = dcn31_hpo_dp_link_enc_read_state, .set_ffe = dcn31_hpo_dp_link_enc_set_ffe, }; void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, struct dc_context *ctx, uint32_t inst, const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask) { enc31->base.ctx = ctx; enc31->base.inst = inst; enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs; enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN; enc31->base.transmitter = 
TRANSMITTER_UNKNOWN; enc31->regs = hpo_le_regs; enc31->hpo_le_shift = hpo_le_shift; enc31->hpo_le_mask = hpo_le_mask; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
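Illustrative note (not part of the kernel source above): dcn31_hpo_dp_link_enc_set_throttled_vcp_size() splits the average time slots per MTP into an integer part X and a fractional part Y scaled into a 25-bit field. A hedged sketch using plain floating point instead of the driver's fixed31_32 helpers, purely to show the arithmetic.

#include <stdint.h>
#include <math.h>

/* Hedged sketch: X = floor(slots), Y = ceil(frac(slots) * 2^25),
 * mirroring the dc_fixpt_floor/dc_fixpt_shl/dc_fixpt_ceil sequence above. */
static void split_avg_time_slots(double avg_time_slots_per_mtp,
                                 uint32_t *x, uint32_t *y)
{
	*x = (uint32_t)floor(avg_time_slots_per_mtp);
	*y = (uint32_t)ceil((avg_time_slots_per_mtp - *x) * (double)(1u << 25));
}

/* Example: 3.5 time slots per MTP -> X = 3, Y = 0x1000000 (0.5 * 2^25). */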
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "core_types.h" #include "dc_dmub_srv.h" #include "dcn31_panel_cntl.h" #include "atom.h" #define TO_DCN31_PANEL_CNTL(panel_cntl)\ container_of(panel_cntl, struct dcn31_panel_cntl, base) #define CTX \ dcn31_panel_cntl->base.ctx #define DC_LOGGER \ dcn31_panel_cntl->base.ctx->logger static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub_rb_cmd *cmd) { struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl); struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv; if (!dc_dmub_srv) return false; memset(cmd, 0, sizeof(*cmd)); cmd->panel_cntl.header.type = DMUB_CMD__PANEL_CNTL; cmd->panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO; cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data); cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst; return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); } static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; if (!dcn31_query_backlight_info(panel_cntl, &cmd)) return 0; return cmd.panel_cntl.data.current_backlight; } static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) { struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl); struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv; union dmub_rb_cmd cmd; if (!dc_dmub_srv) return 0; memset(&cmd, 0, sizeof(cmd)); cmd.panel_cntl.header.type = DMUB_CMD__PANEL_CNTL; cmd.panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_HW_INIT; cmd.panel_cntl.header.payload_bytes = sizeof(cmd.panel_cntl.data); cmd.panel_cntl.data.inst = dcn31_panel_cntl->base.inst; cmd.panel_cntl.data.bl_pwm_cntl = panel_cntl->stored_backlight_registers.BL_PWM_CNTL; cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL; cmd.panel_cntl.data.bl_pwm_ref_div1 = panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; cmd.panel_cntl.data.bl_pwm_ref_div2 = panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2; if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return 0; panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl; panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = 0; /* unused */ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 
cmd.panel_cntl.data.bl_pwm_period_cntl; panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = cmd.panel_cntl.data.bl_pwm_ref_div1; panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2 = cmd.panel_cntl.data.bl_pwm_ref_div2; return cmd.panel_cntl.data.current_backlight; } static void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) { struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(*panel_cntl); kfree(dcn31_panel_cntl); *panel_cntl = NULL; } static bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; if (!dcn31_query_backlight_info(panel_cntl, &cmd)) return false; return cmd.panel_cntl.data.is_backlight_on; } static bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; if (!dcn31_query_backlight_info(panel_cntl, &cmd)) return false; return cmd.panel_cntl.data.is_powered_on; } static void dcn31_store_backlight_level(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; if (!dcn31_query_backlight_info(panel_cntl, &cmd)) return; panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl; panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = 0; /* unused */ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = cmd.panel_cntl.data.bl_pwm_period_cntl; panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = cmd.panel_cntl.data.bl_pwm_ref_div1; } static const struct panel_cntl_funcs dcn31_link_panel_cntl_funcs = { .destroy = dcn31_panel_cntl_destroy, .hw_init = dcn31_panel_cntl_hw_init, .is_panel_backlight_on = dcn31_is_panel_backlight_on, .is_panel_powered_on = dcn31_is_panel_powered_on, .store_backlight_level = dcn31_store_backlight_level, .get_current_backlight = dcn31_get_16_bit_backlight_from_pwm, }; void dcn31_panel_cntl_construct( struct dcn31_panel_cntl *dcn31_panel_cntl, const struct panel_cntl_init_data *init_data) { dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs; dcn31_panel_cntl->base.ctx = init_data->ctx; dcn31_panel_cntl->base.inst = init_data->inst; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
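Illustrative note (not part of the kernel source above): every query in dcn31_panel_cntl.c is a DMUB round trip (fill the command header and data, execute, read the reply). From a caller's perspective that detail is hidden behind the panel_cntl function table; a hedged usage sketch follows, with a hypothetical wrapper name and an assumed valid panel_cntl pointer from resource creation.

/* Hedged usage sketch: reading the current 16-bit backlight level through the
 * vtable installed by dcn31_panel_cntl_construct(). */
static uint32_t example_read_backlight(struct panel_cntl *panel_cntl)
{
	/* Dispatches to dcn31_get_16_bit_backlight_from_pwm(), which issues a
	 * DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO command and waits for the
	 * reply; returns 0 if the DMUB service is unavailable. */
	return panel_cntl->funcs->get_current_backlight(panel_cntl);
}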
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "hw_shared.h" #include "dcn31_apg.h" #include "reg_helper.h" #define DC_LOGGER \ apg31->base.ctx->logger #define REG(reg)\ (apg31->regs->reg) #undef FN #define FN(reg_name, field_name) \ apg31->apg_shift->field_name, apg31->apg_mask->field_name #define CTX \ apg31->base.ctx static void apg31_enable( struct apg *apg) { struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); /* Reset APG */ REG_UPDATE(APG_CONTROL, APG_RESET, 1); REG_WAIT(APG_CONTROL, APG_RESET_DONE, 1, 1, 10); REG_UPDATE(APG_CONTROL, APG_RESET, 0); REG_WAIT(APG_CONTROL, APG_RESET_DONE, 0, 1, 10); /* Enable APG */ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 1); } static void apg31_disable( struct apg *apg) { struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); /* Disable APG */ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0); } static void apg31_se_audio_setup( struct apg *apg, unsigned int az_inst, struct audio_info *audio_info) { struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); ASSERT(audio_info); /* This should not happen.it does so we don't get BSOD*/ if (audio_info == NULL) return; /* DisplayPort only allows for one audio stream with stream ID 0 */ REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0); /* When running in "pair mode", pairs of audio channels have their own enable * this is for really old audio drivers */ REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF); /* Disable forced mem power off */ REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0); } static struct apg_funcs dcn31_apg_funcs = { .se_audio_setup = apg31_se_audio_setup, .enable_apg = apg31_enable, .disable_apg = apg31_disable, }; void apg31_construct(struct dcn31_apg *apg31, struct dc_context *ctx, uint32_t inst, const struct dcn31_apg_registers *apg_regs, const struct dcn31_apg_shift *apg_shift, const struct dcn31_apg_mask *apg_mask) { apg31->base.ctx = ctx; apg31->base.inst = inst; apg31->base.funcs = &dcn31_apg_funcs; apg31->regs = apg_regs; apg31->apg_shift = apg_shift; apg31->apg_mask = apg_mask; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "dcn30/dcn30_vpg.h" #include "dcn31_vpg.h" #include "reg_helper.h" #include "dc/dc.h" #define DC_LOGGER \ vpg31->base.ctx->logger #define REG(reg)\ (vpg31->regs->reg) #undef FN #define FN(reg_name, field_name) \ vpg31->vpg_shift->field_name, vpg31->vpg_mask->field_name #define CTX \ vpg31->base.ctx static struct vpg_funcs dcn31_vpg_funcs = { .update_generic_info_packet = vpg3_update_generic_info_packet, .vpg_poweron = vpg31_poweron, .vpg_powerdown = vpg31_powerdown, }; void vpg31_powerdown(struct vpg *vpg) { struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) return; REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 0, VPG_GSP_LIGHT_SLEEP_FORCE, 1); } void vpg31_poweron(struct vpg *vpg) { struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) return; REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0); } void vpg31_construct(struct dcn31_vpg *vpg31, struct dc_context *ctx, uint32_t inst, const struct dcn31_vpg_registers *vpg_regs, const struct dcn31_vpg_shift *vpg_shift, const struct dcn31_vpg_mask *vpg_mask) { vpg31->base.ctx = ctx; vpg31->base.inst = inst; vpg31->base.funcs = &dcn31_vpg_funcs; vpg31->regs = vpg_regs; vpg31->vpg_shift = vpg_shift; vpg31->vpg_mask = vpg_mask; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dcn31_optc.h" #include "dcn30/dcn30_optc.h" #include "reg_helper.h" #include "dc.h" #include "dcn_calc_math.h" #define REG(reg)\ optc1->tg_regs->reg #define CTX \ optc1->base.ctx #undef FN #define FN(reg_name, field_name) \ optc1->tg_shift->field_name, optc1->tg_mask->field_name static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) / opp_cnt; uint32_t memory_mask = 0; int mem_count_per_opp = (mpcc_hactive + 2559) / 2560; /* Assume less than 6 pipes */ if (opp_cnt == 4) { if (mem_count_per_opp == 1) memory_mask = 0xf; else { ASSERT(mem_count_per_opp == 2); memory_mask = 0xff; } } else if (mem_count_per_opp == 1) memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2); else if (mem_count_per_opp == 2) memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); else if (mem_count_per_opp == 3) memory_mask = 0x77; else if (mem_count_per_opp == 4) memory_mask = 0xff; if (REG(OPTC_MEMORY_CONFIG)) REG_SET(OPTC_MEMORY_CONFIG, 0, OPTC_MEM_SEL, memory_mask); if (opp_cnt == 2) { REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 1, OPTC_SEG0_SRC_SEL, opp_id[0], OPTC_SEG1_SRC_SEL, opp_id[1]); } else if (opp_cnt == 4) { REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 3, OPTC_SEG0_SRC_SEL, opp_id[0], OPTC_SEG1_SRC_SEL, opp_id[1], OPTC_SEG2_SRC_SEL, opp_id[2], OPTC_SEG3_SRC_SEL, opp_id[3]); } REG_UPDATE(OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mpcc_hactive); REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1); optc1->opp_count = opp_cnt; } /* * Enable CRTC - call ASIC Control Object to enable Timing generator. */ static bool optc31_enable_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); /* opp instance for OTG, 1 to 1 mapping and odm will adjust */ REG_UPDATE(OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, optc->inst); /* VTG enable first is for HW workaround */ REG_UPDATE(CONTROL, VTG0_ENABLE, 1); REG_SEQ_START(); /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 2, OTG_MASTER_EN, 1); REG_SEQ_SUBMIT(); REG_SEQ_WAIT_DONE(); return true; } /* disable_crtc - call ASIC Control Object to disable Timing generator. 
*/ static bool optc31_disable_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); /* disable otg request until end of the first line * in the vertical blank region */ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); REG_UPDATE(CONTROL, VTG0_ENABLE, 0); /* CRTC disabled, so disable clock. */ REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); optc1_clear_optc_underflow(optc); return true; } bool optc31_immediate_disable_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0); REG_UPDATE(CONTROL, VTG0_ENABLE, 0); /* CRTC disabled, so disable clock. */ REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); /* clear the false state */ optc1_clear_optc_underflow(optc); return true; } void optc31_set_drr( struct timing_generator *optc, const struct drr_params *params) { struct optc *optc1 = DCN10TG_FROM_TG(optc); if (params != NULL && params->vertical_total_max > 0 && params->vertical_total_min > 0) { if (params->vertical_total_mid != 0) { REG_SET(OTG_V_TOTAL_MID, 0, OTG_V_TOTAL_MID, params->vertical_total_mid - 1); REG_UPDATE_2(OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, OTG_VTOTAL_MID_FRAME_NUM, (uint8_t)params->vertical_total_mid_frame_num); } optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); /* * MIN_MASK_EN is gone and MASK is now always enabled. * * To get it to it work with manual trigger we need to make sure * we program the correct bit. */ REG_UPDATE_4(OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, 1, OTG_V_TOTAL_MAX_SEL, 1, OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ // Setup manual flow control for EOF via TRIG_A optc->funcs->setup_manual_trigger(optc); } else { REG_UPDATE_4(OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, 0, OTG_V_TOTAL_MIN_SEL, 0, OTG_V_TOTAL_MAX_SEL, 0, OTG_FORCE_LOCK_ON_EVENT, 0); optc->funcs->set_vtotal_min_max(optc, 0, 0); } } void optc3_init_odm(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 0, OPTC_SEG0_SRC_SEL, optc->inst, OPTC_SEG1_SRC_SEL, 0xf, OPTC_SEG2_SRC_SEL, 0xf, OPTC_SEG3_SRC_SEL, 0xf ); REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, 0); REG_SET(OPTC_MEMORY_CONFIG, 0, OPTC_MEM_SEL, 0); optc1->opp_count = 1; } static struct timing_generator_funcs dcn31_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, .program_global_sync = optc1_program_global_sync, .enable_crtc = optc31_enable_crtc, .disable_crtc = optc31_disable_crtc, .immediate_disable_crtc = optc31_immediate_disable_crtc, /* used by enable_timing_synchronization. Not need for FPGA */ .is_counter_moving = optc1_is_counter_moving, .get_position = optc1_get_position, .get_frame_count = optc1_get_vblank_counter, .get_scanoutpos = optc1_get_crtc_scanoutpos, .get_otg_active_size = optc1_get_otg_active_size, .set_early_control = optc1_set_early_control, /* used by enable_timing_synchronization. 
Not needed for FPGA */ .wait_for_state = optc1_wait_for_state, .set_blank_color = optc3_program_blank_color, .did_triggered_reset_occur = optc1_did_triggered_reset_occur, .triplebuffer_lock = optc3_triplebuffer_lock, .triplebuffer_unlock = optc2_triplebuffer_unlock, .enable_reset_trigger = optc1_enable_reset_trigger, .enable_crtc_reset = optc1_enable_crtc_reset, .disable_reset_trigger = optc1_disable_reset_trigger, .lock = optc3_lock, .unlock = optc1_unlock, .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, .enable_optc_clock = optc1_enable_optc_clock, .set_drr = optc31_set_drr, .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, .set_vtotal_min_max = optc1_set_vtotal_min_max, .set_static_screen_control = optc1_set_static_screen_control, .program_stereo = optc1_program_stereo, .is_stereo_left_eye = optc1_is_stereo_left_eye, .tg_init = optc3_tg_init, .is_tg_enabled = optc1_is_tg_enabled, .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, .clear_optc_underflow = optc1_clear_optc_underflow, .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc31_set_odm_combine, .get_optc_source = optc2_get_optc_source, .set_out_mux = optc3_set_out_mux, .set_drr_trigger_window = optc3_set_drr_trigger_window, .set_vtotal_change_limit = optc3_set_vtotal_change_limit, .set_gsl = optc2_set_gsl, .set_gsl_source_select = optc2_set_gsl_source_select, .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .init_odm = optc3_init_odm, }; void dcn31_timing_generator_init(struct optc *optc1) { optc1->base.funcs = &dcn31_tg_funcs; optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; optc1->min_h_blank = 32; optc1->min_v_blank = 3; optc1->min_v_blank_interlace = 5; optc1->min_h_sync_width = 4; optc1->min_v_sync_width = 1; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
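For reference, here is a minimal standalone sketch (assumed values, not driver code) of the ODM segment arithmetic that optc31_set_odm_combine() performs above: the active width is split evenly across the participating OPPs, and each segment is then mapped onto OPTC memories that each hold up to 2560 pixels, which is how memory_mask and OPTC_SEGMENT_WIDTH are derived. The 7680-pixel timing and helper names below are hypothetical; the 2560-pixel capacity is inferred from the ceiling divide in the driver function.

/*
 * Standalone sketch (not driver code): reproduces the arithmetic used by
 * optc31_set_odm_combine() to size ODM segments and count OPTC memories.
 */
#include <stdio.h>

struct sample_timing {
	int h_addressable;
	int h_border_left;
	int h_border_right;
};

static int odm_segment_width(const struct sample_timing *t, int opp_cnt)
{
	/* Active width is divided evenly across the OPPs taking part in ODM. */
	return (t->h_addressable + t->h_border_left + t->h_border_right) / opp_cnt;
}

static int odm_mem_count_per_opp(int segment_width)
{
	/* Each OPTC memory instance holds up to 2560 pixels; round up. */
	return (segment_width + 2559) / 2560;
}

int main(void)
{
	struct sample_timing t = { .h_addressable = 7680 }; /* hypothetical 8K-wide timing */
	int opp_cnt = 4;
	int seg = odm_segment_width(&t, opp_cnt);

	printf("segment width: %d pixels, memories per OPP: %d\n",
	       seg, odm_mem_count_per_opp(seg));
	return 0;
}

With four OPPs this yields a 1920-pixel segment and one memory per OPP, which corresponds to the memory_mask = 0xf case in the driver code.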
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "hw_shared.h" #include "dcn30/dcn30_afmt.h" #include "dcn31_afmt.h" #include "reg_helper.h" #include "dc/dc.h" #define DC_LOGGER \ afmt31->base.ctx->logger #define REG(reg)\ (afmt31->regs->reg) #undef FN #define FN(reg_name, field_name) \ afmt31->afmt_shift->field_name, afmt31->afmt_mask->field_name #define CTX \ afmt31->base.ctx static struct afmt_funcs dcn31_afmt_funcs = { .setup_hdmi_audio = afmt3_setup_hdmi_audio, .se_audio_setup = afmt3_se_audio_setup, .audio_mute_control = afmt3_audio_mute_control, .audio_info_immediate_update = afmt3_audio_info_immediate_update, .setup_dp_audio = afmt3_setup_dp_audio, .afmt_powerdown = afmt31_powerdown, .afmt_poweron = afmt31_poweron }; void afmt31_powerdown(struct afmt *afmt) { struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt); if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false) return; REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 0, AFMT_MEM_PWR_FORCE, 1); } void afmt31_poweron(struct afmt *afmt) { struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt); if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false) return; REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 1, AFMT_MEM_PWR_FORCE, 0); } void afmt31_construct(struct dcn31_afmt *afmt31, struct dc_context *ctx, uint32_t inst, const struct dcn31_afmt_registers *afmt_regs, const struct dcn31_afmt_shift *afmt_shift, const struct dcn31_afmt_mask *afmt_mask) { afmt31->base.ctx = ctx; afmt31->base.inst = inst; afmt31->base.funcs = &dcn31_afmt_funcs; afmt31->regs = afmt_regs; afmt31->afmt_shift = afmt_shift; afmt31->afmt_mask = afmt_mask; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
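As a small illustration of the AFMT memory power-gating pattern above (a sketch under assumed field semantics, not the real register layout): afmt31_powerdown() clears the power-gating disable bit and forces gating, afmt31_poweron() does the opposite, and both are no-ops unless the enable_mem_low_power.bits.afmt debug flag is set.

/*
 * Minimal sketch (assumptions, not driver code): models the two AFMT_MEM_PWR
 * fields toggled by afmt31_powerdown()/afmt31_poweron() and the debug-flag
 * guard that turns both into no-ops. Field meanings here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct afmt_mem_pwr {
	unsigned int mem_pwr_dis;   /* assumed: 1 = keep memory powered (gating disabled) */
	unsigned int mem_pwr_force; /* assumed: 1 = force memory into power gating */
};

static void afmt_mem_powerdown(struct afmt_mem_pwr *reg, bool low_power_enabled)
{
	if (!low_power_enabled)
		return; /* mirrors the enable_mem_low_power.bits.afmt check */
	reg->mem_pwr_dis = 0;
	reg->mem_pwr_force = 1;
}

static void afmt_mem_poweron(struct afmt_mem_pwr *reg, bool low_power_enabled)
{
	if (!low_power_enabled)
		return;
	reg->mem_pwr_dis = 1;
	reg->mem_pwr_force = 0;
}

int main(void)
{
	struct afmt_mem_pwr reg = { 0 };

	afmt_mem_powerdown(&reg, true);
	printf("after powerdown: dis=%u force=%u\n", reg.mem_pwr_dis, reg.mem_pwr_force);
	afmt_mem_poweron(&reg, true);
	printf("after poweron:   dis=%u force=%u\n", reg.mem_pwr_dis, reg.mem_pwr_force);
	return 0;
}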
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "core_types.h" #include "dcn31_dccg.h" #include "dal_asic_id.h" #define TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) #define REG(reg) \ (dccg_dcn->regs->reg) #undef FN #define FN(reg_name, field_name) \ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name #define CTX \ dccg_dcn->base.ctx #define DC_LOGGER \ dccg->ctx->logger void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->dpp_clock_gated[dpp_inst]) { /* * Do not update the DPPCLK DTO if the clock is stopped. * It is treated the same as if the pipe itself were in PG. 
*/ return; } if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; int modulo, phase; // phase / modulo = dpp pipe clk / dpp global clk modulo = 0xff; // use FF at the end phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; if (phase > 0xff) { ASSERT(false); phase = 0xff; } REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, phase, DPPCLK0_DTO_MODULO, modulo); REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1); } else { REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0); } dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } static enum phyd32clk_clock_source get_phy_mux_symclk( struct dcn_dccg *dccg_dcn, enum phyd32clk_clock_source src) { if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { if (src == PHYD32CLKC) src = PHYD32CLKF; if (src == PHYD32CLKD) src = PHYD32CLKG; } return src; } static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); /* enabled to select one of the DTBCLKs for pipe */ switch (otg_inst) { case 0: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE0_EN, 1); break; case 1: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, 1); break; case 2: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, 1); break; case 3: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, 1); break; default: BREAK_TO_DEBUGGER(); return; } if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, 1, DPSTREAMCLK_ROOT_GATE_DISABLE, 1); } static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, 0, DPSTREAMCLK_GATE_DISABLE, 0); switch (otg_inst) { case 0: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE0_EN, 0); break; case 1: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, 0); break; case 2: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, 0); break; case 3: REG_UPDATE(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, 0); break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_set_dpstreamclk( struct dccg *dccg, enum streamclk_source src, int otg_inst, int dp_hpo_inst) { if (src == REFCLK) dccg31_disable_dpstreamclk(dccg, otg_inst); else dccg31_enable_dpstreamclk(dccg, otg_inst); } void dccg31_enable_symclk32_se( struct dccg *dccg, int hpo_se_inst, enum phyd32clk_clock_source phyd32clk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk); /* select one of the PHYD32CLKs as the source for symclk32_se */ switch (hpo_se_inst) { case 0: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, 1, SYMCLK32_ROOT_SE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, phyd32clk, SYMCLK32_SE0_EN, 1); break; case 1: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, 1, SYMCLK32_ROOT_SE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, phyd32clk, SYMCLK32_SE1_EN, 1); break; case 2: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, 1, SYMCLK32_ROOT_SE2_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, phyd32clk, 
SYMCLK32_SE2_EN, 1); break; case 3: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, 1, SYMCLK32_ROOT_SE3_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, phyd32clk, SYMCLK32_SE3_EN, 1); break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_disable_symclk32_se( struct dccg *dccg, int hpo_se_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); /* set refclk as the source for symclk32_se */ switch (hpo_se_inst) { case 0: REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, 0, SYMCLK32_SE0_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, 0, SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, 0, SYMCLK32_SE1_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, 0, SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); break; case 2: REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, 0, SYMCLK32_SE2_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, 0, SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); break; case 3: REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, 0, SYMCLK32_SE3_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, 0, SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_enable_symclk32_le( struct dccg *dccg, int hpo_le_inst, enum phyd32clk_clock_source phyd32clk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk); /* select one of the PHYD32CLKs as the source for symclk32_le */ switch (hpo_le_inst) { case 0: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, SYMCLK32_LE0_EN, 1); break; case 1: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, SYMCLK32_LE1_EN, 1); break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); /* set refclk as the source for symclk32_le */ switch (hpo_le_inst) { case 0: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); break; case 1: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_set_symclk32_le_root_clock_gating( struct dccg *dccg, int hpo_le_inst, bool enable) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) return; switch (hpo_le_inst) { case 0: REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0, SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0); break; case 1: REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0, SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 
1 : 0); break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_disable_dscclk(struct dccg *dccg, int inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) return; //DTO must be enabled to generate a 0 Hz clock output switch (inst) { case 0: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, 1); REG_UPDATE_2(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 1); break; case 1: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, 1); REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 1); break; case 2: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, 1); REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 1); break; case 3: if (REG(DSCCLK3_DTO_PARAM)) { REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_DTO_ENABLE, 1); REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 1); } break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_enable_dscclk(struct dccg *dccg, int inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) return; //Disable DTO switch (inst) { case 0: REG_UPDATE_2(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, 0); break; case 1: REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, 0); break; case 2: REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, 0); break; case 3: if (REG(DSCCLK3_DTO_PARAM)) { REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_DTO_ENABLE, 0); REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 0); } break; default: BREAK_TO_DEBUGGER(); return; } } void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, enum physymclk_clock_source clk_src, bool force_enable) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); /* Force PHYSYMCLK on and Select phyd32clk as the source of clock which is output to PHY through DCIO */ switch (phy_inst) { case 0: if (force_enable) { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, 1, PHYASYMCLK_FORCE_SRC_SEL, clk_src); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, 0, PHYASYMCLK_FORCE_SRC_SEL, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, 0); } break; case 1: if (force_enable) { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, 1, PHYBSYMCLK_FORCE_SRC_SEL, clk_src); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, 0, PHYBSYMCLK_FORCE_SRC_SEL, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, 0); } break; case 2: if (force_enable) { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, 1, PHYCSYMCLK_FORCE_SRC_SEL, clk_src); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, 0, PHYCSYMCLK_FORCE_SRC_SEL, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, 
PHYCSYMCLK_GATE_DISABLE, 0); } break; case 3: if (force_enable) { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, 1, PHYDSYMCLK_FORCE_SRC_SEL, clk_src); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, 0, PHYDSYMCLK_FORCE_SRC_SEL, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, 0); } break; case 4: if (force_enable) { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, 1, PHYESYMCLK_FORCE_SRC_SEL, clk_src); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, 0, PHYESYMCLK_FORCE_SRC_SEL, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, 0); } break; default: BREAK_TO_DEBUGGER(); return; } } /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ void dccg31_set_dtbclk_dto( struct dccg *dccg, const struct dtbclk_dto_params *params) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); int req_dtbclk_khz = params->pixclk_khz; uint32_t dtbdto_div; /* Mode DTBDTO Rate DTBCLK_DTO<x>_DIV Register * ODM 4:1 combine pixel rate/4 2 * ODM 2:1 combine pixel rate/2 4 * non-DSC 4:2:0 mode pixel rate/2 4 * DSC native 4:2:0 pixel rate/2 4 * DSC native 4:2:2 pixel rate/2 4 * Other modes pixel rate 8 */ if (params->num_odm_segments == 4) { dtbdto_div = 2; req_dtbclk_khz = params->pixclk_khz / 4; } else if ((params->num_odm_segments == 2) || (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 && !params->timing->dsc_cfg.ycbcr422_simple)) { dtbdto_div = 4; req_dtbclk_khz = params->pixclk_khz / 2; } else dtbdto_div = 8; if (params->ref_dtbclk_khz && req_dtbclk_khz) { uint32_t modulo, phase; // phase / modulo = dtbclk / dtbclk ref modulo = params->ref_dtbclk_khz * 1000; phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1), params->ref_dtbclk_khz); REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo); REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase); REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], DTBCLK_DTO_ENABLE[params->otg_inst], 1); REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst], DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1, 1, 100); /* The recommended programming sequence to enable DTBCLK DTO to generate * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should * be set only after DTO is enabled */ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], PIPE_DTO_SRC_SEL[params->otg_inst], 1); } else { REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst], DTBCLK_DTO_ENABLE[params->otg_inst], 0, PIPE_DTO_SRC_SEL[params->otg_inst], 0, DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); } } void dccg31_set_audio_dtbclk_dto( struct dccg *dccg, const struct dtbclk_dto_params *params) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (params->ref_dtbclk_khz && params->req_audio_dtbclk_khz) { uint32_t modulo, phase; // phase / modulo = dtbclk / dtbclk ref modulo = params->ref_dtbclk_khz * 1000; 
phase = div_u64((((unsigned long long)modulo * params->req_audio_dtbclk_khz) + params->ref_dtbclk_khz - 1), params->ref_dtbclk_khz); REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, modulo); REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, phase); //REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, // DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO, 1); REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 4); // 04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK } else { REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0); REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0); REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 3); // 03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO } } void dccg31_get_dccg_ref_freq(struct dccg *dccg, unsigned int xtalin_freq_inKhz, unsigned int *dccg_ref_freq_inKhz) { /* * Assume refclk is sourced from xtalin * expect 24MHz */ *dccg_ref_freq_inKhz = xtalin_freq_inKhz; return; } void dccg31_set_dispclk_change_mode( struct dccg *dccg, enum dentist_dispclk_change_mode change_mode) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, change_mode == DISPCLK_CHANGE_MODE_RAMPING ? 2 : 0); } void dccg31_init(struct dccg *dccg) { /* Set HPO stream encoder to use refclk to avoid case where PHY is * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which * will cause DCN to hang. */ dccg31_disable_symclk32_se(dccg, 0); dccg31_disable_symclk32_se(dccg, 1); dccg31_disable_symclk32_se(dccg, 2); dccg31_disable_symclk32_se(dccg, 3); dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false); dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { dccg31_disable_dpstreamclk(dccg, 0); dccg31_disable_dpstreamclk(dccg, 1); dccg31_disable_dpstreamclk(dccg, 2); dccg31_disable_dpstreamclk(dccg, 3); } if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) { dccg31_set_physymclk(dccg, 0, PHYSYMCLK_FORCE_SRC_SYMCLK, false); dccg31_set_physymclk(dccg, 1, PHYSYMCLK_FORCE_SRC_SYMCLK, false); dccg31_set_physymclk(dccg, 2, PHYSYMCLK_FORCE_SRC_SYMCLK, false); dccg31_set_physymclk(dccg, 3, PHYSYMCLK_FORCE_SRC_SYMCLK, false); dccg31_set_physymclk(dccg, 4, PHYSYMCLK_FORCE_SRC_SYMCLK, false); } } void dccg31_otg_add_pixel(struct dccg *dccg, uint32_t otg_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst], OTG_ADD_PIXEL[otg_inst], 1); } void dccg31_otg_drop_pixel(struct dccg *dccg, uint32_t otg_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst], OTG_DROP_PIXEL[otg_inst], 1); } static const struct dccg_funcs dccg31_funcs = { .update_dpp_dto = dccg31_update_dpp_dto, .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, .dccg_init = dccg31_init, .set_dpstreamclk = dccg31_set_dpstreamclk, .enable_symclk32_se = dccg31_enable_symclk32_se, .disable_symclk32_se = dccg31_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, .set_physymclk = dccg31_set_physymclk, .set_dtbclk_dto = dccg31_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, .otg_add_pixel = dccg31_otg_add_pixel, .otg_drop_pixel = dccg31_otg_drop_pixel, .set_dispclk_change_mode = dccg31_set_dispclk_change_mode, .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, }; struct dccg *dccg31_create( struct dc_context *ctx, const struct dccg_registers *regs, const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask) { struct dcn_dccg 
*dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL); struct dccg *base; if (dccg_dcn == NULL) { BREAK_TO_DEBUGGER(); return NULL; } base = &dccg_dcn->base; base->ctx = ctx; base->funcs = &dccg31_funcs; dccg_dcn->regs = regs; dccg_dcn->dccg_shift = dccg_shift; dccg_dcn->dccg_mask = dccg_mask; return &dccg_dcn->base; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
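Below is a standalone sketch of the DPPCLK DTO arithmetic used by dccg31_update_dpp_dto() above (the clock numbers are made up): with a fixed 8-bit modulo of 0xff, the phase is the rounded-up fraction of the reference clock that the pipe requests, clamped so it never exceeds the modulo.

/*
 * Standalone sketch (not driver code): phase/modulo ~= req_dppclk/ref_dppclk,
 * with modulo fixed at 0xff as in dccg31_update_dpp_dto(). Values are
 * hypothetical and only serve to show the rounding and clamping behavior.
 */
#include <stdint.h>
#include <stdio.h>

static void compute_dppclk_dto(int ref_dppclk_khz, int req_dppclk_khz,
			       uint32_t *phase, uint32_t *modulo)
{
	*modulo = 0xff; /* fixed modulo, as in the driver */

	/* Round up so the generated clock is never below the request. */
	*phase = (*modulo * req_dppclk_khz + ref_dppclk_khz - 1) / ref_dppclk_khz;
	if (*phase > 0xff)
		*phase = 0xff; /* the driver asserts and clamps here */
}

int main(void)
{
	uint32_t phase, modulo;
	int ref_khz = 600000;  /* hypothetical DPP reference clock */
	int req_khz = 400000;  /* hypothetical per-pipe request */

	compute_dppclk_dto(ref_khz, req_khz, &phase, &modulo);
	printf("phase=%u modulo=%u -> effective dppclk ~= %d kHz\n",
	       phase, modulo, ref_khz * (int)phase / (int)modulo);
	return 0;
}

The same phase/modulo idea appears again in dccg31_set_dtbclk_dto() and dccg31_set_audio_dtbclk_dto(), except that there the modulo is the reference DTBCLK expressed in Hz rather than a fixed 0xff.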
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD */ #include "dm_services.h" #include "dc.h" #include "core_status.h" #include "core_types.h" #include "hw_sequencer.h" #include "dce/dce_hwseq.h" #include "resource.h" #include "gpio_service_interface.h" #include "clk_mgr.h" #include "clock_source.h" #include "dc_bios_types.h" #include "bios_parser_interface.h" #include "bios/bios_parser_helper.h" #include "include/irq_service_interface.h" #include "transform.h" #include "dmcu.h" #include "dpp.h" #include "timing_generator.h" #include "abm.h" #include "virtual/virtual_link_encoder.h" #include "hubp.h" #include "link_hwss.h" #include "link_encoder.h" #include "link_enc_cfg.h" #include "link.h" #include "dm_helpers.h" #include "mem_input.h" #include "dc_dmub_srv.h" #include "dsc.h" #include "vm_helper.h" #include "dce/dce_i2c.h" #include "dmub/dmub_srv.h" #include "dce/dmub_psr.h" #include "dce/dmub_hw_lock_mgr.h" #include "dc_trace.h" #include "hw_sequencer_private.h" #include "dce/dmub_outbox.h" #define CTX \ dc->ctx #define DC_LOGGER \ dc->ctx->logger static const char DC_BUILD_ID[] = "production-build"; /** * DOC: Overview * * DC is the OS-agnostic component of the amdgpu DC driver. * * DC maintains and validates a set of structs representing the state of the * driver and writes that state to AMD hardware * * Main DC HW structs: * * struct dc - The central struct. One per driver. Created on driver load, * destroyed on driver unload. * * struct dc_context - One per driver. * Used as a backpointer by most other structs in dc. * * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP * plugpoints). Created on driver load, destroyed on driver unload. * * struct dc_sink - One per display. Created on boot or hotplug. * Destroyed on shutdown or hotunplug. A dc_link can have a local sink * (the display directly attached). It may also have one or more remote * sinks (in the Multi-Stream Transport case) * * struct resource_pool - One per driver. Represents the hw blocks not in the * main pipeline. Not directly accessible by dm. * * Main dc state structs: * * These structs can be created and destroyed as needed. There is a full set of * these structs in dc->current_state representing the currently programmed state. * * struct dc_state - The global DC state to track global state information, * such as bandwidth values. 
* * struct dc_stream_state - Represents the hw configuration for the pipeline from * a framebuffer to a display. Maps one-to-one with dc_sink. * * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, * and may have more in the Multi-Plane Overlay case. * * struct resource_context - Represents the programmable state of everything in * the resource_pool. Not directly accessible by dm. * * struct pipe_ctx - A member of struct resource_context. Represents the * internal hardware pipeline components. Each dc_plane_state has either * one or two (in the pipe-split case). */ /* Private functions */ static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) { if (new > *original) *original = new; } static void destroy_links(struct dc *dc) { uint32_t i; for (i = 0; i < dc->link_count; i++) { if (NULL != dc->links[i]) dc->link_srv->destroy_link(&dc->links[i]); } } static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) { int i; uint32_t count = 0; for (i = 0; i < num_links; i++) { if (links[i]->connector_signal == SIGNAL_TYPE_EDP || links[i]->is_internal_display) count++; } return count; } static int get_seamless_boot_stream_count(struct dc_state *ctx) { uint8_t i; uint8_t seamless_boot_stream_count = 0; for (i = 0; i < ctx->stream_count; i++) if (ctx->streams[i]->apply_seamless_boot_optimization) seamless_boot_stream_count++; return seamless_boot_stream_count; } static bool create_links( struct dc *dc, uint32_t num_virtual_links) { int i; int connectors_num; struct dc_bios *bios = dc->ctx->dc_bios; dc->link_count = 0; connectors_num = bios->funcs->get_connectors_number(bios); DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); if (connectors_num > ENUM_ID_COUNT) { dm_error( "DC: Number of connectors %d exceeds maximum of %d!\n", connectors_num, ENUM_ID_COUNT); return false; } dm_output_to_console( "DC: %s: connectors_num: physical:%d, virtual:%d\n", __func__, connectors_num, num_virtual_links); for (i = 0; i < connectors_num; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link; DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); link_init_params.ctx = dc->ctx; /* next BIOS object table connector */ link_init_params.connector_index = i; link_init_params.link_index = dc->link_count; link_init_params.dc = dc; link = dc->link_srv->create_link(&link_init_params); if (link) { dc->links[dc->link_count] = link; link->dc = dc; ++dc->link_count; } } DC_LOG_DC("BIOS object table - end"); /* Create a link for each usb4 dpia port */ for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link; link_init_params.ctx = dc->ctx; link_init_params.connector_index = i; link_init_params.link_index = dc->link_count; link_init_params.dc = dc; link_init_params.is_dpia_link = true; link = dc->link_srv->create_link(&link_init_params); if (link) { dc->links[dc->link_count] = link; link->dc = dc; ++dc->link_count; } } for (i = 0; i < num_virtual_links; i++) { struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); struct encoder_init_data enc_init = {0}; if (link == NULL) { BREAK_TO_DEBUGGER(); goto failed_alloc; } link->link_index = dc->link_count; dc->links[dc->link_count] = link; dc->link_count++; link->ctx = dc->ctx; link->dc = dc; link->connector_signal = SIGNAL_TYPE_VIRTUAL; link->link_id.type = OBJECT_TYPE_CONNECTOR; link->link_id.id = 
CONNECTOR_ID_VIRTUAL; link->link_id.enum_id = ENUM_ID_1; link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); if (!link->link_enc) { BREAK_TO_DEBUGGER(); goto failed_alloc; } link->link_status.dpcd_caps = &link->dpcd_caps; enc_init.ctx = dc->ctx; enc_init.channel = CHANNEL_ID_UNKNOWN; enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; enc_init.transmitter = TRANSMITTER_UNKNOWN; enc_init.connector = link->link_id; enc_init.encoder.type = OBJECT_TYPE_ENCODER; enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; enc_init.encoder.enum_id = ENUM_ID_1; virtual_link_encoder_construct(link->link_enc, &enc_init); } dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); return true; failed_alloc: return false; } /* Create additional DIG link encoder objects if fewer than the platform * supports were created during link construction. This can happen if the * number of physical connectors is less than the number of DIGs. */ static bool create_link_encoders(struct dc *dc) { bool res = true; unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; int i; /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG * link encoders and physical display endpoints and does not require * additional link encoder objects. */ if (num_usb4_dpia == 0) return res; /* Create as many link encoder objects as the platform supports. DPIA * endpoints can be programmably mapped to any DIG. */ if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) { for (i = 0; i < num_dig_link_enc; i++) { struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) { link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx, (enum engine_id)(ENGINE_ID_DIGA + i)); if (link_enc) { dc->res_pool->link_encoders[i] = link_enc; dc->res_pool->dig_link_enc_count++; } else { res = false; } } } } return res; } /* Destroy any additional DIG link encoder objects created by * create_link_encoders(). * NB: Must only be called after destroy_links(). */ static void destroy_link_encoders(struct dc *dc) { unsigned int num_usb4_dpia; unsigned int num_dig_link_enc; int i; if (!dc->res_pool) return; num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG * link encoders and physical display endpoints and does not require * additional link encoder objects. 
*/ if (num_usb4_dpia == 0) return; for (i = 0; i < num_dig_link_enc; i++) { struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; if (link_enc) { link_enc->funcs->destroy(&link_enc); dc->res_pool->link_encoders[i] = NULL; dc->res_pool->dig_link_enc_count--; } } } static struct dc_perf_trace *dc_perf_trace_create(void) { return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); } static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) { kfree(*perf_trace); *perf_trace = NULL; } /** * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR * @dc: dc reference * @stream: Initial dc stream state * @adjust: Updated parameters for vertical_total_min and vertical_total_max * * Looks up the pipe context of dc_stream_state and updates the * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh * Rate, which is a power-saving feature that targets reducing panel * refresh rate while the screen is static * * Return: %true if the pipe context is found and adjusted; * %false if the pipe context is not found. */ bool dc_stream_adjust_vmin_vmax(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust) { int i; /* * Don't adjust DRR while there's bandwidth optimizations pending to * avoid conflicting with firmware updates. */ if (dc->ctx->dce_version > DCE_VERSION_MAX) if (dc->optimized_required || dc->wm_optimized_required) return false; stream->adjust.v_total_max = adjust->v_total_max; stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; stream->adjust.v_total_min = adjust->v_total_min; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) { dc->hwss.set_drr(&pipe, 1, *adjust); return true; } } return false; } /** * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate) * * @dc: [in] dc reference * @stream: [in] Initial dc stream state * @refresh_rate: [in] new refresh_rate * * Return: %true if the pipe context is found and there is an associated * timing_generator for the DC; * %false if the pipe context is not found or there is no * timing_generator for the DC. 
*/ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, struct dc_stream_state *stream, uint32_t *refresh_rate) { bool status = false; int i = 0; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) { /* Only execute if a function pointer has been defined for * the DC version in question */ if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) { pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate); status = true; break; } } } return status; } bool dc_stream_get_crtc_position(struct dc *dc, struct dc_stream_state **streams, int num_streams, unsigned int *v_pos, unsigned int *nom_v_pos) { /* TODO: Support multiple streams */ const struct dc_stream_state *stream = streams[0]; int i; bool ret = false; struct crtc_position position; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.stream_enc) { dc->hwss.get_position(&pipe, 1, &position); *v_pos = position.vertical_count; *nom_v_pos = position.nominal_vcount; ret = true; } } return ret; } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) static inline void dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) { union dmub_rb_cmd cmd = {0}; cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num; cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num; if (is_stop) { cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE; } else { cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY; cmd.secure_display.roi_info.x_start = rect->x; cmd.secure_display.roi_info.y_start = rect->y; cmd.secure_display.roi_info.x_end = rect->x + rect->width; cmd.secure_display.roi_info.y_end = rect->y + rect->height; } dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } static inline void dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu, struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) { if (is_stop) dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); else dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping); } bool dc_stream_forward_crc_window(struct dc_stream_state *stream, struct rect *rect, bool is_stop) { struct dmcu *dmcu; struct dc_dmub_srv *dmub_srv; struct otg_phy_mux mux_mapping; struct pipe_ctx *pipe; int i; struct dc *dc = stream->ctx->dc; for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) break; } /* Stream not found */ if (i == MAX_PIPES) return false; mux_mapping.phy_output_num = stream->link->link_enc_hw_inst; mux_mapping.otg_output_num = pipe->stream_res.tg->inst; dmcu = dc->res_pool->dmcu; dmub_srv = dc->ctx->dmub_srv; /* forward to dmub */ if (dmub_srv) dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop); /* forward to dmcu */ else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop); else return false; return true; } #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ /** * dc_stream_configure_crc() - Configure CRC capture for the given stream. * @dc: DC Object * @stream: The stream to configure CRC on. * @enable: Enable CRC if true, disable otherwise. 
* @crc_window: CRC window (x/y start/end) information * @continuous: Capture CRC on every frame if true. Otherwise, only capture * once. * * By default, only CRC0 is configured, and the entire frame is used to * calculate the CRC. * * Return: %false if the stream is not found or CRC capture is not supported; * %true if the stream has been configured. */ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, struct crc_params *crc_window, bool enable, bool continuous) { struct pipe_ctx *pipe; struct crc_params param; struct timing_generator *tg; pipe = resource_get_otg_master_for_stream( &dc->current_state->res_ctx, stream); /* Stream not found */ if (pipe == NULL) return false; /* By default, capture the full frame */ param.windowa_x_start = 0; param.windowa_y_start = 0; param.windowa_x_end = pipe->stream->timing.h_addressable; param.windowa_y_end = pipe->stream->timing.v_addressable; param.windowb_x_start = 0; param.windowb_y_start = 0; param.windowb_x_end = pipe->stream->timing.h_addressable; param.windowb_y_end = pipe->stream->timing.v_addressable; if (crc_window) { param.windowa_x_start = crc_window->windowa_x_start; param.windowa_y_start = crc_window->windowa_y_start; param.windowa_x_end = crc_window->windowa_x_end; param.windowa_y_end = crc_window->windowa_y_end; param.windowb_x_start = crc_window->windowb_x_start; param.windowb_y_start = crc_window->windowb_y_start; param.windowb_x_end = crc_window->windowb_x_end; param.windowb_y_end = crc_window->windowb_y_end; } param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; param.odm_mode = pipe->next_odm_pipe ? 1:0; /* Default to the union of both windows */ param.selection = UNION_WINDOW_A_B; param.continuous_mode = continuous; param.enable = enable; tg = pipe->stream_res.tg; /* Only call if supported */ if (tg->funcs->configure_crc) return tg->funcs->configure_crc(tg, &param); DC_LOG_WARNING("CRC capture not supported."); return false; } /** * dc_stream_get_crc() - Get CRC values for the given stream. * * @dc: DC object. * @stream: The DC stream state of the stream to get CRCs from. * @r_cr: CRC value for the red component. * @g_y: CRC value for the green component. * @b_cb: CRC value for the blue component. * * dc_stream_configure_crc needs to be called beforehand to enable CRCs. * * Return: * %false if stream is not found, or if CRCs are not enabled. 
*/ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) { int i; struct pipe_ctx *pipe; struct timing_generator *tg; for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream) break; } /* Stream not found */ if (i == MAX_PIPES) return false; tg = pipe->stream_res.tg; if (tg->funcs->get_crc) return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); DC_LOG_WARNING("CRC capture not supported."); return false; } void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, enum dc_dynamic_expansion option) { /* OPP FMT dyn expansion updates*/ int i; struct pipe_ctx *pipe_ctx; for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; pipe_ctx->stream_res.opp->dyn_expansion = option; pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, stream->timing.display_color_depth, stream->signal); } } } void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option) { struct bit_depth_reduction_params params; struct dc_link *link = stream->link; struct pipe_ctx *pipes = NULL; int i; for (i = 0; i < MAX_PIPES; i++) { if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; break; } } if (!pipes) return; if (option > DITHER_OPTION_MAX) return; stream->dither_option = option; memset(&params, 0, sizeof(params)); resource_build_bit_depth_reduction_params(stream, &params); stream->bit_depth_params = params; if (pipes->plane_res.xfm && pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( pipes->plane_res.xfm, pipes->plane_res.scl_data.lb_params.depth, &stream->bit_depth_params); } pipes->stream_res.opp->funcs-> opp_program_bit_depth_reduction(pipes->stream_res.opp, &params); } bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) { int i; bool ret = false; struct pipe_ctx *pipes; for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipes = &dc->current_state->res_ctx.pipe_ctx[i]; dc->hwss.program_gamut_remap(pipes); ret = true; } } return ret; } bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) { int i; bool ret = false; struct pipe_ctx *pipes; for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipes = &dc->current_state->res_ctx.pipe_ctx[i]; dc->hwss.program_output_csc(dc, pipes, stream->output_color_space, stream->csc_color_matrix.matrix, pipes->stream_res.opp->inst); ret = true; } } return ret; } void dc_stream_set_static_screen_params(struct dc *dc, struct dc_stream_state **streams, int num_streams, const struct dc_static_screen_params *params) { int i, j; struct pipe_ctx *pipes_affected[MAX_PIPES]; int num_pipes_affected = 0; for (i = 0; i < num_streams; i++) { struct dc_stream_state *stream = streams[i]; for (j = 0; j < MAX_PIPES; j++) { if (dc->current_state->res_ctx.pipe_ctx[j].stream == stream) { pipes_affected[num_pipes_affected++] = &dc->current_state->res_ctx.pipe_ctx[j]; } } } dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); } static void dc_destruct(struct dc *dc) { // reset link encoder assignment table on destruct if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) 
link_enc_cfg_init(dc, dc->current_state); if (dc->current_state) { dc_release_state(dc->current_state); dc->current_state = NULL; } destroy_links(dc); destroy_link_encoders(dc); if (dc->clk_mgr) { dc_destroy_clk_mgr(dc->clk_mgr); dc->clk_mgr = NULL; } dc_destroy_resource_pool(dc); if (dc->link_srv) link_destroy_link_service(&dc->link_srv); if (dc->ctx->gpio_service) dal_gpio_service_destroy(&dc->ctx->gpio_service); if (dc->ctx->created_bios) dal_bios_parser_destroy(&dc->ctx->dc_bios); dc_perf_trace_destroy(&dc->ctx->perf_trace); kfree(dc->ctx); dc->ctx = NULL; kfree(dc->bw_vbios); dc->bw_vbios = NULL; kfree(dc->bw_dceip); dc->bw_dceip = NULL; kfree(dc->dcn_soc); dc->dcn_soc = NULL; kfree(dc->dcn_ip); dc->dcn_ip = NULL; kfree(dc->vm_helper); dc->vm_helper = NULL; } static bool dc_construct_ctx(struct dc *dc, const struct dc_init_data *init_params) { struct dc_context *dc_ctx; dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); if (!dc_ctx) return false; dc_ctx->cgs_device = init_params->cgs_device; dc_ctx->driver_context = init_params->driver; dc_ctx->dc = dc; dc_ctx->asic_id = init_params->asic_id; dc_ctx->dc_sink_id_count = 0; dc_ctx->dc_stream_id_count = 0; dc_ctx->dce_environment = init_params->dce_environment; dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets; dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets; /* Create logger */ dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id); dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { kfree(dc_ctx); ASSERT_CRITICAL(false); return false; } dc->ctx = dc_ctx; dc->link_srv = link_create_link_service(); if (!dc->link_srv) return false; return true; } static bool dc_construct(struct dc *dc, const struct dc_init_data *init_params) { struct dc_context *dc_ctx; struct bw_calcs_dceip *dc_dceip; struct bw_calcs_vbios *dc_vbios; struct dcn_soc_bounding_box *dcn_soc; struct dcn_ip_params *dcn_ip; dc->config = init_params->flags; // Allocate memory for the vm_helper dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); if (!dc->vm_helper) { dm_error("%s: failed to create dc->vm_helper\n", __func__); goto fail; } memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); if (!dc_dceip) { dm_error("%s: failed to create dceip\n", __func__); goto fail; } dc->bw_dceip = dc_dceip; dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); if (!dc_vbios) { dm_error("%s: failed to create vbios\n", __func__); goto fail; } dc->bw_vbios = dc_vbios; dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); if (!dcn_soc) { dm_error("%s: failed to create dcn_soc\n", __func__); goto fail; } dc->dcn_soc = dcn_soc; dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); if (!dcn_ip) { dm_error("%s: failed to create dcn_ip\n", __func__); goto fail; } dc->dcn_ip = dcn_ip; if (!dc_construct_ctx(dc, init_params)) { dm_error("%s: failed to create ctx\n", __func__); goto fail; } dc_ctx = dc->ctx; /* Resource should construct all asic specific resources. 
* This should be the only place where we need to parse the asic id */ if (init_params->vbios_override) dc_ctx->dc_bios = init_params->vbios_override; else { /* Create BIOS parser */ struct bp_init_data bp_init_data; bp_init_data.ctx = dc_ctx; bp_init_data.bios = init_params->asic_id.atombios_base_address; dc_ctx->dc_bios = dal_bios_parser_create( &bp_init_data, dc_ctx->dce_version); if (!dc_ctx->dc_bios) { ASSERT_CRITICAL(false); goto fail; } dc_ctx->created_bios = true; } dc->vendor_signature = init_params->vendor_signature; /* Create GPIO service */ dc_ctx->gpio_service = dal_gpio_service_create( dc_ctx->dce_version, dc_ctx->dce_environment, dc_ctx); if (!dc_ctx->gpio_service) { ASSERT_CRITICAL(false); goto fail; } dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); if (!dc->res_pool) goto fail; /* set i2c speed if not done by the respective dcnxxx__resource.c */ if (dc->caps.i2c_speed_in_khz_hdcp == 0) dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); if (!dc->clk_mgr) goto fail; #ifdef CONFIG_DRM_AMD_DC_FP dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; if (dc->res_pool->funcs->update_bw_bounding_box) { DC_FP_START(); dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); DC_FP_END(); } #endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because * on creation it copies the contents of dc->dml */ dc->current_state = dc_create_state(dc); if (!dc->current_state) { dm_error("%s: failed to create validate ctx\n", __func__); goto fail; } if (!create_links(dc, init_params->num_virtual_links)) goto fail; /* Create additional DIG link encoder objects if fewer than the platform * supports were created during link construction. 
*/ if (!create_link_encoders(dc)) goto fail; dc_resource_state_construct(dc, dc->current_state); return true; fail: return false; } static void disable_all_writeback_pipes_for_stream( const struct dc *dc, struct dc_stream_state *stream, struct dc_state *context) { int i; for (i = 0; i < stream->num_wb_info; i++) stream->writeback_info[i].wb_enabled = false; } static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock) { int i; /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ if (dc->hwss.interdependent_update_lock) dc->hwss.interdependent_update_lock(dc, context, lock); else { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; // Copied conditions that were previously in dce110_apply_ctx_for_surface if (stream == pipe_ctx->stream) { if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); } } } } static void phantom_pipe_blank( struct dc *dc, struct timing_generator *tg, int width, int height) { struct dce_hwseq *hws = dc->hwseq; enum dc_color_space color_space; struct tg_color black_color = {0}; struct output_pixel_processor *opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; uint32_t otg_active_width, otg_active_height; uint32_t i; /* program opp dpg blank color */ color_space = COLOR_SPACE_SRGB; color_space_to_black_color(dc, color_space, &black_color); otg_active_width = width; otg_active_height = height; /* get the OPTC source */ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp); for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) { opp = dc->res_pool->opps[i]; break; } } if (opp && opp->funcs->opp_set_disp_pattern_generator) opp->funcs->opp_set_disp_pattern_generator( opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, otg_active_height, 0); if (tg->funcs->is_tg_enabled(tg)) hws->funcs.wait_for_blank_complete(opp); } static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { if (dc->ctx->dce_version >= DCN_VERSION_1_0) { memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else { if (dc->ctx->dce_version < DCN_VERSION_2_0) color_space_to_black_color( dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); } if (dc->ctx->dce_version >= DCN_VERSION_2_0) { if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) 
get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); } } } static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; struct dc_state *dangling_context = dc_create_state(dc); struct dc_state *current_ctx; struct pipe_ctx *pipe; struct timing_generator *tg; if (dangling_context == NULL) return; dc_resource_state_copy_construct(dc->current_state, dangling_context); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *old_stream = dc->current_state->res_ctx.pipe_ctx[i].stream; bool should_disable = true; bool pipe_split_change = false; if ((context->res_ctx.pipe_ctx[i].top_pipe) && (dc->current_state->res_ctx.pipe_ctx[i].top_pipe)) pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx != dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx; else pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe; for (j = 0; j < context->stream_count; j++) { if (old_stream == context->streams[j]) { should_disable = false; break; } } if (!should_disable && pipe_split_change && dc->current_state->stream_count != context->stream_count) should_disable = true; if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe && !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) { struct pipe_ctx *old_pipe, *new_pipe; old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; new_pipe = &context->res_ctx.pipe_ctx[i]; if (old_pipe->plane_state && !new_pipe->plane_state) should_disable = true; } if (should_disable && old_stream) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; tg = pipe->stream_res.tg; /* When disabling plane for a phantom pipe, we must turn on the * phantom OTG so the disable programming gets the double buffer * update. Otherwise the pipe will be left in a partially disabled * state that can result in underflow or hang when enabling it * again for different use. */ if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { if (tg->funcs->enable_crtc) { int main_pipe_width, main_pipe_height; main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width; main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height; phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height); tg->funcs->enable_crtc(tg); } } dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); if (pipe->stream && pipe->plane_state) dc_update_viusal_confirm_color(dc, context, pipe); if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); dc->hwss.post_unlock_program_front_end(dc, dangling_context); } if (dc->hwss.program_front_end_for_ctx) { dc->hwss.interdependent_update_lock(dc, dc->current_state, true); dc->hwss.program_front_end_for_ctx(dc, dangling_context); dc->hwss.interdependent_update_lock(dc, dc->current_state, false); dc->hwss.post_unlock_program_front_end(dc, dangling_context); } /* We need to put the phantom OTG back into it's default (disabled) state or we * can get corruption when transition from one SubVP config to a different one. * The OTG is set to disable on falling edge of VUPDATE so the plane disable * will still get it's double buffer update. 
*/ if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { if (tg->funcs->disable_phantom_crtc) tg->funcs->disable_phantom_crtc(tg); } } } current_ctx = dc->current_state; dc->current_state = dangling_context; dc_release_state(current_ctx); } static void disable_vbios_mode_if_required( struct dc *dc, struct dc_state *context) { unsigned int i, j; /* check if timing_changed, disable stream*/ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *stream = NULL; struct dc_link *link = NULL; struct pipe_ctx *pipe = NULL; pipe = &context->res_ctx.pipe_ctx[i]; stream = pipe->stream; if (stream == NULL) continue; // only looking for first odm pipe if (pipe->prev_odm_pipe) continue; if (stream->link->local_sink && stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { link = stream->link; } if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { unsigned int enc_inst, tg_inst = 0; unsigned int pix_clk_100hz; enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (enc_inst != ENGINE_ID_UNKNOWN) { for (j = 0; j < dc->res_pool->stream_enc_count; j++) { if (dc->res_pool->stream_enc[j]->id == enc_inst) { tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( dc->res_pool->stream_enc[j]); break; } } dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( dc->res_pool->dp_clock_source, tg_inst, &pix_clk_100hz); if (link->link_status.link_active) { uint32_t requested_pix_clk_100hz = pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; if (pix_clk_100hz != requested_pix_clk_100hz) { dc->link_srv->set_dpms_off(pipe); pipe->stream->dpms_off = false; } } } } } } static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; PERF_TRACE(); for (i = 0; i < MAX_PIPES; i++) { int count = 0; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) continue; /* Timeout 100 ms */ while (count < 100000) { /* Must set to false to start with, due to OR in update function */ pipe->plane_state->status.is_flip_pending = false; dc->hwss.update_pending_status(pipe); if (!pipe->plane_state->status.is_flip_pending) break; udelay(1); count++; } ASSERT(!pipe->plane_state->status.is_flip_pending); } PERF_TRACE(); } /* Public functions */ struct dc *dc_create(const struct dc_init_data *init_params) { struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL); unsigned int full_pipe_count; if (!dc) return NULL; if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { if (!dc_construct_ctx(dc, init_params)) goto destruct_dc; } else { if (!dc_construct(dc, init_params)) goto destruct_dc; full_pipe_count = dc->res_pool->pipe_count; if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) full_pipe_count--; dc->caps.max_streams = min( full_pipe_count, dc->res_pool->stream_enc_count); dc->caps.max_links = dc->link_count; dc->caps.max_audios = dc->res_pool->audio_count; dc->caps.linear_pitch_alignment = 64; dc->caps.max_dp_protocol_version = DP_VERSION_1_4; dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; if (dc->res_pool->dmcu != NULL) dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; } dc->dcn_reg_offsets = init_params->dcn_reg_offsets; dc->nbio_reg_offsets = init_params->nbio_reg_offsets; /* Populate versioning information */ dc->versions.dc_ver = DC_VER; dc->build_id = DC_BUILD_ID; DC_LOG_DC("Display Core initialized\n"); return dc; destruct_dc: dc_destruct(dc); kfree(dc); return NULL; } static void 
detect_edp_presence(struct dc *dc) { struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link = NULL; enum dc_connection_type type; int i; int edp_num; dc_get_edp_links(dc, edp_links, &edp_num); if (!edp_num) return; for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; if (dc->config.edp_not_connected) { edp_link->edp_sink_present = false; } else { dc_link_detect_connection_type(edp_link, &type); edp_link->edp_sink_present = (type != dc_connection_none); } } } void dc_hardware_init(struct dc *dc) { detect_edp_presence(dc); if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) dc->hwss.init_hw(dc); } void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params) { dc->ctx->cp_psp = init_params->cp_psp; } void dc_deinit_callbacks(struct dc *dc) { memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); } void dc_destroy(struct dc **dc) { dc_destruct(*dc); kfree(*dc); *dc = NULL; } static void enable_timing_multisync( struct dc *dc, struct dc_state *ctx) { int i, multisync_count = 0; int pipe_count = dc->res_pool->pipe_count; struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; for (i = 0; i < pipe_count; i++) { if (!ctx->res_ctx.pipe_ctx[i].stream || !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) continue; if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) continue; multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; multisync_count++; } if (multisync_count > 0) { dc->hwss.enable_per_frame_crtc_position_reset( dc, multisync_count, multisync_pipes); } } static void program_timing_sync( struct dc *dc, struct dc_state *ctx) { int i, j, k; int group_index = 0; int num_group = 0; int pipe_count = dc->res_pool->pipe_count; struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; for (i = 0; i < pipe_count; i++) { if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe) continue; unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; } for (i = 0; i < pipe_count; i++) { int group_size = 1; enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; struct pipe_ctx *pipe_set[MAX_PIPES]; if (!unsynced_pipes[i]) continue; pipe_set[0] = unsynced_pipes[i]; unsynced_pipes[i] = NULL; /* Add tg to the set, search rest of the tg's for ones with * same timing, add all tgs with same timing to the group */ for (j = i + 1; j < pipe_count; j++) { if (!unsynced_pipes[j]) continue; if (sync_type != TIMING_SYNCHRONIZABLE && dc->hwss.enable_vblanks_synchronization && unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && resource_are_vblanks_synchronizable( unsynced_pipes[j]->stream, pipe_set[0]->stream)) { sync_type = VBLANK_SYNCHRONIZABLE; pipe_set[group_size] = unsynced_pipes[j]; unsynced_pipes[j] = NULL; group_size++; } else if (sync_type != VBLANK_SYNCHRONIZABLE && resource_are_streams_timing_synchronizable( unsynced_pipes[j]->stream, pipe_set[0]->stream)) { sync_type = TIMING_SYNCHRONIZABLE; pipe_set[group_size] = unsynced_pipes[j]; unsynced_pipes[j] = NULL; group_size++; } } /* set first unblanked pipe as master */ for (j = 0; j < group_size; j++) { bool is_blanked; if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) is_blanked = pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); else is_blanked = pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); if (!is_blanked) { if (j == 0) break; swap(pipe_set[0], pipe_set[j]); break; } } for (k = 0; k < group_size; k++) { struct dc_stream_status 
*status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); status->timing_sync_info.group_id = num_group; status->timing_sync_info.group_size = group_size; if (k == 0) status->timing_sync_info.master = true; else status->timing_sync_info.master = false; } /* remove any other pipes that are already been synced */ if (dc->config.use_pipe_ctx_sync_logic) { /* check pipe's syncd to decide which pipe to be removed */ for (j = 1; j < group_size; j++) { if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { group_size--; pipe_set[j] = pipe_set[group_size]; j--; } else /* link slave pipe's syncd with master pipe */ pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; } } else { for (j = j + 1; j < group_size; j++) { bool is_blanked; if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) is_blanked = pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); else is_blanked = pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); if (!is_blanked) { group_size--; pipe_set[j] = pipe_set[group_size]; j--; } } } if (group_size > 1) { if (sync_type == TIMING_SYNCHRONIZABLE) { dc->hwss.enable_timing_synchronization( dc, group_index, group_size, pipe_set); } else if (sync_type == VBLANK_SYNCHRONIZABLE) { dc->hwss.enable_vblanks_synchronization( dc, group_index, group_size, pipe_set); } group_index++; } num_group++; } } static bool streams_changed(struct dc *dc, struct dc_stream_state *streams[], uint8_t stream_count) { uint8_t i; if (stream_count != dc->current_state->stream_count) return true; for (i = 0; i < dc->current_state->stream_count; i++) { if (dc->current_state->streams[i] != streams[i]) return true; if (!streams[i]->link->link_state_valid) return true; } return false; } bool dc_validate_boot_timing(const struct dc *dc, const struct dc_sink *sink, struct dc_crtc_timing *crtc_timing) { struct timing_generator *tg; struct stream_encoder *se = NULL; struct dc_crtc_timing hw_crtc_timing = {0}; struct dc_link *link = sink->link; unsigned int i, enc_inst, tg_inst = 0; /* Support seamless boot on EDP displays only */ if (sink->sink_signal != SIGNAL_TYPE_EDP) { return false; } if (dc->debug.force_odm_combine) return false; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return false; enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (enc_inst == ENGINE_ID_UNKNOWN) return false; for (i = 0; i < dc->res_pool->stream_enc_count; i++) { if (dc->res_pool->stream_enc[i]->id == enc_inst) { se = dc->res_pool->stream_enc[i]; tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( dc->res_pool->stream_enc[i]); break; } } // tg_inst not found if (i == dc->res_pool->stream_enc_count) return false; if (tg_inst >= dc->res_pool->timing_generator_count) return false; if (tg_inst != link->link_enc->preferred_engine) return false; tg = dc->res_pool->timing_generators[tg_inst]; if (!tg->funcs->get_hw_timing) return false; if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) return false; if (crtc_timing->h_total != hw_crtc_timing.h_total) return false; if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) return false; if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) return false; if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) return false; if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) return false; if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) return false; if (crtc_timing->v_total != 
hw_crtc_timing.v_total) return false; if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) return false; if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) return false; if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) return false; if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) return false; if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) return false; /* block DSC for now, as VBIOS does not currently support DSC timings */ if (crtc_timing->flags.DSC) return false; if (dc_is_dp_signal(link->connector_signal)) { unsigned int pix_clk_100hz; uint32_t numOdmPipes = 1; uint32_t id_src[4] = {0}; dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( dc->res_pool->dp_clock_source, tg_inst, &pix_clk_100hz); if (tg->funcs->get_optc_source) tg->funcs->get_optc_source(tg, &numOdmPipes, &id_src[0], &id_src[1]); if (numOdmPipes == 2) pix_clk_100hz *= 2; if (numOdmPipes == 4) pix_clk_100hz *= 4; // Note: In rare cases, HW pixclk may differ from crtc's pixclk // slightly due to rounding issues in 10 kHz units. if (crtc_timing->pix_clk_100hz != pix_clk_100hz) return false; if (!se->funcs->dp_get_pixel_format) return false; if (!se->funcs->dp_get_pixel_format( se, &hw_crtc_timing.pixel_encoding, &hw_crtc_timing.display_color_depth)) return false; if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) return false; if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) return false; } if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { return false; } if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); return false; } return true; } static inline bool should_update_pipe_for_stream( struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream) { return (pipe_ctx->stream && pipe_ctx->stream == stream); } static inline bool should_update_pipe_for_plane( struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state) { return (pipe_ctx->plane_state == plane_state); } void dc_enable_stereo( struct dc *dc, struct dc_state *context, struct dc_stream_state *streams[], uint8_t stream_count) { int i, j; struct pipe_ctx *pipe; for (i = 0; i < MAX_PIPES; i++) { if (context != NULL) { pipe = &context->res_ctx.pipe_ctx[i]; } else { context = dc->current_state; pipe = &dc->current_state->res_ctx.pipe_ctx[i]; } for (j = 0; pipe && j < stream_count; j++) { if (should_update_pipe_for_stream(context, pipe, streams[j]) && dc->hwss.setup_stereo) dc->hwss.setup_stereo(pipe, dc); } } } void dc_trigger_sync(struct dc *dc, struct dc_state *context) { if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { enable_timing_multisync(dc, context); program_timing_sync(dc, context); } } static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) { int i; unsigned int stream_mask = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].stream) stream_mask |= 1 << i; } return stream_mask; } void dc_z10_restore(const struct dc *dc) { if (dc->hwss.z10_restore) dc->hwss.z10_restore(dc); } void dc_z10_save_init(struct dc *dc) { if (dc->hwss.z10_save_init) dc->hwss.z10_save_init(dc); } /** * dc_commit_state_no_check - Apply context to the hardware * * @dc: DC object with the current status to be updated * @context: New state that will become the current status at the end of this function * * Applies given 
context to the hardware and copy it into current context. * It's up to the user to release the src context afterwards. * * Return: an enum dc_status result code for the operation */ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) { struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status result = DC_ERROR_UNEXPECTED; struct pipe_ctx *pipe; int i, k, l; struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; struct dc_state *old_state; bool subvp_prev_use = false; dc_z10_restore(dc); dc_allow_idle_optimizations(dc, false); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; /* Check old context for SubVP */ subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); if (subvp_prev_use) break; } for (i = 0; i < context->stream_count; i++) dc_streams[i] = context->streams[i]; if (!dcb->funcs->is_accelerated_mode(dcb)) { disable_vbios_mode_if_required(dc, context); dc->hwss.enable_accelerated_mode(dc, context); } if (context->stream_count > get_seamless_boot_stream_count(context) || context->stream_count == 0) dc->hwss.prepare_bandwidth(dc, context); /* When SubVP is active, all HW programming must be done while * SubVP lock is acquired */ if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); if (dc->debug.enable_double_buffered_dsc_pg_support) dc->hwss.update_dsc_pg(dc, context, false); disable_dangling_plane(dc, context); /* re-program planes for existing stream, in case we need to * free up plane resource for later use */ if (dc->hwss.apply_ctx_for_surface) { for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->mode_changed) continue; apply_ctx_interdependent_lock(dc, context, context->streams[i], true); dc->hwss.apply_ctx_for_surface( dc, context->streams[i], context->stream_status[i].plane_count, context); /* use new pipe config in new context */ apply_ctx_interdependent_lock(dc, context, context->streams[i], false); dc->hwss.post_unlock_program_front_end(dc, context); } } /* Program hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); } result = dc->hwss.apply_ctx_to_hw(dc, context); if (result != DC_OK) { /* Application of dc_state to hardware stopped. 
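 * Return the link encoder assignment tracking to steady mode; this
 * should keep later queries reading from the unchanged current state
 * rather than from the context that is being abandoned here.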
*/ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; return result; } dc_trigger_sync(dc, context); /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ for (i = 0; i < context->stream_count; i++) { uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; context->streams[i]->update_flags.raw = 0xFFFFFFFF; context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; } /* Program all planes within new context*/ if (dc->hwss.program_front_end_for_ctx) { dc->hwss.interdependent_update_lock(dc, context, true); dc->hwss.program_front_end_for_ctx(dc, context); dc->hwss.interdependent_update_lock(dc, context, false); dc->hwss.post_unlock_program_front_end(dc, context); } if (dc->hwss.commit_subvp_config) dc->hwss.commit_subvp_config(dc, context); if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; if (!context->streams[i]->mode_changed) continue; if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, context, context->streams[i], true); dc->hwss.apply_ctx_for_surface( dc, context->streams[i], context->stream_status[i].plane_count, context); apply_ctx_interdependent_lock(dc, context, context->streams[i], false); dc->hwss.post_unlock_program_front_end(dc, context); } /* * enable stereo * TODO rework dc_enable_stereo call to work with validation sets? */ for (k = 0; k < MAX_PIPES; k++) { pipe = &context->res_ctx.pipe_ctx[k]; for (l = 0 ; pipe && l < context->stream_count; l++) { if (context->streams[l] && context->streams[l] == pipe->stream && dc->hwss.setup_stereo) dc->hwss.setup_stereo(pipe, dc); } } CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", context->streams[i]->timing.h_addressable, context->streams[i]->timing.v_addressable, context->streams[i]->timing.h_total, context->streams[i]->timing.v_total, context->streams[i]->timing.pix_clk_100hz / 10); } dc_enable_stereo(dc, context, dc_streams, context->stream_count); if (context->stream_count > get_seamless_boot_stream_count(context) || context->stream_count == 0) { /* Must wait for no flips to be pending before doing optimize bw */ wait_for_no_pipes_pending(dc, context); /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); } if (dc->debug.enable_double_buffered_dsc_pg_support) dc->hwss.update_dsc_pg(dc, context, true); if (dc->ctx->dce_version >= DCE_VERSION_MAX) TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); else TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); context->stream_mask = get_stream_mask(dc, context); if (context->stream_mask != dc->current_state->stream_mask) dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; /* Clear update flags that were set earlier to avoid redundant programming */ for (i = 0; i < context->stream_count; i++) { context->streams[i]->update_flags.raw = 0x0; } old_state = dc->current_state; dc->current_state = context; dc_release_state(old_state); dc_retain_state(dc->current_state); return result; } static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context); /** * dc_commit_streams - Commit current stream state * * @dc: DC object with the commit state to be configured in the hardware * @streams: Array with a list of stream state * @stream_count: Total of 
streams * * Function responsible for commit streams change to the hardware. * * Return: * Return DC_OK if everything work as expected, otherwise, return a dc_status * code. */ enum dc_status dc_commit_streams(struct dc *dc, struct dc_stream_state *streams[], uint8_t stream_count) { int i, j; struct dc_state *context; enum dc_status res = DC_OK; struct dc_validation_set set[MAX_STREAMS] = {0}; struct pipe_ctx *pipe; bool handle_exit_odm2to1 = false; if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) return res; if (!streams_changed(dc, streams, stream_count)) return res; DC_LOG_DC("%s: %d streams\n", __func__, stream_count); for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; struct dc_stream_status *status = dc_stream_get_status(stream); dc_stream_log(dc, stream); set[i].stream = stream; if (status) { set[i].plane_count = status->plane_count; for (j = 0; j < status->plane_count; j++) set[i].plane_states[j] = status->plane_states[j]; } } /* ODM Combine 2:1 power optimization is only applied for single stream * scenario, it uses extra pipes than needed to reduce power consumption * We need to switch off this feature to make room for new streams. */ if (stream_count > dc->current_state->stream_count && dc->current_state->stream_count == 1) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->next_odm_pipe) handle_exit_odm2to1 = true; } } if (handle_exit_odm2to1) res = commit_minimal_transition_state(dc, dc->current_state); context = dc_create_state(dc); if (!context) goto context_alloc_fail; dc_resource_state_copy_construct_current(dc, context); res = dc_validate_with_context(dc, set, stream_count, context, false); if (res != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; } res = dc_commit_state_no_check(dc, context); for (i = 0; i < stream_count; i++) { for (j = 0; j < context->stream_count; j++) { if (streams[i]->stream_id == context->streams[j]->stream_id) streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; if (dc_is_embedded_signal(streams[i]->signal)) { struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); if (dc->hwss.is_abm_supported) status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); else status->is_abm_supported = true; } } } fail: dc_release_state(context); context_alloc_fail: DC_LOG_DC("%s Finished.\n", __func__); return res; } bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire, struct dc_stream_state *stream, struct dc_3dlut **lut, struct dc_transfer_func **shaper) { int pipe_idx; bool ret = false; bool found_pipe_idx = false; const struct resource_pool *pool = dc->res_pool; struct resource_context *res_ctx = &dc->current_state->res_ctx; int mpcc_id = 0; if (pool && res_ctx) { if (acquire) { /*find pipe idx for the given stream*/ for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { found_pipe_idx = true; mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; break; } } } else found_pipe_idx = true;/*for release pipe_idx is not required*/ if (found_pipe_idx) { if (acquire && pool->funcs->acquire_post_bldn_3dlut) ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); else if (!acquire && pool->funcs->release_post_bldn_3dlut) ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); } } return ret; } static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) { int i; struct pipe_ctx 
*pipe; for (i = 0; i < MAX_PIPES; i++) { pipe = &context->res_ctx.pipe_ctx[i]; // Don't check flip pending on phantom pipes if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) continue; /* Must set to false to start with, due to OR in update function */ pipe->plane_state->status.is_flip_pending = false; dc->hwss.update_pending_status(pipe); if (pipe->plane_state->status.is_flip_pending) return true; } return false; } /* Perform updates here which need to be deferred until next vupdate * * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered * but forcing lut memory to shutdown state is immediate. This causes * single frame corruption as lut gets disabled mid-frame unless shutdown * is deferred until after entering bypass. */ static void process_deferred_updates(struct dc *dc) { int i = 0; if (dc->debug.enable_mem_low_power.bits.cm) { ASSERT(dc->dcn_ip->max_num_dpp); for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); } } void dc_post_update_surfaces_to_stream(struct dc *dc) { int i; struct dc_state *context = dc->current_state; if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) return; post_surface_trace(dc); /* * Only relevant for DCN behavior where we can guarantee the optimization * is safe to apply - retain the legacy behavior for DCE. */ if (dc->ctx->dce_version < DCE_VERSION_MAX) TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); else { TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); if (is_flip_pending_in_pipes(dc, context)) return; for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { context->res_ctx.pipe_ctx[i].pipe_idx = i; dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); } process_deferred_updates(dc); dc->hwss.optimize_bandwidth(dc, context); if (dc->debug.enable_double_buffered_dsc_pg_support) dc->hwss.update_dsc_pg(dc, context, true); } dc->optimized_required = false; dc->wm_optimized_required = false; } static void init_state(struct dc *dc, struct dc_state *context) { /* Each context must have their own instance of VBA and in order to * initialize and obtain IP and SOC the base DML instance from DC is * initially copied into every context */ memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); } struct dc_state *dc_create_state(struct dc *dc) { struct dc_state *context = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); if (!context) return NULL; init_state(dc, context); kref_init(&context->refcount); return context; } struct dc_state *dc_copy_state(struct dc_state *src_ctx) { int i, j; struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); if (!new_ctx) return NULL; memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; if (cur_pipe->top_pipe) cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; if (cur_pipe->bottom_pipe) cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; if (cur_pipe->prev_odm_pipe) cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; if (cur_pipe->next_odm_pipe) cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; } for (i = 0; i < new_ctx->stream_count; i++) { 
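	/*
	 * The shallow memcpy() above copied raw stream/plane pointers, so
	 * take an explicit reference on each of them here; the copied state
	 * then owns them independently of src_ctx, and the references are
	 * dropped again once the copy is released (dc_release_state() ->
	 * dc_state_free() -> dc_resource_state_destruct()).
	 */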
dc_stream_retain(new_ctx->streams[i]); for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) dc_plane_state_retain( new_ctx->stream_status[i].plane_states[j]); } kref_init(&new_ctx->refcount); return new_ctx; } void dc_retain_state(struct dc_state *context) { kref_get(&context->refcount); } static void dc_state_free(struct kref *kref) { struct dc_state *context = container_of(kref, struct dc_state, refcount); dc_resource_state_destruct(context); kvfree(context); } void dc_release_state(struct dc_state *context) { kref_put(&context->refcount, dc_state_free); } bool dc_set_generic_gpio_for_stereo(bool enable, struct gpio_service *gpio_service) { enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; struct gpio_pin_info pin_info; struct gpio *generic; struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), GFP_KERNEL); if (!config) return false; pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { kfree(config); return false; } else { generic = dal_gpio_service_create_generic_mux( gpio_service, pin_info.offset, pin_info.mask); } if (!generic) { kfree(config); return false; } gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); config->enable_output_from_mux = enable; config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; if (gpio_result == GPIO_RESULT_OK) gpio_result = dal_mux_setup_config(generic, config); if (gpio_result == GPIO_RESULT_OK) { dal_gpio_close(generic); dal_gpio_destroy_generic_mux(&generic); kfree(config); return true; } else { dal_gpio_close(generic); dal_gpio_destroy_generic_mux(&generic); kfree(config); return false; } } static bool is_surface_in_context( const struct dc_state *context, const struct dc_plane_state *plane_state) { int j; for (j = 0; j < MAX_PIPES; j++) { const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (plane_state == pipe_ctx->plane_state) { return true; } } return false; } static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; enum surface_update_type update_type = UPDATE_TYPE_FAST; if (!u->plane_info) return UPDATE_TYPE_FAST; if (u->plane_info->color_space != u->surface->color_space) { update_flags->bits.color_space_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); } if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { update_flags->bits.horizontal_mirror_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); } if (u->plane_info->rotation != u->surface->rotation) { update_flags->bits.rotation_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_FULL); } if (u->plane_info->format != u->surface->format) { update_flags->bits.pixel_format_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_FULL); } if (u->plane_info->stereo_format != u->surface->stereo_format) { update_flags->bits.stereo_format_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_FULL); } if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { update_flags->bits.per_pixel_alpha_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); } if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { update_flags->bits.global_alpha_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); } if (u->plane_info->dcc.enable != u->surface->dcc.enable || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk || 
u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { /* During DCC on/off, stutter period is calculated before * DCC has fully transitioned. This results in incorrect * stutter period calculation. Triggering a full update will * recalculate stutter period. */ update_flags->bits.dcc_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_FULL); } if (resource_pixel_format_to_bpp(u->plane_info->format) != resource_pixel_format_to_bpp(u->surface->format)) { /* different bytes per element will require full bandwidth * and DML calculation */ update_flags->bits.bpp_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_FULL); } if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); } if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, sizeof(union dc_tiling_info)) != 0) { update_flags->bits.swizzle_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); /* todo: below are HW dependent, we should add a hook to * DCE/N resource and validated there. */ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { /* swizzled mode requires RQ to be setup properly, * thus need to run DML to calculate RQ settings */ update_flags->bits.bandwidth_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_FULL); } } /* This should be UPDATE_TYPE_FAST if nothing has changed. */ return update_type; } static enum surface_update_type get_scaling_info_update_type( const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; if (!u->scaling_info) return UPDATE_TYPE_FAST; if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width || u->scaling_info->dst_rect.height != u->surface->dst_rect.height || u->scaling_info->scaling_quality.integer_scaling != u->surface->scaling_quality.integer_scaling ) { update_flags->bits.scaling_change = 1; if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) && (u->scaling_info->dst_rect.width < u->surface->src_rect.width || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) /* Making dst rect smaller requires a bandwidth change */ update_flags->bits.bandwidth_change = 1; } if (u->scaling_info->src_rect.width != u->surface->src_rect.width || u->scaling_info->src_rect.height != u->surface->src_rect.height) { update_flags->bits.scaling_change = 1; if (u->scaling_info->src_rect.width > u->surface->src_rect.width || u->scaling_info->src_rect.height > u->surface->src_rect.height) /* Making src rect bigger requires a bandwidth change */ update_flags->bits.clock_change = 1; } if (u->scaling_info->src_rect.x != u->surface->src_rect.x || u->scaling_info->src_rect.y != u->surface->src_rect.y || u->scaling_info->clip_rect.x != u->surface->clip_rect.x || u->scaling_info->clip_rect.y != u->surface->clip_rect.y || u->scaling_info->dst_rect.x != u->surface->dst_rect.x || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) update_flags->bits.position_change = 1; if (update_flags->bits.clock_change || update_flags->bits.bandwidth_change || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; if (update_flags->bits.position_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; } static enum surface_update_type det_surface_update(const struct dc *dc, const struct dc_surface_update *u) { const struct 
dc_state *context = dc->current_state; enum surface_update_type type; enum surface_update_type overall_type = UPDATE_TYPE_FAST; union surface_update_flags *update_flags = &u->surface->update_flags; if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { update_flags->raw = 0xFFFFFFFF; return UPDATE_TYPE_FULL; } update_flags->raw = 0; // Reset all flags type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); type = get_scaling_info_update_type(u); elevate_update_type(&overall_type, type); if (u->flip_addr) { update_flags->bits.addr_update = 1; if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { update_flags->bits.tmz_changed = 1; elevate_update_type(&overall_type, UPDATE_TYPE_FULL); } } if (u->in_transfer_func) update_flags->bits.in_transfer_func_change = 1; if (u->input_csc_color_matrix) update_flags->bits.input_csc_change = 1; if (u->coeff_reduction_factor) update_flags->bits.coeff_reduction_change = 1; if (u->gamut_remap_matrix) update_flags->bits.gamut_remap_change = 1; if (u->gamma) { enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; if (u->plane_info) format = u->plane_info->format; else if (u->surface) format = u->surface->format; if (dce_use_lut(format)) update_flags->bits.gamma_change = 1; } if (u->lut3d_func || u->func_shaper) update_flags->bits.lut_3d = 1; if (u->hdr_mult.value) if (u->hdr_mult.value != u->surface->hdr_mult.value) { update_flags->bits.hdr_mult = 1; elevate_update_type(&overall_type, UPDATE_TYPE_MED); } if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); } if (update_flags->bits.lut_3d) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } if (dc->debug.enable_legacy_fast_update && (update_flags->bits.gamma_change || update_flags->bits.gamut_remap_change || update_flags->bits.input_csc_change || update_flags->bits.coeff_reduction_change)) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } return overall_type; } static enum surface_update_type check_update_surfaces_for_stream( struct dc *dc, struct dc_surface_update *updates, int surface_count, struct dc_stream_update *stream_update, const struct dc_stream_status *stream_status) { int i; enum surface_update_type overall_type = UPDATE_TYPE_FAST; if (dc->idle_optimizations_allowed) overall_type = UPDATE_TYPE_FULL; if (stream_status == NULL || stream_status->plane_count != surface_count) overall_type = UPDATE_TYPE_FULL; if (stream_update && stream_update->pending_test_pattern) { overall_type = UPDATE_TYPE_FULL; } /* some stream updates require passive update */ if (stream_update) { union stream_update_flags *su_flags = &stream_update->stream->update_flags; if ((stream_update->src.height != 0 && stream_update->src.width != 0) || (stream_update->dst.height != 0 && stream_update->dst.width != 0) || stream_update->integer_scaling_update) su_flags->bits.scaling = 1; if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) su_flags->bits.abm_level = 1; if (stream_update->dpms_off) su_flags->bits.dpms_off = 1; if (stream_update->gamut_remap) su_flags->bits.gamut_remap = 1; if (stream_update->wb_update) su_flags->bits.wb_update = 1; if (stream_update->dsc_config) su_flags->bits.dsc_changed = 1; if (stream_update->mst_bw_update) su_flags->bits.mst_bw = 1; if (stream_update->stream && stream_update->stream->freesync_on_desktop && (stream_update->vrr_infopacket 
|| stream_update->allow_freesync || stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) su_flags->bits.fams_changed = 1; if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; /* Output transfer function changes do not require bandwidth recalculation, * so don't trigger a full update */ if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; } for (i = 0 ; i < surface_count; i++) { enum surface_update_type type = det_surface_update(dc, &updates[i]); elevate_update_type(&overall_type, type); } return overall_type; } /* * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) * * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types */ enum surface_update_type dc_check_update_surfaces_for_stream( struct dc *dc, struct dc_surface_update *updates, int surface_count, struct dc_stream_update *stream_update, const struct dc_stream_status *stream_status) { int i; enum surface_update_type type; if (stream_update) stream_update->stream->update_flags.raw = 0; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0; type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); if (type == UPDATE_TYPE_FULL) { if (stream_update) { uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; stream_update->stream->update_flags.raw = 0xFFFFFFFF; stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; } for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0xFFFFFFFF; } if (type == UPDATE_TYPE_FAST) { // If there's an available clock comparator, we use that. if (dc->clk_mgr->funcs->are_clock_states_equal) { if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) dc->optimized_required = true; // Else we fallback to mem compare. 
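			/*
			 * The memcmp() below only covers struct dc_clocks up to (but not
			 * including) prev_p_state_change_support, so fields from that
			 * member onward are excluded from the comparison and cannot by
			 * themselves set dc->optimized_required.
			 */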
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { dc->optimized_required = true; } dc->optimized_required |= dc->wm_optimized_required; } return type; } static struct dc_stream_status *stream_get_status( struct dc_state *ctx, struct dc_stream_state *stream) { uint8_t i; for (i = 0; i < ctx->stream_count; i++) { if (stream == ctx->streams[i]) { return &ctx->stream_status[i]; } } return NULL; } static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; static void copy_surface_update_to_plane( struct dc_plane_state *surface, struct dc_surface_update *srf_update) { if (srf_update->flip_addr) { surface->address = srf_update->flip_addr->address; surface->flip_immediate = srf_update->flip_addr->flip_immediate; surface->time.time_elapsed_in_us[surface->time.index] = srf_update->flip_addr->flip_timestamp_in_us - surface->time.prev_update_time_in_us; surface->time.prev_update_time_in_us = srf_update->flip_addr->flip_timestamp_in_us; surface->time.index++; if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) surface->time.index = 0; surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; } if (srf_update->scaling_info) { surface->scaling_quality = srf_update->scaling_info->scaling_quality; surface->dst_rect = srf_update->scaling_info->dst_rect; surface->src_rect = srf_update->scaling_info->src_rect; surface->clip_rect = srf_update->scaling_info->clip_rect; } if (srf_update->plane_info) { surface->color_space = srf_update->plane_info->color_space; surface->format = srf_update->plane_info->format; surface->plane_size = srf_update->plane_info->plane_size; surface->rotation = srf_update->plane_info->rotation; surface->horizontal_mirror = srf_update->plane_info->horizontal_mirror; surface->stereo_format = srf_update->plane_info->stereo_format; surface->tiling_info = srf_update->plane_info->tiling_info; surface->visible = srf_update->plane_info->visible; surface->per_pixel_alpha = srf_update->plane_info->per_pixel_alpha; surface->global_alpha = srf_update->plane_info->global_alpha; surface->global_alpha_value = srf_update->plane_info->global_alpha_value; surface->dcc = srf_update->plane_info->dcc; surface->layer_index = srf_update->plane_info->layer_index; } if (srf_update->gamma && (surface->gamma_correction != srf_update->gamma)) { memcpy(&surface->gamma_correction->entries, &srf_update->gamma->entries, sizeof(struct dc_gamma_entries)); surface->gamma_correction->is_identity = srf_update->gamma->is_identity; surface->gamma_correction->num_entries = srf_update->gamma->num_entries; surface->gamma_correction->type = srf_update->gamma->type; } if (srf_update->in_transfer_func && (surface->in_transfer_func != srf_update->in_transfer_func)) { surface->in_transfer_func->sdr_ref_white_level = srf_update->in_transfer_func->sdr_ref_white_level; surface->in_transfer_func->tf = srf_update->in_transfer_func->tf; surface->in_transfer_func->type = srf_update->in_transfer_func->type; memcpy(&surface->in_transfer_func->tf_pts, &srf_update->in_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } if (srf_update->func_shaper && (surface->in_shaper_func != srf_update->func_shaper)) memcpy(surface->in_shaper_func, srf_update->func_shaper, sizeof(*surface->in_shaper_func)); if (srf_update->lut3d_func && (surface->lut3d_func != srf_update->lut3d_func)) memcpy(surface->lut3d_func, srf_update->lut3d_func, sizeof(*surface->lut3d_func)); if (srf_update->hdr_mult.value) 
surface->hdr_mult = srf_update->hdr_mult; if (srf_update->blend_tf && (surface->blend_tf != srf_update->blend_tf)) memcpy(surface->blend_tf, srf_update->blend_tf, sizeof(*surface->blend_tf)); if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = *srf_update->input_csc_color_matrix; if (srf_update->coeff_reduction_factor) surface->coeff_reduction_factor = *srf_update->coeff_reduction_factor; if (srf_update->gamut_remap_matrix) surface->gamut_remap_matrix = *srf_update->gamut_remap_matrix; } static void copy_stream_update_to_stream(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, struct dc_stream_update *update) { struct dc_context *dc_ctx = dc->ctx; if (update == NULL || stream == NULL) return; if (update->src.height && update->src.width) stream->src = update->src; if (update->dst.height && update->dst.width) stream->dst = update->dst; if (update->out_transfer_func && stream->out_transfer_func != update->out_transfer_func) { stream->out_transfer_func->sdr_ref_white_level = update->out_transfer_func->sdr_ref_white_level; stream->out_transfer_func->tf = update->out_transfer_func->tf; stream->out_transfer_func->type = update->out_transfer_func->type; memcpy(&stream->out_transfer_func->tf_pts, &update->out_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } if (update->hdr_static_metadata) stream->hdr_static_metadata = *update->hdr_static_metadata; if (update->abm_level) stream->abm_level = *update->abm_level; if (update->periodic_interrupt) stream->periodic_interrupt = *update->periodic_interrupt; if (update->gamut_remap) stream->gamut_remap_matrix = *update->gamut_remap; /* Note: this being updated after mode set is currently not a use case * however if it arises OCSC would need to be reprogrammed at the * minimum */ if (update->output_color_space) stream->output_color_space = *update->output_color_space; if (update->output_csc_transform) stream->csc_color_matrix = *update->output_csc_transform; if (update->vrr_infopacket) stream->vrr_infopacket = *update->vrr_infopacket; if (update->allow_freesync) stream->allow_freesync = *update->allow_freesync; if (update->vrr_active_variable) stream->vrr_active_variable = *update->vrr_active_variable; if (update->vrr_active_fixed) stream->vrr_active_fixed = *update->vrr_active_fixed; if (update->crtc_timing_adjust) stream->adjust = *update->crtc_timing_adjust; if (update->dpms_off) stream->dpms_off = *update->dpms_off; if (update->hfvsif_infopacket) stream->hfvsif_infopacket = *update->hfvsif_infopacket; if (update->vtem_infopacket) stream->vtem_infopacket = *update->vtem_infopacket; if (update->vsc_infopacket) stream->vsc_infopacket = *update->vsc_infopacket; if (update->vsp_infopacket) stream->vsp_infopacket = *update->vsp_infopacket; if (update->adaptive_sync_infopacket) stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; if (update->dither_option) stream->dither_option = *update->dither_option; if (update->pending_test_pattern) stream->test_pattern = *update->pending_test_pattern; /* update current stream with writeback info */ if (update->wb_update) { int i; stream->num_wb_info = update->wb_update->num_wb_info; ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); for (i = 0; i < stream->num_wb_info; i++) stream->writeback_info[i] = update->wb_update->writeback_info[i]; } if (update->dsc_config) { struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; uint32_t old_dsc_enabled = stream->timing.flags.DSC; uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 
update->dsc_config->num_slices_v != 0); /* Use temporarry context for validating new DSC config */ struct dc_state *dsc_validate_context = dc_create_state(dc); if (dsc_validate_context) { dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { stream->timing.dsc_cfg = old_dsc_cfg; stream->timing.flags.DSC = old_dsc_enabled; update->dsc_config = NULL; } dc_release_state(dsc_validate_context); } else { DC_ERROR("Failed to allocate new validate context for DSC change\n"); update->dsc_config = NULL; } } } static bool update_planes_and_stream_state(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, enum surface_update_type *new_update_type, struct dc_state **new_context) { struct dc_state *context; int i, j; enum surface_update_type update_type; const struct dc_stream_status *stream_status; struct dc_context *dc_ctx = dc->ctx; stream_status = dc_stream_get_status(stream); if (!stream_status) { if (surface_count) /* Only an error condition if surf_count non-zero*/ ASSERT(false); return false; /* Cannot commit surface to stream that is not committed */ } context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); /* update current stream with the new updates */ copy_stream_update_to_stream(dc, context, stream, stream_update); /* do not perform surface update if surface has invalid dimensions * (all zero) and no scaling_info is provided */ if (surface_count > 0) { for (i = 0; i < surface_count; i++) { if ((srf_updates[i].surface->src_rect.width == 0 || srf_updates[i].surface->src_rect.height == 0 || srf_updates[i].surface->dst_rect.width == 0 || srf_updates[i].surface->dst_rect.height == 0) && (!srf_updates[i].scaling_info || srf_updates[i].scaling_info->src_rect.width == 0 || srf_updates[i].scaling_info->src_rect.height == 0 || srf_updates[i].scaling_info->dst_rect.width == 0 || srf_updates[i].scaling_info->dst_rect.height == 0)) { DC_ERROR("Invalid src/dst rects in surface update!\n"); return false; } } } if (update_type >= update_surface_trace_level) update_surface_trace(dc, srf_updates, surface_count); if (update_type >= UPDATE_TYPE_FULL) { struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; for (i = 0; i < surface_count; i++) new_planes[i] = srf_updates[i].surface; /* initialize scratch memory for building context */ context = dc_create_state(dc); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return false; } dc_resource_state_copy_construct( dc->current_state, context); /* For each full update, remove all existing phantom pipes first. 
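 * They are expected to be re-created during validation below if the
 * new configuration still needs SubVP.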
* Ensures that we have enough pipes for newly added MPO planes */ if (dc->res_pool->funcs->remove_phantom_pipes) dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); /*remove old surfaces from context */ if (!dc_rem_all_planes_for_stream(dc, stream, context)) { BREAK_TO_DEBUGGER(); goto fail; } /* add surface to context */ if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { BREAK_TO_DEBUGGER(); goto fail; } } /* save update parameters into surface */ for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; copy_surface_update_to_plane(surface, &srf_updates[i]); if (update_type >= UPDATE_TYPE_MED) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (pipe_ctx->plane_state != surface) continue; resource_build_scaling_params(pipe_ctx); } } } if (update_type == UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { /* For phantom pipes we remove and create a new set of phantom pipes * for each full update (because we don't know if we'll need phantom * pipes until after the first round of validation). However, if validation * fails we need to keep the existing phantom pipes (because we don't update * the dc->current_state). * * The phantom stream/plane refcount is decremented for validation because * we assume it'll be removed (the free comes when the dc_state is freed), * but if validation fails we have to increment back the refcount so it's * consistent. */ if (dc->res_pool->funcs->retain_phantom_pipes) dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); BREAK_TO_DEBUGGER(); goto fail; } } *new_context = context; *new_update_type = update_type; return true; fail: dc_release_state(context); return false; } static void commit_planes_do_stream_update(struct dc *dc, struct dc_stream_state *stream, struct dc_stream_update *stream_update, enum surface_update_type update_type, struct dc_state *context) { int j; // Stream updates for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || stream_update->vsc_infopacket || stream_update->vsp_infopacket || stream_update->hfvsif_infopacket || stream_update->adaptive_sync_infopacket || stream_update->vtem_infopacket) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); if (dc_is_dp_signal(pipe_ctx->stream->signal)) dc->link_srv->dp_trace_source_sequence( pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); } if (stream_update->hdr_static_metadata && stream->use_dynamic_meta && dc->hwss.set_dmdata_attributes && pipe_ctx->stream->dmdata_address.quad_part != 0) dc->hwss.set_dmdata_attributes(pipe_ctx); if (stream_update->gamut_remap) dc_stream_set_gamut_remap(dc, stream); if (stream_update->output_csc_transform) dc_stream_program_csc_matrix(dc, stream); if (stream_update->dither_option) { struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); while (odm_pipe) { 
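				/*
				 * With ODM combine each segment drives its own OPP, so the
				 * bit depth reduction / dither setup programmed on the head
				 * pipe above has to be repeated for every pipe in the ODM
				 * chain.
				 */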
odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, &stream->bit_depth_params, &stream->clamping); odm_pipe = odm_pipe->next_odm_pipe; } } /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; if (stream_update->dsc_config) dc->link_srv->update_dsc_config(pipe_ctx); if (stream_update->mst_bw_update) { if (stream_update->mst_bw_update->is_increase) dc->link_srv->increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); else dc->link_srv->reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); } if (stream_update->pending_test_pattern) { dc_link_dp_set_test_pattern(stream->link, stream->test_pattern.type, stream->test_pattern.color_space, stream->test_pattern.p_link_settings, stream->test_pattern.p_custom_pattern, stream->test_pattern.cust_pattern_size); } if (stream_update->dpms_off) { if (*stream_update->dpms_off) { dc->link_srv->set_dpms_off(pipe_ctx); /* for dpms, keep acquired resources*/ if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); dc->optimized_required = true; } else { if (get_seamless_boot_stream_count(context) == 0) dc->hwss.prepare_bandwidth(dc, dc->current_state); dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); } } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { /* * Workaround for firmware issue in some receivers where they don't pick up * correct output color space unless DP link is disabled/re-enabled */ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { bool should_program_abm = true; // if otg funcs defined check if blanked before programming if (pipe_ctx->stream_res.tg->funcs->is_blanked) if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) should_program_abm = false; if (should_program_abm) { if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { dc->hwss.set_abm_immediate_disable(pipe_ctx); } else { pipe_ctx->stream_res.abm->funcs->set_abm_level( pipe_ctx->stream_res.abm, stream->abm_level); } } } } } } static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) { if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) && stream->ctx->dce_version >= DCN_VERSION_3_1) return true; if (stream->link->replay_settings.config.replay_supported) return true; return false; } void dc_dmub_update_dirty_rect(struct dc *dc, int surface_count, struct dc_stream_state *stream, struct dc_surface_update *srf_updates, struct dc_state *context) { union dmub_rb_cmd cmd; struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; unsigned int i, j; unsigned int panel_inst = 0; if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) return; if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) return; memset(&cmd, 0x0, sizeof(cmd)); cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; cmd.update_dirty_rect.header.sub_type = 0; cmd.update_dirty_rect.header.payload_bytes = sizeof(cmd.update_dirty_rect) - sizeof(cmd.update_dirty_rect.header); update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; if 
(!srf_updates[i].surface || !flip_addr) continue; /* Do not send in immediate flip mode */ if (srf_updates[i].surface->flip_immediate) continue; update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (pipe_ctx->stream != stream) continue; if (pipe_ctx->plane_state != plane_state) continue; update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } } } static void build_dmub_update_dirty_rect( struct dc *dc, int surface_count, struct dc_stream_state *stream, struct dc_surface_update *srf_updates, struct dc_state *context, struct dc_dmub_cmd dc_dmub_cmd[], unsigned int *dmub_cmd_count) { union dmub_rb_cmd cmd; struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; unsigned int i, j; unsigned int panel_inst = 0; if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) return; if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) return; memset(&cmd, 0x0, sizeof(cmd)); cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; cmd.update_dirty_rect.header.sub_type = 0; cmd.update_dirty_rect.header.payload_bytes = sizeof(cmd.update_dirty_rect) - sizeof(cmd.update_dirty_rect.header); update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; if (!srf_updates[i].surface || !flip_addr) continue; /* Do not send in immediate flip mode */ if (srf_updates[i].surface->flip_immediate) continue; update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (pipe_ctx->stream != stream) continue; if (pipe_ctx->plane_state != plane_state) continue; update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; (*dmub_cmd_count)++; } } } /** * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB * * @dc: Current DC state * @srf_updates: Array of surface updates * @surface_count: Number of surfaces that have an updated * @stream: Corresponding stream to be updated in the current flip * @context: New DC state to be programmed * * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array * * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required * to build an array of commands and have them sent while the OTG lock is acquired. 
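 * In the fast commit path the resulting array is turned into a block sequence
 * by hwss_build_fast_sequence() and run via hwss_execute_sequence().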
* * Return: void */ static void build_dmub_cmd_list(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_state *context, struct dc_dmub_cmd dc_dmub_cmd[], unsigned int *dmub_cmd_count) { // Initialize cmd count to 0 *dmub_cmd_count = 0; build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); } static void commit_planes_for_stream_fast(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, enum surface_update_type update_type, struct dc_state *context) { int i, j; struct pipe_ctx *top_pipe_to_program = NULL; dc_z10_restore(dc); top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (dc->debug.visual_confirm) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->plane_state) dc_update_viusal_confirm_color(dc, context, pipe); } } for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; /*set logical flag for lock/unlock use*/ for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!pipe_ctx->plane_state) continue; if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && dc->hwss.program_triplebuffer && !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { /*triple buffer for VUpdate only*/ pipe_ctx->plane_state->triplebuffer_flips = true; } } } build_dmub_cmd_list(dc, srf_updates, surface_count, stream, context, context->dc_dmub_cmd, &(context->dmub_cmd_count)); hwss_build_fast_sequence(dc, context->dc_dmub_cmd, context->dmub_cmd_count, context->block_sequence, &(context->block_sequence_steps), top_pipe_to_program); hwss_execute_sequence(dc, context->block_sequence, context->block_sequence_steps); /* Clear update flags so next flip doesn't have redundant programming * (if there's no stream update, the update flags are not cleared). * Surface updates are cleared unconditionally at the beginning of each flip, * so no need to clear here. */ if (top_pipe_to_program->stream) top_pipe_to_program->stream->update_flags.raw = 0; } static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) { /* * This function calls HWSS to wait for any potentially double buffered * operations to complete. It should be invoked as a pre-amble prior * to full update programming before asserting any HW locks. 
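 * In practice this covers pending DRR double buffer updates and MPCC
 * disconnects that are still latched in hardware.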
*/ int pipe_idx; int opp_inst; int opp_count = dc->res_pool->pipe_count; struct hubp *hubp; int mpcc_inst; const struct pipe_ctx *pipe_ctx; for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx]; if (!pipe_ctx->stream) continue; if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); hubp = pipe_ctx->plane_res.hubp; if (!hubp) continue; mpcc_inst = hubp->inst; // MPCC inst is equal to pipe index in practice for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; break; } } } } static void commit_planes_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, enum surface_update_type update_type, struct dc_state *context) { int i, j; struct pipe_ctx *top_pipe_to_program = NULL; bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); bool subvp_prev_use = false; bool subvp_curr_use = false; // Once we apply the new subvp context to hardware it won't be in the // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context subvp_prev_use = false; dc_z10_restore(dc); if (update_type == UPDATE_TYPE_FULL) wait_for_outstanding_hw_updates(dc, context); if (update_type == UPDATE_TYPE_FULL) { dc_allow_idle_optimizations(dc, false); if (get_seamless_boot_stream_count(context) == 0) dc->hwss.prepare_bandwidth(dc, context); if (dc->debug.enable_double_buffered_dsc_pg_support) dc->hwss.update_dsc_pg(dc, context, false); context_clock_trace(dc, context); } top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; // Check old context for SubVP subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); if (subvp_prev_use) break; } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { subvp_curr_use = true; break; } } if (dc->debug.visual_confirm) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->plane_state) dc_update_viusal_confirm_color(dc, context, pipe); } if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; struct pipe_ctx *odm_pipe; for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; } if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) if (top_pipe_to_program && top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { if (should_use_dmub_lock(stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; hw_locks.bits.lock_dig = 1; inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, true, &hw_locks, &inst_flags); } else 
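			/* No DMUB HW lock manager for this link; lock the OTG
			 * double buffer registers directly instead.
			 */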
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( top_pipe_to_program->stream_res.tg); } if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); dc->hwss.interdependent_update_lock(dc, context, true); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); /* Lock the top pipe while updating plane addrs, since freesync requires * plane addr update event triggers to be synchronized. * top_pipe_to_program is expected to never be NULL */ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); } dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); // Stream updates if (stream_update) commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); if (surface_count == 0) { /* * In case of turning off screen, no need to program front end a second time. * just return after program blank. */ if (dc->hwss.apply_ctx_for_surface) dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, context); if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { dc->hwss.interdependent_update_lock(dc, context, false); } else { dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); } dc->hwss.post_unlock_program_front_end(dc, context); if (update_type != UPDATE_TYPE_FAST) if (dc->hwss.commit_subvp_config) dc->hwss.commit_subvp_config(dc, context); /* Since phantom pipe programming is moved to post_unlock_program_front_end, * move the SubVP lock to after the phantom pipes have been setup */ if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); return; } if (update_type != UPDATE_TYPE_FAST) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && pipe_ctx->stream && pipe_ctx->plane_state) { /* Only update visual confirm for SUBVP and Mclk switching here. * The bar appears on all pipes, so we need to update the bar on all displays, * so the information doesn't get stale. 
*/ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, pipe_ctx->plane_res.hubp->inst); } } } for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; /*set logical flag for lock/unlock use*/ for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!pipe_ctx->plane_state) continue; if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && dc->hwss.program_triplebuffer != NULL && !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { /*triple buffer for VUpdate only*/ pipe_ctx->plane_state->triplebuffer_flips = true; } } if (update_type == UPDATE_TYPE_FULL) { /* force vsync flip when reconfiguring pipes to prevent underflow */ plane_state->flip_immediate = false; } } // Update Type FULL, Surface updates for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && should_update_pipe_for_stream(context, pipe_ctx, stream)) { struct dc_stream_status *stream_status = NULL; if (!pipe_ctx->plane_state) continue; /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } stream_status = stream_get_status(context, pipe_ctx->stream); if (dc->hwss.apply_ctx_for_surface) dc->hwss.apply_ctx_for_surface( dc, pipe_ctx->stream, stream_status->plane_count, context); } } if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { dc->hwss.program_front_end_for_ctx(dc, context); if (dc->debug.validate_dml_output) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; if (cur_pipe->stream == NULL) continue; cur_pipe->plane_res.hubp->funcs->validate_dml_output( cur_pipe->plane_res.hubp, dc->ctx, &context->res_ctx.pipe_ctx[i].rq_regs, &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs); } } } // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { if (dc->hwss.set_flip_control_gsl) for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) continue; if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; // GSL has to be used for flip immediate dc->hwss.set_flip_control_gsl(pipe_ctx, pipe_ctx->plane_state->flip_immediate); } } /* Perform requested Updates */ for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) continue; if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; /*program triple buffer after lock based on flip type*/ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { /*only enable triplebuffer for fast_update*/ dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } if 
(pipe_ctx->plane_state->update_flags.bits.addr_update) dc->hwss.update_plane_addr(dc, pipe_ctx); } } } if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { dc->hwss.interdependent_update_lock(dc, context, false); } else { dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); } if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { top_pipe_to_program->stream_res.tg->funcs->wait_for_state( top_pipe_to_program->stream_res.tg, CRTC_STATE_VACTIVE); top_pipe_to_program->stream_res.tg->funcs->wait_for_state( top_pipe_to_program->stream_res.tg, CRTC_STATE_VBLANK); top_pipe_to_program->stream_res.tg->funcs->wait_for_state( top_pipe_to_program->stream_res.tg, CRTC_STATE_VACTIVE); if (should_use_dmub_lock(stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; hw_locks.bits.lock_dig = 1; inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, false, &hw_locks, &inst_flags); } else top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( top_pipe_to_program->stream_res.tg); } if (subvp_curr_use) { /* If enabling subvp or transitioning from subvp->subvp, enable the * phantom streams before we program front end for the phantom pipes. */ if (update_type != UPDATE_TYPE_FAST) { if (dc->hwss.enable_phantom_streams) dc->hwss.enable_phantom_streams(dc, context); } } if (update_type != UPDATE_TYPE_FAST) dc->hwss.post_unlock_program_front_end(dc, context); if (subvp_prev_use && !subvp_curr_use) { /* If disabling subvp, disable phantom streams after front end * programming has completed (we turn on phantom OTG in order * to complete the plane disable for phantom pipes). 
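		 * apply_ctx_to_hw() below performs that final teardown for the
		 * phantom pipes.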
*/ dc->hwss.apply_ctx_to_hw(dc, context); } if (update_type != UPDATE_TYPE_FAST) if (dc->hwss.commit_subvp_config) dc->hwss.commit_subvp_config(dc, context); /* Since phantom pipe programming is moved to post_unlock_program_front_end, * move the SubVP lock to after the phantom pipes have been setup */ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); } // Fire manual trigger only when bottom plane is flipped for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!pipe_ctx->plane_state) continue; if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || !pipe_ctx->plane_state->update_flags.bits.addr_update || pipe_ctx->plane_state->skip_manual_trigger) continue; if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); } } /** * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change * * @dc: Used to get the current state status * @stream: Target stream, which we want to remove the attached planes * @surface_count: Number of surface update * @is_plane_addition: [in] Fill out with true if it is a plane addition case * * DCN32x and newer support a feature named Dynamic ODM which can conflict with * the MPO if used simultaneously in some specific configurations (e.g., * 4k@144). This function checks if the incoming context requires applying a * transition state with unnecessary pipe splitting and ODM disabled to * circumvent our hardware limitations to prevent this edge case. If the OPP * associated with an MPCC might change due to plane additions, this function * returns true. * * Return: * Return true if OPP and MPCC might change, otherwise, return false. 
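 *
 * A typical case is adding an MPO plane to a single stream that already uses
 * dynamic ODM (e.g. 4k@144), which must transition through a minimal
 * pipe-split state first.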
*/ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, struct dc_stream_state *stream, int surface_count, bool *is_plane_addition) { struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); bool force_minimal_pipe_splitting = false; bool subvp_active = false; uint32_t i; *is_plane_addition = false; if (cur_stream_status && dc->current_state->stream_count > 0 && dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { /* determine if minimal transition is required due to MPC*/ if (surface_count > 0) { if (cur_stream_status->plane_count > surface_count) { force_minimal_pipe_splitting = true; } else if (cur_stream_status->plane_count < surface_count) { force_minimal_pipe_splitting = true; *is_plane_addition = true; } } } if (cur_stream_status && dc->current_state->stream_count == 1 && dc->debug.enable_single_display_2to1_odm_policy) { /* determine if minimal transition is required due to dynamic ODM*/ if (surface_count > 0) { if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) { force_minimal_pipe_splitting = true; } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) { force_minimal_pipe_splitting = true; *is_plane_addition = true; } } } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { subvp_active = true; break; } } /* For SubVP when adding or removing planes we need to add a minimal transition * (even when disabling all planes). Whenever disabling a phantom pipe, we * must use the minimal transition path to disable the pipe correctly. * * We want to use the minimal transition whenever subvp is active, not only if * a plane is being added / removed from a subvp stream (MPO plane can be added * to a DRR pipe of SubVP + DRR config, in which case we still want to run through * a min transition to disable subvp. */ if (cur_stream_status && subvp_active) { /* determine if minimal transition is required due to SubVP*/ if (cur_stream_status->plane_count > surface_count) { force_minimal_pipe_splitting = true; } else if (cur_stream_status->plane_count < surface_count) { force_minimal_pipe_splitting = true; *is_plane_addition = true; } } return force_minimal_pipe_splitting; } /** * commit_minimal_transition_state - Create a transition pipe split state * * @dc: Used to get the current state status * @transition_base_context: New transition state * * In some specific configurations, such as pipe split on multi-display with * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe * programming when moving to new planes. To mitigate those types of problems, * this function adds a transition state that minimizes pipe usage before * programming the new configuration. When adding a new plane, the current * state requires the least pipes, so it is applied without splitting. When * removing a plane, the new state requires the least pipes, so it is applied * without splitting. * * Return: * Return false if something is wrong in the transition state. 
*/ static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context) { struct dc_state *transition_context = dc_create_state(dc); enum pipe_split_policy tmp_mpc_policy = 0; bool temp_dynamic_odm_policy = 0; bool temp_subvp_policy = 0; enum dc_status ret = DC_ERROR_UNEXPECTED; unsigned int i, j; unsigned int pipe_in_use = 0; bool subvp_in_use = false; bool odm_in_use = false; if (!transition_context) return false; /* Setup: * Store the current ODM and MPC config in some temp variables to be * restored after we commit the transition state. */ /* check current pipes in use*/ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; if (pipe->plane_state) pipe_in_use++; } /* If SubVP is enabled and we are adding or removing planes from any main subvp * pipe, we must use the minimal transition. */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { subvp_in_use = true; break; } } /* If ODM is enabled and we are adding or removing planes from any ODM * pipe, we must use the minimal transition. */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->next_odm_pipe) { odm_in_use = true; break; } } /* When the OS add a new surface if we have been used all of pipes with odm combine * and mpc split feature, it need use commit_minimal_transition_state to transition safely. * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need * call it again. Otherwise return true to skip. * * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially * enter/exit MPO when DCN still have enough resources. */ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) { dc_release_state(transition_context); return true; } if (!dc->config.is_vmin_only_asic) { tmp_mpc_policy = dc->debug.pipe_split_policy; dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; } temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; dc->debug.enable_single_display_2to1_odm_policy = false; temp_subvp_policy = dc->debug.force_disable_subvp; dc->debug.force_disable_subvp = true; dc_resource_state_copy_construct(transition_base_context, transition_context); /* commit minimal state */ if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) { for (i = 0; i < transition_context->stream_count; i++) { struct dc_stream_status *stream_status = &transition_context->stream_status[i]; for (j = 0; j < stream_status->plane_count; j++) { struct dc_plane_state *plane_state = stream_status->plane_states[j]; /* force vsync flip when reconfiguring pipes to prevent underflow * and corruption */ plane_state->flip_immediate = false; } } ret = dc_commit_state_no_check(dc, transition_context); } /* always release as dc_commit_state_no_check retains in good case */ dc_release_state(transition_context); /* TearDown: * Restore original configuration for ODM and MPO. 
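	 * (The pipe split policy, the single display 2to1 ODM policy and the
	 * SubVP force-disable flag were overridden above and are restored here.)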
*/ if (!dc->config.is_vmin_only_asic) dc->debug.pipe_split_policy = tmp_mpc_policy; dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy; dc->debug.force_disable_subvp = temp_subvp_policy; if (ret != DC_OK) { /* this should never happen */ BREAK_TO_DEBUGGER(); return false; } /* force full surface update */ for (i = 0; i < dc->current_state->stream_count; i++) { for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; } } return true; } /** * update_seamless_boot_flags() - Helper function for updating seamless boot flags * * @dc: Current DC state * @context: New DC state to be programmed * @surface_count: Number of surfaces that have an updated * @stream: Corresponding stream to be updated in the current flip * * Updating seamless boot flags do not need to be part of the commit sequence. This * helper function will update the seamless boot flags on each flip (if required) * outside of the HW commit sequence (fast or slow). * * Return: void */ static void update_seamless_boot_flags(struct dc *dc, struct dc_state *context, int surface_count, struct dc_stream_state *stream) { if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { /* Optimize seamless boot flag keeps clocks and watermarks high until * first flip. After first flip, optimization is required to lower * bandwidth. Important to note that it is expected UEFI will * only light up a single display on POST, therefore we only expect * one stream with seamless boot flag set. */ if (stream->apply_seamless_boot_optimization) { stream->apply_seamless_boot_optimization = false; if (get_seamless_boot_stream_count(context) == 0) dc->optimized_required = true; } } } static void populate_fast_updates(struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update) { int i = 0; if (stream_update) { fast_update[0].out_transfer_func = stream_update->out_transfer_func; fast_update[0].output_csc_transform = stream_update->output_csc_transform; } for (i = 0; i < surface_count; i++) { fast_update[i].flip_addr = srf_updates[i].flip_addr; fast_update[i].gamma = srf_updates[i].gamma; fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; } } static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) { int i; if (fast_update[0].out_transfer_func || fast_update[0].output_csc_transform) return true; for (i = 0; i < surface_count; i++) { if (fast_update[i].flip_addr || fast_update[i].gamma || fast_update[i].gamut_remap_matrix || fast_update[i].input_csc_color_matrix || fast_update[i].coeff_reduction_factor) return true; } return false; } static bool full_update_required(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) { int i; struct dc_stream_status *stream_status; const struct dc_state *context = dc->current_state; for (i = 0; i < surface_count; i++) { if (srf_updates && (srf_updates[i].plane_info || srf_updates[i].scaling_info || (srf_updates[i].hdr_mult.value && srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || srf_updates[i].blend_tf || 
srf_updates[i].surface->force_full_update || (srf_updates[i].flip_addr && srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || !is_surface_in_context(context, srf_updates[i].surface))) return true; } if (stream_update && (((stream_update->src.height != 0 && stream_update->src.width != 0) || (stream_update->dst.height != 0 && stream_update->dst.width != 0) || stream_update->integer_scaling_update) || stream_update->hdr_static_metadata || stream_update->abm_level || stream_update->periodic_interrupt || stream_update->vrr_infopacket || stream_update->vsc_infopacket || stream_update->vsp_infopacket || stream_update->hfvsif_infopacket || stream_update->vtem_infopacket || stream_update->adaptive_sync_infopacket || stream_update->dpms_off || stream_update->allow_freesync || stream_update->vrr_active_variable || stream_update->vrr_active_fixed || stream_update->gamut_remap || stream_update->output_color_space || stream_update->dither_option || stream_update->wb_update || stream_update->dsc_config || stream_update->mst_bw_update || stream_update->func_shaper || stream_update->lut3d_func || stream_update->pending_test_pattern || stream_update->crtc_timing_adjust)) return true; if (stream) { stream_status = dc_stream_get_status(stream); if (stream_status == NULL || stream_status->plane_count != surface_count) return true; } if (dc->idle_optimizations_allowed) return true; return false; } static bool fast_update_only(struct dc *dc, struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) { return fast_updates_exist(fast_update, surface_count) && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update) { struct dc_state *context; enum surface_update_type update_type; int i; struct mall_temp_config mall_temp_config; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; /* In cases where MPO and split or ODM are used transitions can * cause underflow. Apply stream configuration with minimal pipe * split first to avoid unsupported transitions for active pipes. */ bool force_minimal_pipe_splitting = 0; bool is_plane_addition = 0; populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( dc, stream, surface_count, &is_plane_addition); /* on plane addition, minimal state is the current one */ if (force_minimal_pipe_splitting && is_plane_addition && !commit_minimal_transition_state(dc, dc->current_state)) return false; if (!update_planes_and_stream_state( dc, srf_updates, surface_count, stream, stream_update, &update_type, &context)) return false; /* on plane removal, minimal state is the new one */ if (force_minimal_pipe_splitting && !is_plane_addition) { /* Since all phantom pipes are removed in full validation, * we have to save and restore the subvp/mall config when * we do a minimal transition since the flags marking the * pipe as subvp/phantom will be cleared (dc copy constructor * creates a shallow copy). 
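		 * The save_mall_state()/restore_mall_state() calls below bracket the
		 * minimal transition for exactly this reason.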
*/ if (dc->res_pool->funcs->save_mall_state) dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); if (!commit_minimal_transition_state(dc, context)) { dc_release_state(context); return false; } if (dc->res_pool->funcs->restore_mall_state) dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); /* If we do a minimal transition with plane removal and the context * has subvp we also have to retain back the phantom stream / planes * since the refcount is decremented as part of the min transition * (we commit a state with no subvp, so the phantom streams / planes * had to be removed). */ if (dc->res_pool->funcs->retain_phantom_pipes) dc->res_pool->funcs->retain_phantom_pipes(dc, context); update_type = UPDATE_TYPE_FULL; } update_seamless_boot_flags(dc, context, surface_count, stream); if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, surface_count, stream, stream_update, update_type, context); } else { commit_planes_for_stream( dc, srf_updates, surface_count, stream, stream_update, update_type, context); } if (dc->current_state != context) { /* Since memory free requires elevated IRQL, an interrupt * request is generated by mem free. If this happens * between freeing and reassigning the context, our vsync * interrupt will call into dc and cause a memory * corruption BSOD. Hence, we first reassign the context, * then free the old context. */ struct dc_state *old = dc->current_state; dc->current_state = context; dc_release_state(old); // clear any forced full updates for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state && pipe_ctx->stream == stream) pipe_ctx->plane_state->force_full_update = false; } } return true; } void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, struct dc_state *state) { const struct dc_stream_status *stream_status; enum surface_update_type update_type; struct dc_state *context; struct dc_context *dc_ctx = dc->ctx; int i, j; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); stream_status = dc_stream_get_status(stream); context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); /* TODO: Since change commit sequence can have a huge impact, * we decided to only enable it for DCN3x. However, as soon as * we get more confident about this change we'll need to enable * the new sequence for all ASICs. */ if (dc->ctx->dce_version >= DCN_VERSION_3_2) { /* * Previous frame finished and HW is ready for optimization. 
*/ if (update_type == UPDATE_TYPE_FAST) dc_post_update_surfaces_to_stream(dc); dc_update_planes_and_stream(dc, srf_updates, surface_count, stream, stream_update); return; } if (update_type >= update_surface_trace_level) update_surface_trace(dc, srf_updates, surface_count); if (update_type >= UPDATE_TYPE_FULL) { /* initialize scratch memory for building context */ context = dc_create_state(dc); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return; } dc_resource_state_copy_construct(state, context); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) new_pipe->plane_state->force_full_update = true; } } else if (update_type == UPDATE_TYPE_FAST) { /* * Previous frame finished and HW is ready for optimization. */ dc_post_update_surfaces_to_stream(dc); } for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; copy_surface_update_to_plane(surface, &srf_updates[i]); if (update_type >= UPDATE_TYPE_MED) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (pipe_ctx->plane_state != surface) continue; resource_build_scaling_params(pipe_ctx); } } } copy_stream_update_to_stream(dc, context, stream, stream_update); if (update_type >= UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { DC_ERROR("Mode validation failed for stream update!\n"); dc_release_state(context); return; } } TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); update_seamless_boot_flags(dc, context, surface_count, stream); if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, surface_count, stream, stream_update, update_type, context); } else { commit_planes_for_stream( dc, srf_updates, surface_count, stream, stream_update, update_type, context); } /*update current_State*/ if (dc->current_state != context) { struct dc_state *old = dc->current_state; dc->current_state = context; dc_release_state(old); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state && pipe_ctx->stream == stream) pipe_ctx->plane_state->force_full_update = false; } } /* Legacy optimization path for DCE. 
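	 * Only taken on pre-DCN (DCE) ASICs, i.e. dce_version < DCE_VERSION_MAX.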
*/ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { dc_post_update_surfaces_to_stream(dc); TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); } return; } uint8_t dc_get_current_stream_count(struct dc *dc) { return dc->current_state->stream_count; } struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) { if (i < dc->current_state->stream_count) return dc->current_state->streams[i]; return NULL; } enum dc_irq_source dc_interrupt_to_irq_source( struct dc *dc, uint32_t src_id, uint32_t ext_id) { return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); } /* * dc_interrupt_set() - Enable/disable an AMD hw interrupt source */ bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) { if (dc == NULL) return false; return dal_irq_service_set(dc->res_pool->irqs, src, enable); } void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) { dal_irq_service_ack(dc->res_pool->irqs, src); } void dc_power_down_on_boot(struct dc *dc) { if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && dc->hwss.power_down_on_boot) dc->hwss.power_down_on_boot(dc); } void dc_set_power_state( struct dc *dc, enum dc_acpi_cm_power_state power_state) { struct kref refcount; struct display_mode_lib *dml; if (!dc->current_state) return; switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: dc_resource_state_construct(dc, dc->current_state); dc_z10_restore(dc); dc->hwss.init_hw(dc); if (dc->hwss.init_sys_ctx != NULL && dc->vm_pa_config.valid) { dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } break; default: ASSERT(dc->current_state->stream_count == 0); /* Zero out the current context so that on resume we start with * clean state, and dc hw programming optimizations will not * cause any trouble. */ dml = kzalloc(sizeof(struct display_mode_lib), GFP_KERNEL); ASSERT(dml); if (!dml) return; /* Preserve refcount */ refcount = dc->current_state->refcount; /* Preserve display mode lib */ memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib)); dc_resource_state_destruct(dc->current_state); memset(dc->current_state, 0, sizeof(*dc->current_state)); dc->current_state->refcount = refcount; dc->current_state->bw_ctx.dml = *dml; kfree(dml); break; } } void dc_resume(struct dc *dc) { uint32_t i; for (i = 0; i < dc->link_count; i++) dc->link_srv->resume(dc->links[i]); } bool dc_is_dmcu_initialized(struct dc *dc) { struct dmcu *dmcu = dc->res_pool->dmcu; if (dmcu) return dmcu->funcs->is_dmcu_initialized(dmcu); return false; } void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) { info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; } enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) { if (dc->hwss.set_clock) return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); return DC_ERROR_UNEXPECTED; } void dc_get_clock(struct dc *dc, enum 
dc_clock_type clock_type, struct dc_clock_config *clock_cfg) { if (dc->hwss.get_clock) dc->hwss.get_clock(dc, clock_type, clock_cfg); } /* enable/disable eDP PSR without specify stream for eDP */ bool dc_set_psr_allow_active(struct dc *dc, bool enable) { int i; bool allow_active; for (i = 0; i < dc->current_state->stream_count ; i++) { struct dc_link *link; struct dc_stream_state *stream = dc->current_state->streams[i]; link = stream->link; if (!link) continue; if (link->psr_settings.psr_feature_enabled) { if (enable && !link->psr_settings.psr_allow_active) { allow_active = true; if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) return false; } else if (!enable && link->psr_settings.psr_allow_active) { allow_active = false; if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) return false; } } } return true; } void dc_allow_idle_optimizations(struct dc *dc, bool allow) { if (dc->debug.disable_idle_power_optimizations) return; if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) return; if (allow == dc->idle_optimizations_allowed) return; if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) dc->idle_optimizations_allowed = allow; } /* set min and max memory clock to lowest and highest DPM level, respectively */ void dc_unlock_memory_clock_frequency(struct dc *dc) { if (dc->clk_mgr->funcs->set_hard_min_memclk) dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); if (dc->clk_mgr->funcs->set_hard_max_memclk) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); } /* set min memory clock to the min required for current mode, max to maxDPM */ void dc_lock_memory_clock_frequency(struct dc *dc) { if (dc->clk_mgr->funcs->get_memclk_states_from_smu) dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); if (dc->clk_mgr->funcs->set_hard_min_memclk) dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); if (dc->clk_mgr->funcs->set_hard_max_memclk) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); } static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) { struct dc_state *context = dc->current_state; struct hubp *hubp; struct pipe_ctx *pipe; int i; for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream != NULL) { dc->hwss.disable_pixel_data(dc, pipe, true); // wait for double buffer pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); hubp = pipe->plane_res.hubp; hubp->funcs->set_blank_regs(hubp, true); } } dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream != NULL) { dc->hwss.disable_pixel_data(dc, pipe, false); hubp = pipe->plane_res.hubp; hubp->funcs->set_blank_regs(hubp, false); } } } /** * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode * @dc: pointer to dc of the dm calling this * @enable: True = transition to DC mode, false = transition back to AC mode * * Some SoCs define additional clock limits when in DC mode, DM should * invoke this function when the platform undergoes a power source transition * so DC can apply/unapply the limit. 
This interface may be disruptive to * the onscreen content. * * Context: Triggered by OS through DM interface, or manually by escape calls. * Need to hold a dclock when doing so. * * Return: none (void function) * */ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) { unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; bool p_state_change_support; if (!dc->config.dc_mode_clk_limit_support) return; softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; } funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; p_state_change_support = dc->clk_mgr->clks.p_state_change_support; if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { if (p_state_change_support) { if (funcMin <= softMax) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); // else: No-Op } else { if (funcMin <= softMax) blank_and_force_memclk(dc, true, softMax); // else: No-Op } } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { if (p_state_change_support) { if (funcMin <= softMax) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); // else: No-Op } else { if (funcMin <= softMax) blank_and_force_memclk(dc, true, maxDPM); // else: No-Op } } dc->clk_mgr->dc_mode_softmax_enabled = enable; } bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr) { if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) return true; return false; } /* cleanup on driver unload */ void dc_hardware_release(struct dc *dc) { dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); if (dc->hwss.hardware_release) dc->hwss.hardware_release(dc); } void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) { if (dc->current_state) dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; } /** * dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification * * @dc: [in] dc structure * * Checks whether DMUB FW supports outbox notifications, if supported DM * should register outbox interrupt prior to actually enabling interrupts * via dc_enable_dmub_outbox * * Return: * True if DMUB FW supports outbox notifications, False otherwise */ bool dc_is_dmub_outbox_supported(struct dc *dc) { /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && !dc->debug.dpia_debug.bits.disable_dpia) return true; if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 && !dc->debug.dpia_debug.bits.disable_dpia) return true; /* dmub aux needs dmub notifications to be enabled */ return dc->debug.enable_dmub_aux_for_legacy_ddc; } /** * dc_enable_dmub_notifications - Check if dmub fw supports outbox * * @dc: [in] dc structure * * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This * API shall be removed after switching. * * Return: * True if DMUB FW supports outbox notifications, False otherwise */ bool dc_enable_dmub_notifications(struct dc *dc) { return dc_is_dmub_outbox_supported(dc); } /** * dc_enable_dmub_outbox - Enables DMUB unsolicited notification * * @dc: [in] dc structure * * Enables DMUB unsolicited notifications to x86 via outbox. 
*/ void dc_enable_dmub_outbox(struct dc *dc) { struct dc_context *dc_ctx = dc->ctx; dmub_enable_outbox_notification(dc_ctx->dmub_srv); DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); } /** * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message * Sets port index appropriately for legacy DDC * @dc: dc structure * @link_index: link index * @payload: aux payload * * Returns: True if successful, False if failure */ bool dc_process_dmub_aux_transfer_async(struct dc *dc, uint32_t link_index, struct aux_payload *payload) { uint8_t action; union dmub_rb_cmd cmd = {0}; ASSERT(payload->length <= 16); cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; cmd.dp_aux_access.header.payload_bytes = 0; /* For dpia, ddc_pin is set to NULL */ if (!dc->links[link_index]->ddc->ddc_pin) cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; else cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; cmd.dp_aux_access.aux_control.timeout = 0; cmd.dp_aux_access.aux_control.dpaux.address = payload->address; cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; cmd.dp_aux_access.aux_control.dpaux.length = payload->length; /* set aux action */ if (payload->i2c_over_aux) { if (payload->write) { if (payload->mot) action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; else action = DP_AUX_REQ_ACTION_I2C_WRITE; } else { if (payload->mot) action = DP_AUX_REQ_ACTION_I2C_READ_MOT; else action = DP_AUX_REQ_ACTION_I2C_READ; } } else { if (payload->write) action = DP_AUX_REQ_ACTION_DPCD_WRITE; else action = DP_AUX_REQ_ACTION_DPCD_READ; } cmd.dp_aux_access.aux_control.dpaux.action = action; if (payload->length && payload->write) { memcpy(cmd.dp_aux_access.aux_control.dpaux.data, payload->data, payload->length ); } dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, uint8_t dpia_port_index) { uint8_t index, link_index = 0xFF; for (index = 0; index < dc->link_count; index++) { /* ddc_hw_inst has dpia port index for dpia links * and ddc instance for legacy links */ if (!dc->links[index]->ddc->ddc_pin) { if (dc->links[index]->ddc_hw_inst == dpia_port_index) { link_index = index; break; } } } ASSERT(link_index != 0xFF); return link_index; } /** * dc_process_dmub_set_config_async - Submits set_config command * * @dc: [in] dc structure * @link_index: [in] link_index: link index * @payload: [in] aux payload * @notify: [out] set_config immediate reply * * Submits set_config command to dmub via inbox message. 
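 * If DMUB does not process the command, @notify->sc_status is set to
 * SET_CONFIG_UNKNOWN_ERROR and the call still reports completion.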
* * Return: * True if successful, False if failure */ bool dc_process_dmub_set_config_async(struct dc *dc, uint32_t link_index, struct set_config_cmd_payload *payload, struct dmub_notification *notify) { union dmub_rb_cmd cmd = {0}; bool is_cmd_complete = true; /* prepare SET_CONFIG command */ cmd.set_config_access.header.type = DMUB_CMD__DPIA; cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { /* command is not processed by dmub */ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; return is_cmd_complete; } /* command processed by dmub, if ret_status is 1, it is completed instantly */ if (cmd.set_config_access.header.ret_status == 1) notify->sc_status = cmd.set_config_access.set_config_control.immed_status; else /* cmd pending, will receive notification via outbox */ is_cmd_complete = false; return is_cmd_complete; } /** * dc_process_dmub_set_mst_slots - Submits MST solt allocation * * @dc: [in] dc structure * @link_index: [in] link index * @mst_alloc_slots: [in] mst slots to be allotted * @mst_slots_in_use: [out] mst slots in use returned in failure case * * Submits mst slot allocation command to dmub via inbox message * * Return: * DC_OK if successful, DC_ERROR if failure */ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, uint32_t link_index, uint8_t mst_alloc_slots, uint8_t *mst_slots_in_use) { union dmub_rb_cmd cmd = {0}; /* prepare MST_ALLOC_SLOTS command */ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) /* command is not processed by dmub */ return DC_ERROR_UNEXPECTED; /* command processed by dmub, if ret_status is 1 */ if (cmd.set_config_access.header.ret_status != 1) /* command processing error */ return DC_ERROR_UNEXPECTED; /* command processed and we have a status of 2, mst not enabled in dpia */ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) return DC_FAIL_UNSUPPORTED_1; /* previously configured mst alloc and used slots did not match */ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; return DC_NOT_SUPPORTED; } return DC_OK; } /** * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption * * @dc: [in] dc structure * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable * * Submits dpia hpd int enable command to dmub via inbox message */ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable) { union dmub_rb_cmd cmd = {0}; cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; cmd.dpia_hpd_int_enable.enable = hpd_int_enable; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); } /** * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging * * @dc: [in] dc structure * * */ void dc_print_dmub_diagnostic_data(const struct dc *dc) { 
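	/* Thin wrapper; the actual dump is performed by dc_dmub_srv_log_diagnostic_data(). */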
dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); } /** * dc_disable_accelerated_mode - disable accelerated mode * @dc: dc structure */ void dc_disable_accelerated_mode(struct dc *dc) { bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); } /** * dc_notify_vsync_int_state - notifies vsync enable/disable state * @dc: dc structure * @stream: stream where vsync int state changed * @enable: whether vsync is enabled or disabled * * Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM * interrupts after steady state is reached. */ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) { int i; int edp_num; struct pipe_ctx *pipe = NULL; struct dc_link *link = stream->sink->link; struct dc_link *edp_links[MAX_NUM_EDP]; if (link->psr_settings.psr_feature_enabled) return; if (link->replay_settings.replay_feature_enabled) return; /*find primary pipe associated with stream*/ for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) break; } if (i == MAX_PIPES) { ASSERT(0); return; } dc_get_edp_links(dc, edp_links, &edp_num); /* Determine panel inst */ for (i = 0; i < edp_num; i++) { if (edp_links[i] == link) break; } if (i == edp_num) { return; } if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); } /***************************************************************************** * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause * ABM * @dc: dc structure * @stream: stream where vsync int state changed * @pData: abm hw states * ****************************************************************************/ bool dc_abm_save_restore( struct dc *dc, struct dc_stream_state *stream, struct abm_save_restore *pData) { int i; int edp_num; struct pipe_ctx *pipe = NULL; struct dc_link *link = stream->sink->link; struct dc_link *edp_links[MAX_NUM_EDP]; /*find primary pipe associated with stream*/ for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) break; } if (i == MAX_PIPES) { ASSERT(0); return false; } dc_get_edp_links(dc, edp_links, &edp_num); /* Determine panel inst */ for (i = 0; i < edp_num; i++) if (edp_links[i] == link) break; if (i == edp_num) return false; if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->save_restore) return pipe->stream_res.abm->funcs->save_restore( pipe->stream_res.abm, i, pData); return false; } void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) { unsigned int i; bool subvp_in_use = false; for (i = 0; i < dc->current_state->stream_count; i++) { if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) { subvp_in_use = true; break; } } properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size; }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc.c
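A minimal caller sketch for the DPIA MST slot helper in dc.c above; it only illustrates how the DC_OK / DC_FAIL_UNSUPPORTED_1 / DC_NOT_SUPPORTED statuses decoded by dc_process_dmub_set_mst_slots() are meant to be interpreted. The request_mst_slots wrapper name and its retry policy are assumptions for this example, not part of dc.c.

/* Hypothetical caller, for illustration only. */
static enum dc_status request_mst_slots(const struct dc *dc, uint32_t link_index,
		uint8_t wanted_slots)
{
	uint8_t slots_in_use = 0;
	enum dc_status status;

	status = dc_process_dmub_set_mst_slots(dc, link_index, wanted_slots,
			&slots_in_use);

	if (status == DC_FAIL_UNSUPPORTED_1) {
		/* immed_status == 2: MST not enabled in the DPIA; do not retry. */
	} else if (status == DC_NOT_SUPPORTED) {
		/* immed_status == 3: a previous allocation does not match;
		 * slots_in_use now holds what the DPIA reports as allocated.
		 */
	}
	return status;
}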
/* Copyright 2021 Advanced Micro Devices, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "link_enc_cfg.h" #include "resource.h" #include "link.h" #define DC_LOGGER dc->ctx->logger /* Check whether stream is supported by DIG link encoders. */ static bool is_dig_link_enc_stream(struct dc_stream_state *stream) { bool is_dig_stream = false; struct link_encoder *link_enc = NULL; int i; /* Loop over created link encoder objects. */ if (stream) { for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { link_enc = stream->ctx->dc->res_pool->link_encoders[i]; /* Need to check link signal type rather than stream signal type which may not * yet match. */ if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) { if (dc_is_dp_signal(stream->signal)) { /* DIGs do not support DP2.0 streams with 128b/132b encoding. */ struct dc_link_settings link_settings = {0}; stream->ctx->dc->link_srv->dp_decide_link_settings(stream, &link_settings); if ((link_settings.link_rate >= LINK_RATE_LOW) && link_settings.link_rate <= LINK_RATE_HIGH3) { is_dig_stream = true; break; } } else { is_dig_stream = true; break; } } } } return is_dig_stream; } static struct link_enc_assignment get_assignment(struct dc *dc, int i) { struct link_enc_assignment assignment; if (dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_TRANSIENT) assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i]; else /* LINK_ENC_CFG_STEADY */ assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; return assignment; } /* Return stream using DIG link encoder resource. NULL if unused. */ static struct dc_stream_state *get_stream_using_link_enc( struct dc_state *state, enum engine_id eng_id) { struct dc_stream_state *stream = NULL; int i; for (i = 0; i < state->stream_count; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if ((assignment.valid == true) && (assignment.eng_id == eng_id)) { stream = state->streams[i]; break; } } return stream; } static void remove_link_enc_assignment( struct dc_state *state, struct dc_stream_state *stream, enum engine_id eng_id) { int eng_idx; int i; if (eng_id != ENGINE_ID_UNKNOWN) { eng_idx = eng_id - ENGINE_ID_DIGA; /* stream ptr of stream in dc_state used to update correct entry in * link_enc_assignments table. 
*/ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid && assignment.stream == stream) { state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; /* Only add link encoder back to availability pool if not being * used by any other stream (i.e. removing SST stream or last MST stream). */ if (get_stream_using_link_enc(state, eng_id) == NULL) state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id; stream->link_enc = NULL; state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; dc_stream_release(stream); break; } } } } static void add_link_enc_assignment( struct dc_state *state, struct dc_stream_state *stream, enum engine_id eng_id) { int eng_idx; int i; if (eng_id != ENGINE_ID_UNKNOWN) { eng_idx = eng_id - ENGINE_ID_DIGA; /* stream ptr of stream in dc_state used to update correct entry in * link_enc_assignments table. */ for (i = 0; i < state->stream_count; i++) { if (stream == state->streams[i]) { state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i] = (struct link_enc_assignment){ .valid = true, .ep_id = (struct display_endpoint_id) { .link_id = stream->link->link_id, .ep_type = stream->link->ep_type}, .eng_id = eng_id, .stream = stream}; dc_stream_retain(stream); state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN; stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx]; break; } } /* Attempted to add an encoder assignment for a stream not in dc_state. */ ASSERT(i != state->stream_count); } } /* Return first available DIG link encoder. */ static enum engine_id find_first_avail_link_enc( const struct dc_context *ctx, const struct dc_state *state, enum engine_id eng_id_requested) { enum engine_id eng_id = ENGINE_ID_UNKNOWN; int i; if (eng_id_requested != ENGINE_ID_UNKNOWN) { for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i]; if (eng_id == eng_id_requested) return eng_id; } } eng_id = ENGINE_ID_UNKNOWN; for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i]; if (eng_id != ENGINE_ID_UNKNOWN) break; } return eng_id; } /* Check for availability of link encoder eng_id. */ static bool is_avail_link_enc(struct dc_state *state, enum engine_id eng_id, struct dc_stream_state *stream) { bool is_avail = false; int eng_idx = eng_id - ENGINE_ID_DIGA; /* An encoder is available if it is still in the availability pool. */ if (eng_id != ENGINE_ID_UNKNOWN && state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] != ENGINE_ID_UNKNOWN) { is_avail = true; } else { struct dc_stream_state *stream_assigned = NULL; /* MST streams share the same link and should share the same encoder. * If a stream that has already been assigned a link encoder uses as the * same link as the stream checking for availability, it is an MST stream * and should use the same link encoder. */ stream_assigned = get_stream_using_link_enc(state, eng_id); if (stream_assigned && stream != stream_assigned && stream->link == stream_assigned->link) is_avail = true; } return is_avail; } /* Test for display_endpoint_id equality. 
*/ static bool are_ep_ids_equal(struct display_endpoint_id *lhs, struct display_endpoint_id *rhs) { bool are_equal = false; if (lhs->link_id.id == rhs->link_id.id && lhs->link_id.enum_id == rhs->link_id.enum_id && lhs->link_id.type == rhs->link_id.type && lhs->ep_type == rhs->ep_type) are_equal = true; return are_equal; } static struct link_encoder *get_link_enc_used_by_link( struct dc_state *state, const struct dc_link *link) { struct link_encoder *link_enc = NULL; struct display_endpoint_id ep_id; int i; ep_id = (struct display_endpoint_id) { .link_id = link->link_id, .ep_type = link->ep_type}; for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA]; } return link_enc; } /* Clear all link encoder assignments. */ static void clear_enc_assignments(const struct dc *dc, struct dc_state *state) { int i; for (i = 0; i < MAX_PIPES; i++) { state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) { dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream); state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; } } for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { if (dc->res_pool->link_encoders[i]) state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = (enum engine_id) i; else state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } } void link_enc_cfg_init( const struct dc *dc, struct dc_state *state) { clear_enc_assignments(dc, state); state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } void link_enc_cfg_copy(const struct dc_state *src_ctx, struct dc_state *dst_ctx) { memcpy(&dst_ctx->res_ctx.link_enc_cfg_ctx, &src_ctx->res_ctx.link_enc_cfg_ctx, sizeof(dst_ctx->res_ctx.link_enc_cfg_ctx)); } void link_enc_cfg_link_encs_assign( struct dc *dc, struct dc_state *state, struct dc_stream_state *streams[], uint8_t stream_count) { enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN; int i; int j; ASSERT(state->stream_count == stream_count); ASSERT(dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_STEADY); /* Release DIG link encoder resources before running assignment algorithm. */ for (i = 0; i < dc->current_state->stream_count; i++) dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]); for (i = 0; i < MAX_PIPES; i++) ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false); /* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */ for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; /* skip it if the link is mappable endpoint. */ if (stream->link->is_dig_mapping_flexible) continue; /* Skip stream if not supported by DIG link encoder. */ if (!is_dig_link_enc_stream(stream)) continue; /* Physical endpoints have a fixed mapping to DIG link encoders. */ eng_id = stream->link->eng_id; add_link_enc_assignment(state, stream, eng_id); } /* (b) Retain previous assignments for mappable endpoints if encoders still available. 
*/ eng_id = ENGINE_ID_UNKNOWN; if (state != dc->current_state) { struct dc_state *prev_state = dc->current_state; for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = state->streams[i]; /* Skip it if the link is NOT mappable endpoint. */ if (!stream->link->is_dig_mapping_flexible) continue; /* Skip stream if not supported by DIG link encoder. */ if (!is_dig_link_enc_stream(stream)) continue; for (j = 0; j < prev_state->stream_count; j++) { struct dc_stream_state *prev_stream = prev_state->streams[j]; if (stream == prev_stream && stream->link == prev_stream->link && prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) { eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id; if (is_avail_link_enc(state, eng_id, stream)) add_link_enc_assignment(state, stream, eng_id); } } } } /* (c) Then assign encoders to remaining mappable endpoints. */ eng_id = ENGINE_ID_UNKNOWN; for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; struct link_encoder *link_enc = NULL; /* Skip it if the link is NOT mappable endpoint. */ if (!stream->link->is_dig_mapping_flexible) continue; /* Skip if encoder assignment retained in step (b) above. */ if (stream->link_enc) continue; /* Skip stream if not supported by DIG link encoder. */ if (!is_dig_link_enc_stream(stream)) { ASSERT(stream->link->is_dig_mapping_flexible != true); continue; } /* Mappable endpoints have a flexible mapping to DIG link encoders. */ /* For MST, multiple streams will share the same link / display * endpoint. These streams should use the same link encoder * assigned to that endpoint. */ link_enc = get_link_enc_used_by_link(state, stream->link); if (link_enc == NULL) { if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN) eng_id_req = stream->link->dpia_preferred_eng_id; eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req); } else eng_id = link_enc->preferred_engine; add_link_enc_assignment(state, stream, eng_id); } link_enc_cfg_validate(dc, state); /* Update transient assignments. */ for (i = 0; i < MAX_PIPES; i++) { dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i] = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; } /* Log encoder assignments. */ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid) DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n", __func__, assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA", assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? assignment.ep_id.link_id.enum_id : assignment.ep_id.link_id.enum_id - 1, assignment.eng_id); } for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid) DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n", __func__, assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA", assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? assignment.ep_id.link_id.enum_id : assignment.ep_id.link_id.enum_id - 1, assignment.eng_id); } /* Current state mode will be set to steady once this state committed. 
*/ state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } void link_enc_cfg_link_enc_unassign( struct dc_state *state, struct dc_stream_state *stream) { enum engine_id eng_id = ENGINE_ID_UNKNOWN; if (stream->link_enc) eng_id = stream->link_enc->preferred_engine; remove_link_enc_assignment(state, stream, eng_id); } bool link_enc_cfg_is_transmitter_mappable( struct dc *dc, struct link_encoder *link_enc) { bool is_mappable = false; enum engine_id eng_id = link_enc->preferred_engine; struct dc_stream_state *stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id); if (stream) is_mappable = stream->link->is_dig_mapping_flexible; return is_mappable; } struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc( struct dc *dc, enum engine_id eng_id) { struct dc_stream_state *stream = NULL; int i; for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = get_assignment(dc, i); if ((assignment.valid == true) && (assignment.eng_id == eng_id)) { stream = assignment.stream; break; } } return stream; } struct dc_link *link_enc_cfg_get_link_using_link_enc( struct dc *dc, enum engine_id eng_id) { struct dc_link *link = NULL; struct dc_stream_state *stream = NULL; stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id); if (stream) link = stream->link; return link; } struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( struct dc *dc, const struct dc_link *link) { struct link_encoder *link_enc = NULL; struct display_endpoint_id ep_id; int i; ep_id = (struct display_endpoint_id) { .link_id = link->link_id, .ep_type = link->ep_type}; for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = get_assignment(dc, i); if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) { link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA]; break; } } return link_enc; } struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) { struct link_encoder *link_enc = NULL; enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS]; int i; for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++) encs_assigned[i] = ENGINE_ID_UNKNOWN; /* Add assigned encoders to list. */ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = get_assignment(dc, i); if (assignment.valid) encs_assigned[assignment.eng_id - ENGINE_ID_DIGA] = assignment.eng_id; } for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { if (encs_assigned[i] == ENGINE_ID_UNKNOWN && dc->res_pool->link_encoders[i] != NULL) { link_enc = dc->res_pool->link_encoders[i]; break; } } return link_enc; } struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( struct dc *dc, const struct dc_stream_state *stream) { struct link_encoder *link_enc; link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link); return link_enc; } struct link_encoder *link_enc_cfg_get_link_enc( const struct dc_link *link) { struct link_encoder *link_enc = NULL; /* Links supporting dynamically assigned link encoder will be assigned next * available encoder if one not already assigned. 
*/ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) { link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); if (link_enc == NULL) link_enc = link_enc_cfg_get_next_avail_link_enc( link->ctx->dc); } else link_enc = link->link_enc; return link_enc; } struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream_current( struct dc *dc, const struct dc_stream_state *stream) { struct link_encoder *link_enc = NULL; struct display_endpoint_id ep_id; int i; ep_id = (struct display_endpoint_id) { .link_id = stream->link->link_id, .ep_type = stream->link->ep_type}; for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) { link_enc = stream->link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA]; break; } } return link_enc; } bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link) { bool is_avail = true; int i; /* An encoder is not available if it has already been assigned to a different endpoint. */ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = get_assignment(dc, i); struct display_endpoint_id ep_id = (struct display_endpoint_id) { .link_id = link->link_id, .ep_type = link->ep_type}; if (assignment.valid && assignment.eng_id == eng_id && !are_ep_ids_equal(&ep_id, &assignment.ep_id)) { is_avail = false; break; } } return is_avail; } bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state) { bool is_valid = false; bool valid_entries = true; bool valid_stream_ptrs = true; bool valid_uniqueness = true; bool valid_avail = true; bool valid_streams = true; int i, j; uint8_t valid_count = 0; uint8_t dig_stream_count = 0; int eng_ids_per_ep_id[MAX_PIPES] = {0}; int ep_ids_per_eng_id[MAX_PIPES] = {0}; int valid_bitmap = 0; /* (1) No. valid entries same as stream count. */ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid) valid_count++; if (is_dig_link_enc_stream(state->streams[i])) dig_stream_count++; } if (valid_count != dig_stream_count) valid_entries = false; /* (2) Matching stream ptrs. */ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid) { if (assignment.stream != state->streams[i]) valid_stream_ptrs = false; } } /* (3) Each endpoint assigned unique encoder. */ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment_i = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment_i.valid) { struct display_endpoint_id ep_id_i = assignment_i.ep_id; eng_ids_per_ep_id[i]++; ep_ids_per_eng_id[i]++; for (j = 0; j < MAX_PIPES; j++) { struct link_enc_assignment assignment_j = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j]; if (j == i) continue; if (assignment_j.valid) { struct display_endpoint_id ep_id_j = assignment_j.ep_id; if (are_ep_ids_equal(&ep_id_i, &ep_id_j) && assignment_i.eng_id != assignment_j.eng_id) { valid_uniqueness = false; eng_ids_per_ep_id[i]++; } else if (!are_ep_ids_equal(&ep_id_i, &ep_id_j) && assignment_i.eng_id == assignment_j.eng_id) { valid_uniqueness = false; ep_ids_per_eng_id[i]++; } } } } } /* (4) Assigned encoders not in available pool. 
*/ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid) { for (j = 0; j < dc->res_pool->res_cap->num_dig_link_enc; j++) { if (state->res_ctx.link_enc_cfg_ctx.link_enc_avail[j] == assignment.eng_id) { valid_avail = false; break; } } } } /* (5) All streams have valid link encoders. */ for (i = 0; i < state->stream_count; i++) { struct dc_stream_state *stream = state->streams[i]; if (is_dig_link_enc_stream(stream) && stream->link_enc == NULL) { valid_streams = false; break; } } is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams; ASSERT(is_valid); if (is_valid == false) { valid_bitmap = (valid_entries & 0x1) | ((valid_stream_ptrs & 0x1) << 1) | ((valid_uniqueness & 0x1) << 2) | ((valid_avail & 0x1) << 3) | ((valid_streams & 0x1) << 4); DC_LOG_ERROR("%s: Invalid link encoder assignments - 0x%x\n", __func__, valid_bitmap); } return is_valid; } void link_enc_cfg_set_transient_mode(struct dc *dc, struct dc_state *current_state, struct dc_state *new_state) { int i = 0; int num_transient_assignments = 0; for (i = 0; i < MAX_PIPES; i++) { if (current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i].valid) num_transient_assignments++; } /* Only enter transient mode if the new encoder assignments are valid. */ if (new_state->stream_count == num_transient_assignments) { current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT; DC_LOG_DEBUG("%s: current_state(%p) mode(%d)\n", __func__, current_state, LINK_ENC_CFG_TRANSIENT); } }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
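A small sketch of how the valid_bitmap logged by link_enc_cfg_validate() above decomposes into its five checks. The decode_link_enc_valid_bitmap helper is an assumption made for this example and presumes the DC_LOGGER definition from dc_link_enc_cfg.c is in scope; it is not part of the file.

/* Hypothetical decoder for the bitmap built in link_enc_cfg_validate(). */
static void decode_link_enc_valid_bitmap(struct dc *dc, int valid_bitmap)
{
	static const char * const checks[] = {
		"valid entry count matches DIG stream count",  /* bit 0: valid_entries */
		"assignment stream pointers match dc_state",   /* bit 1: valid_stream_ptrs */
		"each endpoint uses a unique link encoder",    /* bit 2: valid_uniqueness */
		"assigned encoders removed from avail pool",   /* bit 3: valid_avail */
		"every DIG stream has a link encoder",         /* bit 4: valid_streams */
	};
	int i;

	for (i = 0; i < 5; i++) {
		if (!(valid_bitmap & (1 << i)))
			DC_LOG_ERROR("%s: failed check - %s\n", __func__, checks[i]);
	}
}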
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dm_helpers.h" #include "core_types.h" /******************************************************************************* * Private functions ******************************************************************************/ static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) { struct dc_link *link = init_params->link; if (!link) return false; sink->sink_signal = init_params->sink_signal; sink->link = link; sink->ctx = link->ctx; sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk; sink->converter_disable_audio = init_params->converter_disable_audio; sink->dc_container_id = NULL; sink->sink_id = init_params->link->ctx->dc_sink_id_count; // increment dc_sink_id_count because we don't want two sinks with same ID // unless they are actually the same init_params->link->ctx->dc_sink_id_count++; return true; } /******************************************************************************* * Public functions ******************************************************************************/ void dc_sink_retain(struct dc_sink *sink) { kref_get(&sink->refcount); } static void dc_sink_free(struct kref *kref) { struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); kfree(sink->dc_container_id); kfree(sink); } void dc_sink_release(struct dc_sink *sink) { kref_put(&sink->refcount, dc_sink_free); } struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params) { struct dc_sink *sink = kzalloc(sizeof(*sink), GFP_KERNEL); if (NULL == sink) goto alloc_fail; if (false == dc_sink_construct(sink, init_params)) goto construct_fail; kref_init(&sink->refcount); return sink; construct_fail: kfree(sink); alloc_fail: return NULL; } /******************************************************************************* * Protected functions - visible only inside of DC (not visible in DM) ******************************************************************************/
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_sink.c
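A brief lifetime sketch for the dc_sink helpers above, assuming a valid dc_link pointer is already in hand; the example_create_sink wrapper and the SIGNAL_TYPE_DISPLAY_PORT choice are illustrative assumptions rather than part of dc_sink.c.

/* Hypothetical wrapper showing the create/retain/release contract. */
static struct dc_sink *example_create_sink(struct dc_link *link)
{
	struct dc_sink_init_data init = {
		.link = link,
		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT, /* assumed signal type */
	};
	struct dc_sink *sink = dc_sink_create(&init);

	if (!sink)
		return NULL;            /* link was NULL or allocation failed */

	dc_sink_retain(sink);           /* extra reference for a second owner */
	dc_sink_release(sink);          /* drop it again; freed on last release */

	return sink;                    /* caller keeps the reference from create */
}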
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "resource.h" #include "include/irq_service_interface.h" #include "link_encoder.h" #include "stream_encoder.h" #include "opp.h" #include "timing_generator.h" #include "transform.h" #include "dccg.h" #include "dchubbub.h" #include "dpp.h" #include "core_types.h" #include "set_mode_types.h" #include "virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" #include "link_enc_cfg.h" #include "link.h" #include "virtual/virtual_link_hwss.h" #include "link/hwss/link_hwss_dio.h" #include "link/hwss/link_hwss_dpia.h" #include "link/hwss/link_hwss_hpo_dp.h" #include "link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h" #include "link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h" #if defined(CONFIG_DRM_AMD_DC_SI) #include "dce60/dce60_resource.h" #endif #include "dce80/dce80_resource.h" #include "dce100/dce100_resource.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" #include "dce120/dce120_resource.h" #include "dcn10/dcn10_resource.h" #include "dcn20/dcn20_resource.h" #include "dcn21/dcn21_resource.h" #include "dcn201/dcn201_resource.h" #include "dcn30/dcn30_resource.h" #include "dcn301/dcn301_resource.h" #include "dcn302/dcn302_resource.h" #include "dcn303/dcn303_resource.h" #include "dcn31/dcn31_resource.h" #include "dcn314/dcn314_resource.h" #include "dcn315/dcn315_resource.h" #include "dcn316/dcn316_resource.h" #include "../dcn32/dcn32_resource.h" #include "../dcn321/dcn321_resource.h" #define VISUAL_CONFIRM_BASE_DEFAULT 3 #define VISUAL_CONFIRM_BASE_MIN 1 #define VISUAL_CONFIRM_BASE_MAX 10 /* we choose 240 because it is a common denominator of common v addressable * such as 2160, 1440, 1200, 960. So we take 1/240 portion of v addressable as * the visual confirm dpp offset height. So visual confirm height can stay * relatively the same independent from timing used. 
*/ #define VISUAL_CONFIRM_DPP_OFFSET_DENO 240 #define DC_LOGGER_INIT(logger) #define UNABLE_TO_SPLIT -1 enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; switch (asic_id.chip_family) { #if defined(CONFIG_DRM_AMD_DC_SI) case FAMILY_SI: if (ASIC_REV_IS_TAHITI_P(asic_id.hw_internal_rev) || ASIC_REV_IS_PITCAIRN_PM(asic_id.hw_internal_rev) || ASIC_REV_IS_CAPEVERDE_M(asic_id.hw_internal_rev)) dc_version = DCE_VERSION_6_0; else if (ASIC_REV_IS_OLAND_M(asic_id.hw_internal_rev)) dc_version = DCE_VERSION_6_4; else dc_version = DCE_VERSION_6_1; break; #endif case FAMILY_CI: dc_version = DCE_VERSION_8_0; break; case FAMILY_KV: if (ASIC_REV_IS_KALINDI(asic_id.hw_internal_rev) || ASIC_REV_IS_BHAVANI(asic_id.hw_internal_rev) || ASIC_REV_IS_GODAVARI(asic_id.hw_internal_rev)) dc_version = DCE_VERSION_8_3; else dc_version = DCE_VERSION_8_1; break; case FAMILY_CZ: dc_version = DCE_VERSION_11_0; break; case FAMILY_VI: if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) || ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) { dc_version = DCE_VERSION_10_0; break; } if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) || ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) || ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) { dc_version = DCE_VERSION_11_2; } if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) dc_version = DCE_VERSION_11_22; break; case FAMILY_AI: if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev)) dc_version = DCE_VERSION_12_1; else dc_version = DCE_VERSION_12_0; break; case FAMILY_RV: dc_version = DCN_VERSION_1_0; if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_1_01; if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_2_1; if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_2_1; break; case FAMILY_NV: dc_version = DCN_VERSION_2_0; if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) { dc_version = DCN_VERSION_2_01; break; } if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_0; if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_02; if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_03; break; case FAMILY_VGH: dc_version = DCN_VERSION_3_01; break; case FAMILY_YELLOW_CARP: if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_1; break; case AMDGPU_FAMILY_GC_10_3_6: if (ASICREV_IS_GC_10_3_6(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_15; break; case AMDGPU_FAMILY_GC_10_3_7: if (ASICREV_IS_GC_10_3_7(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_16; break; case AMDGPU_FAMILY_GC_11_0_0: dc_version = DCN_VERSION_3_2; if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_21; break; case AMDGPU_FAMILY_GC_11_0_1: dc_version = DCN_VERSION_3_14; break; default: dc_version = DCE_VERSION_UNKNOWN; break; } return dc_version; } struct resource_pool *dc_create_resource_pool(struct dc *dc, const struct dc_init_data *init_data, enum dce_version dc_version) { struct resource_pool *res_pool = NULL; switch (dc_version) { #if defined(CONFIG_DRM_AMD_DC_SI) case DCE_VERSION_6_0: res_pool = dce60_create_resource_pool( init_data->num_virtual_links, dc); break; case DCE_VERSION_6_1: res_pool = dce61_create_resource_pool( init_data->num_virtual_links, dc); break; case DCE_VERSION_6_4: res_pool = dce64_create_resource_pool( init_data->num_virtual_links, dc); break; #endif case DCE_VERSION_8_0: res_pool = 
dce80_create_resource_pool( init_data->num_virtual_links, dc); break; case DCE_VERSION_8_1: res_pool = dce81_create_resource_pool( init_data->num_virtual_links, dc); break; case DCE_VERSION_8_3: res_pool = dce83_create_resource_pool( init_data->num_virtual_links, dc); break; case DCE_VERSION_10_0: res_pool = dce100_create_resource_pool( init_data->num_virtual_links, dc); break; case DCE_VERSION_11_0: res_pool = dce110_create_resource_pool( init_data->num_virtual_links, dc, init_data->asic_id); break; case DCE_VERSION_11_2: case DCE_VERSION_11_22: res_pool = dce112_create_resource_pool( init_data->num_virtual_links, dc); break; case DCE_VERSION_12_0: case DCE_VERSION_12_1: res_pool = dce120_create_resource_pool( init_data->num_virtual_links, dc); break; #if defined(CONFIG_DRM_AMD_DC_FP) case DCN_VERSION_1_0: case DCN_VERSION_1_01: res_pool = dcn10_create_resource_pool(init_data, dc); break; case DCN_VERSION_2_0: res_pool = dcn20_create_resource_pool(init_data, dc); break; case DCN_VERSION_2_1: res_pool = dcn21_create_resource_pool(init_data, dc); break; case DCN_VERSION_2_01: res_pool = dcn201_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_0: res_pool = dcn30_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_01: res_pool = dcn301_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_02: res_pool = dcn302_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_03: res_pool = dcn303_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_1: res_pool = dcn31_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_14: res_pool = dcn314_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_15: res_pool = dcn315_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_16: res_pool = dcn316_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_2: res_pool = dcn32_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_21: res_pool = dcn321_create_resource_pool(init_data, dc); break; #endif /* CONFIG_DRM_AMD_DC_FP */ default: break; } if (res_pool != NULL) { if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; /* initialize with firmware data first, no all * ASIC have DCCG SW component. 
FPGA or * simulation need initialization of * dccg_ref_clock_inKhz, dchub_ref_clock_inKhz * with xtalin_clock_inKhz */ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; } else ASSERT_CRITICAL(false); } return res_pool; } void dc_destroy_resource_pool(struct dc *dc) { if (dc) { if (dc->res_pool) dc->res_pool->funcs->destroy(&dc->res_pool); kfree(dc->hwseq); } } static void update_num_audio( const struct resource_straps *straps, unsigned int *num_audio, struct audio_support *aud_support) { aud_support->dp_audio = true; aud_support->hdmi_audio_native = false; aud_support->hdmi_audio_on_dongle = false; if (straps->hdmi_disable == 0) { if (straps->dc_pinstraps_audio & 0x2) { aud_support->hdmi_audio_on_dongle = true; aud_support->hdmi_audio_native = true; } } switch (straps->audio_stream_number) { case 0: /* multi streams supported */ break; case 1: /* multi streams not supported */ *num_audio = 1; break; default: DC_ERR("DC: unexpected audio fuse!\n"); } } bool resource_construct( unsigned int num_virtual_links, struct dc *dc, struct resource_pool *pool, const struct resource_create_funcs *create_funcs) { struct dc_context *ctx = dc->ctx; const struct resource_caps *caps = pool->res_cap; int i; unsigned int num_audio = caps->num_audio; struct resource_straps straps = {0}; if (create_funcs->read_dce_straps) create_funcs->read_dce_straps(dc->ctx, &straps); pool->audio_count = 0; if (create_funcs->create_audio) { /* find the total number of streams available via the * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT * registers (one for each pin) starting from pin 1 * up to the max number of audio pins. * We stop on the first pin where * PORT_CONNECTIVITY == 1 (as instructed by HW team). 
*/ update_num_audio(&straps, &num_audio, &pool->audio_support); for (i = 0; i < caps->num_audio; i++) { struct audio *aud = create_funcs->create_audio(ctx, i); if (aud == NULL) { DC_ERR("DC: failed to create audio!\n"); return false; } if (!aud->funcs->endpoint_valid(aud)) { aud->funcs->destroy(&aud); break; } pool->audios[i] = aud; pool->audio_count++; } } pool->stream_enc_count = 0; if (create_funcs->create_stream_encoder) { for (i = 0; i < caps->num_stream_encoder; i++) { pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx); if (pool->stream_enc[i] == NULL) DC_ERR("DC: failed to create stream_encoder!\n"); pool->stream_enc_count++; } } pool->hpo_dp_stream_enc_count = 0; if (create_funcs->create_hpo_dp_stream_encoder) { for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) { pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx); if (pool->hpo_dp_stream_enc[i] == NULL) DC_ERR("DC: failed to create HPO DP stream encoder!\n"); pool->hpo_dp_stream_enc_count++; } } pool->hpo_dp_link_enc_count = 0; if (create_funcs->create_hpo_dp_link_encoder) { for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) { pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx); if (pool->hpo_dp_link_enc[i] == NULL) DC_ERR("DC: failed to create HPO DP link encoder!\n"); pool->hpo_dp_link_enc_count++; } } for (i = 0; i < caps->num_mpc_3dlut; i++) { pool->mpc_lut[i] = dc_create_3dlut_func(); if (pool->mpc_lut[i] == NULL) DC_ERR("DC: failed to create MPC 3dlut!\n"); pool->mpc_shaper[i] = dc_create_transfer_func(); if (pool->mpc_shaper[i] == NULL) DC_ERR("DC: failed to create MPC shaper!\n"); } dc->caps.dynamic_audio = false; if (pool->audio_count < pool->stream_enc_count) { dc->caps.dynamic_audio = true; } for (i = 0; i < num_virtual_links; i++) { pool->stream_enc[pool->stream_enc_count] = virtual_stream_encoder_create( ctx, ctx->dc_bios); if (pool->stream_enc[pool->stream_enc_count] == NULL) { DC_ERR("DC: failed to create stream_encoder!\n"); return false; } pool->stream_enc_count++; } dc->hwseq = create_funcs->create_hwseq(ctx); return true; } static int find_matching_clock_source( const struct resource_pool *pool, struct clock_source *clock_source) { int i; for (i = 0; i < pool->clk_src_count; i++) { if (pool->clock_sources[i] == clock_source) return i; } return -1; } void resource_unreference_clock_source( struct resource_context *res_ctx, const struct resource_pool *pool, struct clock_source *clock_source) { int i = find_matching_clock_source(pool, clock_source); if (i > -1) res_ctx->clock_source_ref_count[i]--; if (pool->dp_clock_source == clock_source) res_ctx->dp_clock_source_ref_count--; } void resource_reference_clock_source( struct resource_context *res_ctx, const struct resource_pool *pool, struct clock_source *clock_source) { int i = find_matching_clock_source(pool, clock_source); if (i > -1) res_ctx->clock_source_ref_count[i]++; if (pool->dp_clock_source == clock_source) res_ctx->dp_clock_source_ref_count++; } int resource_get_clock_source_reference( struct resource_context *res_ctx, const struct resource_pool *pool, struct clock_source *clock_source) { int i = find_matching_clock_source(pool, clock_source); if (i > -1) return res_ctx->clock_source_ref_count[i]; if (pool->dp_clock_source == clock_source) return res_ctx->dp_clock_source_ref_count; return -1; } bool resource_are_vblanks_synchronizable( struct dc_stream_state *stream1, struct dc_stream_state *stream2) { uint32_t base60_refresh_rates[] = {10, 20, 5}; uint8_t i; 
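	/* Note (added for clarity): the check below accepts the two streams when
	 * stream1's frame time is roughly equal to, half of, or double stream2's
	 * frame time (the base60_refresh_rates entries 10, 20 and 5 encode these
	 * ratios in tenths), within the configured
	 * vblank_alignment_max_frame_time_diff tolerance.
	 */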
uint8_t rr_count = ARRAY_SIZE(base60_refresh_rates); uint64_t frame_time_diff; if (stream1->ctx->dc->config.vblank_alignment_dto_params && stream1->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0 && dc_is_dp_signal(stream1->signal) && dc_is_dp_signal(stream2->signal) && false == stream1->has_non_synchronizable_pclk && false == stream2->has_non_synchronizable_pclk && stream1->timing.flags.VBLANK_SYNCHRONIZABLE && stream2->timing.flags.VBLANK_SYNCHRONIZABLE) { /* disable refresh rates higher than 60Hz for now */ if (stream1->timing.pix_clk_100hz*100/stream1->timing.h_total/ stream1->timing.v_total > 60) return false; if (stream2->timing.pix_clk_100hz*100/stream2->timing.h_total/ stream2->timing.v_total > 60) return false; frame_time_diff = (uint64_t)10000 * stream1->timing.h_total * stream1->timing.v_total * stream2->timing.pix_clk_100hz; frame_time_diff = div_u64(frame_time_diff, stream1->timing.pix_clk_100hz); frame_time_diff = div_u64(frame_time_diff, stream2->timing.h_total); frame_time_diff = div_u64(frame_time_diff, stream2->timing.v_total); for (i = 0; i < rr_count; i++) { int64_t diff = (int64_t)div_u64(frame_time_diff * base60_refresh_rates[i], 10) - 10000; if (diff < 0) diff = -diff; if (diff < stream1->ctx->dc->config.vblank_alignment_max_frame_time_diff) return true; } } return false; } bool resource_are_streams_timing_synchronizable( struct dc_stream_state *stream1, struct dc_stream_state *stream2) { if (stream1->timing.h_total != stream2->timing.h_total) return false; if (stream1->timing.v_total != stream2->timing.v_total) return false; if (stream1->timing.h_addressable != stream2->timing.h_addressable) return false; if (stream1->timing.v_addressable != stream2->timing.v_addressable) return false; if (stream1->timing.v_front_porch != stream2->timing.v_front_porch) return false; if (stream1->timing.pix_clk_100hz != stream2->timing.pix_clk_100hz) return false; if (stream1->clamping.c_depth != stream2->clamping.c_depth) return false; if (stream1->phy_pix_clk != stream2->phy_pix_clk && (!dc_is_dp_signal(stream1->signal) || !dc_is_dp_signal(stream2->signal))) return false; if (stream1->view_format != stream2->view_format) return false; if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param) return false; return true; } static bool is_dp_and_hdmi_sharable( struct dc_stream_state *stream1, struct dc_stream_state *stream2) { if (stream1->ctx->dc->caps.disable_dp_clk_share) return false; if (stream1->clamping.c_depth != COLOR_DEPTH_888 || stream2->clamping.c_depth != COLOR_DEPTH_888) return false; return true; } static bool is_sharable_clk_src( const struct pipe_ctx *pipe_with_clk_src, const struct pipe_ctx *pipe) { if (pipe_with_clk_src->clock_source == NULL) return false; if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL) return false; if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) || (dc_is_dp_signal(pipe->stream->signal) && !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream, pipe->stream))) return false; if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal) && dc_is_dual_link_signal(pipe->stream->signal)) return false; if (dc_is_hdmi_signal(pipe->stream->signal) && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal)) return false; if (!resource_are_streams_timing_synchronizable( pipe_with_clk_src->stream, pipe->stream)) return false; return true; } struct clock_source *resource_find_used_clk_src_for_sharing( struct resource_context *res_ctx, struct pipe_ctx *pipe_ctx) { int i; for (i = 0; i < MAX_PIPES; i++) { if 
(is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx)) return res_ctx->pipe_ctx[i].clock_source; } return NULL; } static enum pixel_format convert_pixel_format_to_dalsurface( enum surface_pixel_format surface_pixel_format) { enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN; switch (surface_pixel_format) { case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: dal_pixel_format = PIXEL_FORMAT_INDEX8; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: dal_pixel_format = PIXEL_FORMAT_RGB565; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB565: dal_pixel_format = PIXEL_FORMAT_RGB565; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: dal_pixel_format = PIXEL_FORMAT_ARGB8888; break; case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: dal_pixel_format = PIXEL_FORMAT_ARGB8888; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: dal_pixel_format = PIXEL_FORMAT_ARGB2101010; break; case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: dal_pixel_format = PIXEL_FORMAT_ARGB2101010; break; case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS; break; case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: dal_pixel_format = PIXEL_FORMAT_FP16; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: dal_pixel_format = PIXEL_FORMAT_420BPP8; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: dal_pixel_format = PIXEL_FORMAT_420BPP10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: default: dal_pixel_format = PIXEL_FORMAT_UNKNOWN; break; } return dal_pixel_format; } static inline void get_vp_scan_direction( enum dc_rotation_angle rotation, bool horizontal_mirror, bool *orthogonal_rotation, bool *flip_vert_scan_dir, bool *flip_horz_scan_dir) { *orthogonal_rotation = false; *flip_vert_scan_dir = false; *flip_horz_scan_dir = false; if (rotation == ROTATION_ANGLE_180) { *flip_vert_scan_dir = true; *flip_horz_scan_dir = true; } else if (rotation == ROTATION_ANGLE_90) { *orthogonal_rotation = true; *flip_horz_scan_dir = true; } else if (rotation == ROTATION_ANGLE_270) { *orthogonal_rotation = true; *flip_vert_scan_dir = true; } if (horizontal_mirror) *flip_horz_scan_dir = !*flip_horz_scan_dir; } int resource_get_num_mpc_splits(const struct pipe_ctx *pipe) { int mpc_split_count = 0; const struct pipe_ctx *other_pipe = pipe->bottom_pipe; while (other_pipe && other_pipe->plane_state == pipe->plane_state) { mpc_split_count++; other_pipe = other_pipe->bottom_pipe; } other_pipe = pipe->top_pipe; while (other_pipe && other_pipe->plane_state == pipe->plane_state) { mpc_split_count++; other_pipe = other_pipe->top_pipe; } return mpc_split_count; } int resource_get_num_odm_splits(const struct pipe_ctx *pipe) { int odm_split_count = 0; pipe = resource_get_otg_master(pipe); while (pipe->next_odm_pipe) { odm_split_count++; pipe = pipe->next_odm_pipe; } return odm_split_count; } static int get_odm_split_index(struct pipe_ctx *pipe_ctx) { int index = 0; pipe_ctx = resource_get_opp_head(pipe_ctx); if (!pipe_ctx) return 0; while (pipe_ctx->prev_odm_pipe) { index++; pipe_ctx = pipe_ctx->prev_odm_pipe; } return index; } static int get_mpc_split_index(struct pipe_ctx *pipe_ctx) { struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; int index = 0; while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { index++; split_pipe = split_pipe->top_pipe; } return index; } /* * This is a preliminary vp size calculation to allow us to check 
taps support. * The result is completely overridden afterwards. */ static void calculate_viewport_size(struct pipe_ctx *pipe_ctx) { struct scaler_data *data = &pipe_ctx->plane_res.scl_data; data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width)); data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height)); data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width)); data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height)); if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { swap(data->viewport.width, data->viewport.height); swap(data->viewport_c.width, data->viewport_c.height); } } static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) { struct rect rec; int r0_x_end = r0->x + r0->width; int r1_x_end = r1->x + r1->width; int r0_y_end = r0->y + r0->height; int r1_y_end = r1->y + r1->height; rec.x = r0->x > r1->x ? r0->x : r1->x; rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x; rec.y = r0->y > r1->y ? r0->y : r1->y; rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y; /* in case that there is no intersection */ if (rec.width < 0 || rec.height < 0) memset(&rec, 0, sizeof(rec)); return rec; } static struct rect shift_rec(const struct rect *rec_in, int x, int y) { struct rect rec_out = *rec_in; rec_out.x += x; rec_out.y += y; return rec_out; } static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx) { const struct dc_stream_state *stream = pipe_ctx->stream; int odm_slice_count = resource_get_num_odm_splits(pipe_ctx) + 1; int odm_slice_idx = get_odm_split_index(pipe_ctx); bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count; int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; int odm_slice_width = h_active / odm_slice_count; struct rect odm_rec; odm_rec.x = odm_slice_width * odm_slice_idx; odm_rec.width = is_last_odm_slice ? /* last slice width is the reminder of h_active */ h_active - odm_slice_width * (odm_slice_count - 1) : /* odm slice width is the floor of h_active / count */ odm_slice_width; odm_rec.y = 0; odm_rec.height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; return odm_rec; } static struct rect calculate_plane_rec_in_timing_active( struct pipe_ctx *pipe_ctx, const struct rect *rec_in) { /* * The following diagram shows an example where we map a 1920x1200 * desktop to a 2560x1440 timing with a plane rect in the middle * of the screen. To map a plane rect from Stream Source to Timing * Active space, we first multiply stream scaling ratios (i.e 2304/1920 * horizontal and 1440/1200 vertical) to the plane's x and y, then * we add stream destination offsets (i.e 128 horizontal, 0 vertical). * This will give us a plane rect's position in Timing Active. However * we have to remove the fractional. The rule is that we find left/right * and top/bottom positions and round the value to the adjacent integer. 
* * Stream Source Space * ------------ * __________________________________________________ * |Stream Source (1920 x 1200) ^ | * | y | * | <------- w --------|> | * | __________________V | * |<-- x -->|Plane//////////////| ^ | * | |(pre scale)////////| | | * | |///////////////////| | | * | |///////////////////| h | * | |///////////////////| | | * | |///////////////////| | | * | |///////////////////| V | * | | * | | * |__________________________________________________| * * * Timing Active Space * --------------------------------- * * Timing Active (2560 x 1440) * __________________________________________________ * |*****| Stteam Destination (2304 x 1440) |*****| * |*****| |*****| * |<128>| |*****| * |*****| __________________ |*****| * |*****| |Plane/////////////| |*****| * |*****| |(post scale)//////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |*****| * |*****| |*****| * |*****| |*****| * |*****|______________________________________|*****| * * So the resulting formulas are shown below: * * recout_x = 128 + round(plane_x * 2304 / 1920) * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x * recout_y = 0 + round(plane_y * 1440 / 1280) * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y * * NOTE: fixed point division is not error free. To reduce errors * introduced by fixed point division, we divide only after * multiplication is complete. */ const struct dc_stream_state *stream = pipe_ctx->stream; struct rect rec_out = {0}; struct fixed31_32 temp; temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width, stream->src.width); rec_out.x = stream->dst.x + dc_fixpt_round(temp); temp = dc_fixpt_from_fraction( (rec_in->x + rec_in->width) * stream->dst.width, stream->src.width); rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x; temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height, stream->src.height); rec_out.y = stream->dst.y + dc_fixpt_round(temp); temp = dc_fixpt_from_fraction( (rec_in->y + rec_in->height) * stream->dst.height, stream->src.height); rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y; return rec_out; } static struct rect calculate_mpc_slice_in_timing_active( struct pipe_ctx *pipe_ctx, struct rect *plane_clip_rec) { const struct dc_stream_state *stream = pipe_ctx->stream; int mpc_slice_count = resource_get_num_mpc_splits(pipe_ctx) + 1; int mpc_slice_idx = get_mpc_split_index(pipe_ctx); int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1; struct rect mpc_rec; mpc_rec.width = plane_clip_rec->width / mpc_slice_count; mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx; mpc_rec.height = plane_clip_rec->height; mpc_rec.y = plane_clip_rec->y; ASSERT(mpc_slice_count == 1 || stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || mpc_rec.width % 2 == 0); /* extra pixels in the division remainder need to go to pipes after * the extra pixel index minus one(epimo) defined here as: */ if (mpc_slice_idx > epimo) { mpc_rec.x += mpc_slice_idx - epimo - 1; mpc_rec.width += 1; } if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { ASSERT(mpc_rec.height % 2 == 0); mpc_rec.height /= 2; } return mpc_rec; } static void adjust_recout_for_visual_confirm(struct rect *recout, struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; int dpp_offset, base_offset; if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE) return; dpp_offset = 
pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO; dpp_offset *= pipe_ctx->plane_res.dpp->inst; if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) && dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX) base_offset = dc->debug.visual_confirm_rect_height; else base_offset = VISUAL_CONFIRM_BASE_DEFAULT; recout->height -= base_offset; recout->height -= dpp_offset; } /* * The function maps a plane clip from Stream Source Space to ODM Slice Space * and calculates the rec of the overlapping area of MPC slice of the plane * clip, ODM slice associated with the pipe context and stream destination rec. */ static void calculate_recout(struct pipe_ctx *pipe_ctx) { /* * A plane clip represents the desired plane size and position in Stream * Source Space. Stream Source is the destination where all planes are * blended (i.e. positioned, scaled and overlaid). It is a canvas where * all planes associated with the current stream are drawn together. * After Stream Source is completed, we will further scale and * reposition the entire canvas of the stream source to Stream * Destination in Timing Active Space. This could be due to display * overscan adjustment where we will need to rescale and reposition all * the planes so they can fit into a TV with overscan or downscale * upscale features such as GPU scaling or VSR. * * This two step blending is a virtual procedure in software. In * hardware there is no such thing as Stream Source. all planes are * blended once in Timing Active Space. Software virtualizes a Stream * Source space to decouple the math complicity so scaling param * calculation focuses on one step at a time. * * In the following two diagrams, user applied 10% overscan adjustment * so the Stream Source needs to be scaled down a little before mapping * to Timing Active Space. As a result the Plane Clip is also scaled * down by the same ratio, Plane Clip position (i.e. x and y) with * respect to Stream Source is also scaled down. To map it in Timing * Active Space additional x and y offsets from Stream Destination are * added to Plane Clip as well. * * Stream Source Space * ------------ * __________________________________________________ * |Stream Source (3840 x 2160) ^ | * | y | * | | | * | __________________V | * |<-- x -->|Plane Clip/////////| | * | |(pre scale)////////| | * | |///////////////////| | * | |///////////////////| | * | |///////////////////| | * | |///////////////////| | * | |///////////////////| | * | | * | | * |__________________________________________________| * * * Timing Active Space (3840 x 2160) * --------------------------------- * * Timing Active * __________________________________________________ * | y_____________________________________________ | * |x |Stream Destination (3456 x 1944) | | * | | | | * | | __________________ | | * | | |Plane Clip////////| | | * | | |(post scale)//////| | | * | | |//////////////////| | | * | | |//////////////////| | | * | | |//////////////////| | | * | | |//////////////////| | | * | | | | * | | | | * | |____________________________________________| | * |__________________________________________________| * * * In Timing Active Space a plane clip could be further sliced into * pieces called MPC slices. Each Pipe Context is responsible for * processing only one MPC slice so the plane processing workload can be * distributed to multiple DPP Pipes. MPC slices could be blended * together to a single ODM slice. 
Each ODM slice is responsible for * processing a portion of Timing Active divided horizontally so the * output pixel processing workload can be distributed to multiple OPP * pipes. All ODM slices are mapped together in ODM block so all MPC * slices belong to different ODM slices could be pieced together to * form a single image in Timing Active. MPC slices must belong to * single ODM slice. If an MPC slice goes across ODM slice boundary, it * needs to be divided into two MPC slices one for each ODM slice. * * In the following diagram the output pixel processing workload is * divided horizontally into two ODM slices one for each OPP blend tree. * OPP0 blend tree is responsible for processing left half of Timing * Active, while OPP2 blend tree is responsible for processing right * half. * * The plane has two MPC slices. However since the right MPC slice goes * across ODM boundary, two DPP pipes are needed one for each OPP blend * tree. (i.e. DPP1 for OPP0 blend tree and DPP2 for OPP2 blend tree). * * Assuming that we have a Pipe Context associated with OPP0 and DPP1 * working on processing the plane in the diagram. We want to know the * width and height of the shaded rectangle and its relative position * with respect to the ODM slice0. This is called the recout of the pipe * context. * * Planes can be at arbitrary size and position and there could be an * arbitrary number of MPC and ODM slices. The algorithm needs to take * all scenarios into account. * * Timing Active Space (3840 x 2160) * --------------------------------- * * Timing Active * __________________________________________________ * |OPP0(ODM slice0)^ |OPP2(ODM slice1) | * | y | | * | | <- w -> | * | _____V________|____ | * | |DPP0 ^ |DPP1 |DPP2| | * |<------ x |-----|->|/////| | | * | | | |/////| | | * | | h |/////| | | * | | | |/////| | | * | |_____V__|/////|____| | * | | | * | | | * | | | * |_________________________|________________________| * * */ struct rect plane_clip; struct rect mpc_slice_of_plane_clip; struct rect odm_slice; struct rect overlapping_area; plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx, &pipe_ctx->plane_state->clip_rect); /* guard plane clip from drawing beyond stream dst here */ plane_clip = intersect_rec(&plane_clip, &pipe_ctx->stream->dst); mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active( pipe_ctx, &plane_clip); odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice); if (overlapping_area.height > 0 && overlapping_area.width > 0) { /* shift the overlapping area so it is with respect to current * ODM slice's position */ pipe_ctx->plane_res.scl_data.recout = shift_rec( &overlapping_area, -odm_slice.x, -odm_slice.y); adjust_recout_for_visual_confirm( &pipe_ctx->plane_res.scl_data.recout, pipe_ctx); } else { /* if there is no overlap, zero recout */ memset(&pipe_ctx->plane_res.scl_data.recout, 0, sizeof(struct rect)); } } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_stream_state *stream = pipe_ctx->stream; struct rect surf_src = plane_state->src_rect; const int in_w = stream->src.width; const int in_h = stream->src.height; const int out_w = stream->dst.width; const int out_h = stream->dst.height; /*Swap surf_src height and width since scaling ratios are in recout rotation*/ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) 
swap(surf_src.height, surf_src.width); pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction( surf_src.width, plane_state->dst_rect.width); pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_from_fraction( surf_src.height, plane_state->dst_rect.height); if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE) pipe_ctx->plane_res.scl_data.ratios.horz.value *= 2; else if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) pipe_ctx->plane_res.scl_data.ratios.vert.value *= 2; pipe_ctx->plane_res.scl_data.ratios.vert.value = div64_s64( pipe_ctx->plane_res.scl_data.ratios.vert.value * in_h, out_h); pipe_ctx->plane_res.scl_data.ratios.horz.value = div64_s64( pipe_ctx->plane_res.scl_data.ratios.horz.value * in_w, out_w); pipe_ctx->plane_res.scl_data.ratios.horz_c = pipe_ctx->plane_res.scl_data.ratios.horz; pipe_ctx->plane_res.scl_data.ratios.vert_c = pipe_ctx->plane_res.scl_data.ratios.vert; if (pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP8 || pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP10) { pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2; pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2; } pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_truncate( pipe_ctx->plane_res.scl_data.ratios.horz, 19); pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_truncate( pipe_ctx->plane_res.scl_data.ratios.vert, 19); pipe_ctx->plane_res.scl_data.ratios.horz_c = dc_fixpt_truncate( pipe_ctx->plane_res.scl_data.ratios.horz_c, 19); pipe_ctx->plane_res.scl_data.ratios.vert_c = dc_fixpt_truncate( pipe_ctx->plane_res.scl_data.ratios.vert_c, 19); } /* * We completely calculate vp offset, size and inits here based entirely on scaling * ratios and recout for pixel perfect pipe combine. */ static void calculate_init_and_vp( bool flip_scan_dir, int recout_offset_within_recout_full, int recout_size, int src_size, int taps, struct fixed31_32 ratio, struct fixed31_32 *init, int *vp_offset, int *vp_size) { struct fixed31_32 temp; int int_part; /* * First of the taps starts sampling pixel number <init_int_part> corresponding to recout * pixel 1. Next recout pixel samples int part of <init + scaling ratio> and so on. * All following calculations are based on this logic. * * Init calculated according to formula: * init = (scaling_ratio + number_of_taps + 1) / 2 * init_bot = init + scaling_ratio * to get pixel perfect combine add the fraction from calculating vp offset */ temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full); *vp_offset = dc_fixpt_floor(temp); temp.value &= 0xffffffff; *init = dc_fixpt_truncate(dc_fixpt_add(dc_fixpt_div_int( dc_fixpt_add_int(ratio, taps + 1), 2), temp), 19); /* * If viewport has non 0 offset and there are more taps than covered by init then * we should decrease the offset and increase init so we are never sampling * outside of viewport. */ int_part = dc_fixpt_floor(*init); if (int_part < taps) { int_part = taps - int_part; if (int_part > *vp_offset) int_part = *vp_offset; *vp_offset -= int_part; *init = dc_fixpt_add_int(*init, int_part); } /* * If taps are sampling outside of viewport at end of recout and there are more pixels * available in the surface we should increase the viewport size, regardless set vp to * only what is used. 
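 * For example (illustrative numbers only, tracing the code below): with ratio = 1.5,
 * taps = 4 and a recout offset of 10 pixels, vp_offset starts as floor(1.5 * 10) = 15
 * and init = (1.5 + 4 + 1) / 2 = 3.25. Since floor(init) = 3 < taps, one pixel is moved
 * from vp_offset to init, giving vp_offset = 14 and init = 4.25. With recout_size = 100,
 * vp_size = floor(4.25 + 1.5 * 99) = 152, then clamped so vp_offset + vp_size never
 * exceeds src_size.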
*/ temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1)); *vp_size = dc_fixpt_floor(temp); if (*vp_size + *vp_offset > src_size) *vp_size = src_size - *vp_offset; /* We did all the math assuming we are scanning same direction as display does, * however mirror/rotation changes how vp scans vs how it is offset. If scan direction * is flipped we simply need to calculate offset from the other side of plane. * Note that outside of viewport all scaling hardware works in recout space. */ if (flip_scan_dir) *vp_offset = src_size - *vp_offset - *vp_size; } static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = plane_state->src_rect; struct rect recout_dst_in_active_timing; struct rect recout_clip_in_active_timing; struct rect recout_clip_in_recout_dst; struct rect overlap_in_active_timing; struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; recout_clip_in_active_timing = shift_rec( &data->recout, odm_slice.x, odm_slice.y); recout_dst_in_active_timing = calculate_plane_rec_in_timing_active( pipe_ctx, &plane_state->dst_rect); overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing, &recout_dst_in_active_timing); if (overlap_in_active_timing.width > 0 && overlap_in_active_timing.height > 0) recout_clip_in_recout_dst = shift_rec(&overlap_in_active_timing, -recout_dst_in_active_timing.x, -recout_dst_in_active_timing.y); else memset(&recout_clip_in_recout_dst, 0, sizeof(struct rect)); /* * Work in recout rotation since that requires less transformations */ get_vp_scan_direction( plane_state->rotation, plane_state->horizontal_mirror, &orthogonal_rotation, &flip_vert_scan_dir, &flip_horz_scan_dir); if (orthogonal_rotation) { swap(src.width, src.height); swap(flip_vert_scan_dir, flip_horz_scan_dir); } calculate_init_and_vp( flip_horz_scan_dir, recout_clip_in_recout_dst.x, data->recout.width, src.width, data->taps.h_taps, data->ratios.horz, &data->inits.h, &data->viewport.x, &data->viewport.width); calculate_init_and_vp( flip_horz_scan_dir, recout_clip_in_recout_dst.x, data->recout.width, src.width / vpc_div, data->taps.h_taps_c, data->ratios.horz_c, &data->inits.h_c, &data->viewport_c.x, &data->viewport_c.width); calculate_init_and_vp( flip_vert_scan_dir, recout_clip_in_recout_dst.y, data->recout.height, src.height, data->taps.v_taps, data->ratios.vert, &data->inits.v, &data->viewport.y, &data->viewport.height); calculate_init_and_vp( flip_vert_scan_dir, recout_clip_in_recout_dst.y, data->recout.height, src.height / vpc_div, data->taps.v_taps_c, data->ratios.vert_c, &data->inits.v_c, &data->viewport_c.y, &data->viewport_c.height); if (orthogonal_rotation) { swap(data->viewport.x, data->viewport.y); swap(data->viewport.width, data->viewport.height); swap(data->viewport_c.x, data->viewport_c.y); swap(data->viewport_c.width, data->viewport_c.height); } data->viewport.x += src.x; data->viewport.y += src.y; ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0); data->viewport_c.x += src.x / vpc_div; data->viewport_c.y += src.y / vpc_div; } bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; const struct rect 
odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Invalid input */ if (!plane_state->dst_rect.width || !plane_state->dst_rect.height || !plane_state->src_rect.width || !plane_state->src_rect.height) { ASSERT(0); return false; } pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); /* Timing borders are part of vactive that we are also supposed to skip in addition * to any stream dst offset. Since dm logic assumes dst is in addressable * space we need to add the left and top borders to dst offsets temporarily. * TODO: fix in DM, stream dst is supposed to be in vactive */ pipe_ctx->stream->dst.x += timing->h_border_left; pipe_ctx->stream->dst.y += timing->v_border_top; /* Calculate H and V active size */ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; /* depends on h_active */ calculate_recout(pipe_ctx); /* depends on pixel format */ calculate_scaling_ratios(pipe_ctx); /* depends on scaling ratios and recout, does not calculate offset yet */ calculate_viewport_size(pipe_ctx); if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) { /* Stopgap for validation of ODM + MPO on one side of screen case */ if (pipe_ctx->plane_res.scl_data.viewport.height < 1 || pipe_ctx->plane_res.scl_data.viewport.width < 1) return false; } /* * LB calculations depend on vp size, h/v_active and scaling ratios * Setting line buffer pixel depth to 24bpp yields banding * on certain displays, such as the Sharp 4k. 36bpp is needed * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc * precision on DCN display engines, but apparently not for DCE, as * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, * and neither does DCE-8 at 4k resolution, nor DCE-11.2 (broken identity pixel * passthrough). Therefore only use 36 bpp on DCN where it is actually needed.
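 * (Values above DCE_VERSION_MAX in the dce_version enum correspond to DCN display
 * engines, which is what the check below relies on to select the 36 bpp depth.)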
*/ if (plane_state->ctx->dce_version > DCE_VERSION_MAX) pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP; else pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; if (pipe_ctx->plane_res.xfm != NULL) res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); if (pipe_ctx->plane_res.dpp != NULL) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); if (!res) { /* Try 24 bpp linebuffer */ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP; if (pipe_ctx->plane_res.xfm != NULL) res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); if (pipe_ctx->plane_res.dpp != NULL) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); } /* * Depends on recout, scaling ratios, h_active and taps * May need to re-check lb size after this in some obscure scenario */ if (res) calculate_inits_and_viewports(pipe_ctx); /* * Handle side by side and top bottom 3d recout offsets after vp calculation * since 3d is special and needs to calculate vp as if there is no recout offset * This may break with rotation, good thing we aren't mixing hw rotation and 3d */ if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == plane_state) { ASSERT(plane_state->rotation == ROTATION_ANGLE_0 || (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_TOP_AND_BOTTOM && pipe_ctx->stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE)); if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height; else if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE) pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width; } /* Clamp minimum viewport size */ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE) pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE; if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE; DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n" "src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n", __func__, pipe_ctx->pipe_idx, pipe_ctx->plane_res.scl_data.viewport.height, pipe_ctx->plane_res.scl_data.viewport.width, pipe_ctx->plane_res.scl_data.viewport.x, pipe_ctx->plane_res.scl_data.viewport.y, pipe_ctx->plane_res.scl_data.recout.height, pipe_ctx->plane_res.scl_data.recout.width, pipe_ctx->plane_res.scl_data.recout.x, pipe_ctx->plane_res.scl_data.recout.y, pipe_ctx->plane_res.scl_data.h_active, pipe_ctx->plane_res.scl_data.v_active, plane_state->src_rect.height, plane_state->src_rect.width, plane_state->src_rect.x, plane_state->src_rect.y, plane_state->dst_rect.height, plane_state->dst_rect.width, plane_state->dst_rect.x, plane_state->dst_rect.y, plane_state->clip_rect.height, plane_state->clip_rect.width, plane_state->clip_rect.x, plane_state->clip_rect.y); pipe_ctx->stream->dst.x -= timing->h_border_left; 
pipe_ctx->stream->dst.y -= timing->v_border_top; return res; } enum dc_status resource_build_scaling_params_for_context( const struct dc *dc, struct dc_state *context) { int i; for (i = 0; i < MAX_PIPES; i++) { if (context->res_ctx.pipe_ctx[i].plane_state != NULL && context->res_ctx.pipe_ctx[i].stream != NULL) if (!resource_build_scaling_params(&context->res_ctx.pipe_ctx[i])) return DC_FAIL_SCALING; } return DC_OK; } struct pipe_ctx *resource_find_free_secondary_pipe_legacy( struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) { int i; struct pipe_ctx *secondary_pipe = NULL; /* * We add a preferred pipe mapping to avoid the chance that * MPCCs already in use will need to be reassigned to other trees. * For example, if we went with the strict, assign backwards logic: * * (State 1) * Display A on, no surface, top pipe = 0 * Display B on, no surface, top pipe = 1 * * (State 2) * Display A on, no surface, top pipe = 0 * Display B on, surface enable, top pipe = 1, bottom pipe = 5 * * (State 3) * Display A on, surface enable, top pipe = 0, bottom pipe = 5 * Display B on, surface enable, top pipe = 1, bottom pipe = 4 * * The state 2->3 transition requires remapping MPCC 5 from display B * to display A. * * However, with the preferred pipe logic, state 2 would look like: * * (State 2) * Display A on, no surface, top pipe = 0 * Display B on, surface enable, top pipe = 1, bottom pipe = 4 * * This would then cause 2->3 to not require remapping any MPCCs. */ if (primary_pipe) { int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; } } /* * search backwards for the second pipe to keep pipe * assignment more consistent */ if (!secondary_pipe) for (i = pool->pipe_count - 1; i >= 0; i--) { if (res_ctx->pipe_ctx[i].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[i]; secondary_pipe->pipe_idx = i; break; } } return secondary_pipe; } int resource_find_free_pipe_used_in_cur_mpc_blending_tree( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, const struct pipe_ctx *cur_opp_head) { const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe; struct pipe_ctx *new_pipe; int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; while (cur_sec_dpp) { /* find a free pipe used in current opp blend tree, * this is to avoid MPO pipe switching to different opp blending * tree */ new_pipe = &new_res_ctx->pipe_ctx[cur_sec_dpp->pipe_idx]; if (resource_is_pipe_type(new_pipe, FREE_PIPE)) { free_pipe_idx = cur_sec_dpp->pipe_idx; break; } cur_sec_dpp = cur_sec_dpp->bottom_pipe; } return free_pipe_idx; } int recource_find_free_pipe_not_used_in_cur_res_ctx( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, const struct resource_pool *pool) { int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; const struct pipe_ctx *new_pipe, *cur_pipe; int i; for (i = 0; i < pool->pipe_count; i++) { cur_pipe = &cur_res_ctx->pipe_ctx[i]; new_pipe = &new_res_ctx->pipe_ctx[i]; if (resource_is_pipe_type(cur_pipe, FREE_PIPE) && resource_is_pipe_type(new_pipe, FREE_PIPE)) { free_pipe_idx = i; break; } } return free_pipe_idx; } int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, const struct resource_pool *pool) { int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; const struct 
pipe_ctx *new_pipe, *cur_pipe; int i; for (i = 0; i < pool->pipe_count; i++) { cur_pipe = &cur_res_ctx->pipe_ctx[i]; new_pipe = &new_res_ctx->pipe_ctx[i]; if (resource_is_pipe_type(cur_pipe, DPP_PIPE) && !resource_is_pipe_type(cur_pipe, OPP_HEAD) && resource_is_for_mpcc_combine(cur_pipe) && resource_is_pipe_type(new_pipe, FREE_PIPE)) { free_pipe_idx = i; break; } } return free_pipe_idx; } int resource_find_any_free_pipe(struct resource_context *new_res_ctx, const struct resource_pool *pool) { int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; const struct pipe_ctx *new_pipe; int i; for (i = 0; i < pool->pipe_count; i++) { new_pipe = &new_res_ctx->pipe_ctx[i]; if (resource_is_pipe_type(new_pipe, FREE_PIPE)) { free_pipe_idx = i; break; } } return free_pipe_idx; } bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type) { #ifdef DBG if (pipe_ctx->stream == NULL) { /* a free pipe with dangling states */ ASSERT(!pipe_ctx->plane_state); ASSERT(!pipe_ctx->prev_odm_pipe); ASSERT(!pipe_ctx->next_odm_pipe); ASSERT(!pipe_ctx->top_pipe); ASSERT(!pipe_ctx->bottom_pipe); } else if (pipe_ctx->top_pipe) { /* a secondary DPP pipe must be assigned to a plane */ ASSERT(pipe_ctx->plane_state); } /* Add more checks here to prevent corrupted pipe ctx. It is very hard * to debug this issue afterwards because we can't pinpoint the code * location causing inconsistent pipe context states. */ #endif switch (type) { case OTG_MASTER: return !pipe_ctx->prev_odm_pipe && !pipe_ctx->top_pipe && pipe_ctx->stream; case OPP_HEAD: return !pipe_ctx->top_pipe && pipe_ctx->stream; case DPP_PIPE: return pipe_ctx->plane_state && pipe_ctx->stream; case FREE_PIPE: return !pipe_ctx->plane_state && !pipe_ctx->stream; default: return false; } } bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx) { return resource_get_num_mpc_splits(pipe_ctx) > 0; } struct pipe_ctx *resource_get_otg_master_for_stream( struct resource_context *res_ctx, struct dc_stream_state *stream) { int i; for (i = 0; i < MAX_PIPES; i++) { if (res_ctx->pipe_ctx[i].stream == stream && resource_is_pipe_type(&res_ctx->pipe_ctx[i], OTG_MASTER)) return &res_ctx->pipe_ctx[i]; } return NULL; } struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx) { struct pipe_ctx *otg_master = resource_get_opp_head(pipe_ctx); while (otg_master->prev_odm_pipe) otg_master = otg_master->prev_odm_pipe; return otg_master; } struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx) { struct pipe_ctx *opp_head = (struct pipe_ctx *) pipe_ctx; ASSERT(!resource_is_pipe_type(opp_head, FREE_PIPE)); while (opp_head->top_pipe) opp_head = opp_head->top_pipe; return opp_head; } static struct pipe_ctx *get_tail_pipe( struct pipe_ctx *head_pipe) { struct pipe_ctx *tail_pipe = head_pipe->bottom_pipe; while (tail_pipe) { head_pipe = tail_pipe; tail_pipe = tail_pipe->bottom_pipe; } return head_pipe; } static int acquire_first_split_pipe( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; for (i = 0; i < pool->pipe_count; i++) { struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i]; if (split_pipe->top_pipe && split_pipe->top_pipe->plane_state == split_pipe->plane_state) { split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe; if (split_pipe->bottom_pipe) split_pipe->bottom_pipe->top_pipe = split_pipe->top_pipe; if (split_pipe->top_pipe->plane_state) resource_build_scaling_params(split_pipe->top_pipe); memset(split_pipe, 0, sizeof(*split_pipe)); split_pipe->stream_res.tg =
pool->timing_generators[i]; split_pipe->plane_res.hubp = pool->hubps[i]; split_pipe->plane_res.ipp = pool->ipps[i]; split_pipe->plane_res.dpp = pool->dpps[i]; split_pipe->stream_res.opp = pool->opps[i]; split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; split_pipe->pipe_idx = i; split_pipe->stream = stream; return i; } } return UNABLE_TO_SPLIT; } static bool add_plane_to_opp_head_pipes(struct pipe_ctx *otg_master_pipe, struct dc_plane_state *plane_state, struct dc_state *context) { struct pipe_ctx *opp_head_pipe = otg_master_pipe; while (opp_head_pipe) { if (opp_head_pipe->plane_state) { ASSERT(0); return false; } opp_head_pipe->plane_state = plane_state; opp_head_pipe = opp_head_pipe->next_odm_pipe; } return true; } static void insert_secondary_dpp_pipe_with_plane(struct pipe_ctx *opp_head_pipe, struct pipe_ctx *sec_pipe, struct dc_plane_state *plane_state) { struct pipe_ctx *tail_pipe = get_tail_pipe(opp_head_pipe); tail_pipe->bottom_pipe = sec_pipe; sec_pipe->top_pipe = tail_pipe; if (tail_pipe->prev_odm_pipe) { ASSERT(tail_pipe->prev_odm_pipe->bottom_pipe); sec_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = sec_pipe; } sec_pipe->plane_state = plane_state; } /* for each opp head pipe of an otg master pipe, acquire a secondary dpp pipe * and add the plane. So the plane is added to all MPC blend trees associated * with the otg master pipe. */ static bool acquire_secondary_dpp_pipes_and_add_plane( struct pipe_ctx *otg_master_pipe, struct dc_plane_state *plane_state, struct dc_state *new_ctx, struct dc_state *cur_ctx, struct resource_pool *pool) { struct pipe_ctx *opp_head_pipe, *sec_pipe; if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) return false; opp_head_pipe = otg_master_pipe; while (opp_head_pipe) { sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe( cur_ctx, new_ctx, pool, opp_head_pipe); if (!sec_pipe) { /* try tearing down MPCC combine */ int pipe_idx = acquire_first_split_pipe( &new_ctx->res_ctx, pool, otg_master_pipe->stream); if (pipe_idx >= 0) sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; } if (!sec_pipe) return false; insert_secondary_dpp_pipe_with_plane(opp_head_pipe, sec_pipe, plane_state); opp_head_pipe = opp_head_pipe->next_odm_pipe; } return true; } bool dc_add_plane_to_context( const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context) { struct resource_pool *pool = dc->res_pool; struct pipe_ctx *otg_master_pipe; struct dc_stream_status *stream_status = NULL; bool added = false; stream_status = dc_stream_get_status_from_state(context, stream); if (stream_status == NULL) { dm_error("Existing stream not found; failed to attach surface!\n"); goto out; } else if (stream_status->plane_count == MAX_SURFACE_NUM) { dm_error("Surface: can not attach plane_state %p! 
Maximum is: %d\n", plane_state, MAX_SURFACE_NUM); goto out; } otg_master_pipe = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (otg_master_pipe->plane_state == NULL) added = add_plane_to_opp_head_pipes(otg_master_pipe, plane_state, context); else added = acquire_secondary_dpp_pipes_and_add_plane( otg_master_pipe, plane_state, context, dc->current_state, pool); if (added) { stream_status->plane_states[stream_status->plane_count] = plane_state; stream_status->plane_count++; dc_plane_state_retain(plane_state); } out: return added; } bool dc_remove_plane_from_context( const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context) { int i; struct dc_stream_status *stream_status = NULL; struct resource_pool *pool = dc->res_pool; if (!plane_state) return true; for (i = 0; i < context->stream_count; i++) if (context->streams[i] == stream) { stream_status = &context->stream_status[i]; break; } if (stream_status == NULL) { dm_error("Existing stream not found; failed to remove plane.\n"); return false; } /* release pipe for plane*/ for (i = pool->pipe_count - 1; i >= 0; i--) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state == plane_state) { if (pipe_ctx->top_pipe) pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe; /* Second condition is to avoid setting NULL to top pipe * of tail pipe making it look like head pipe in subsequent * deletes */ if (pipe_ctx->bottom_pipe && pipe_ctx->top_pipe) pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe; /* * For head pipe detach surfaces from pipe for tail * pipe just zero it out */ if (!pipe_ctx->top_pipe) pipe_ctx->plane_state = NULL; else memset(pipe_ctx, 0, sizeof(*pipe_ctx)); } } for (i = 0; i < stream_status->plane_count; i++) { if (stream_status->plane_states[i] == plane_state) { dc_plane_state_release(stream_status->plane_states[i]); break; } } if (i == stream_status->plane_count) { dm_error("Existing plane_state not found; failed to detach it!\n"); return false; } stream_status->plane_count--; /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ for (; i < stream_status->plane_count; i++) stream_status->plane_states[i] = stream_status->plane_states[i + 1]; stream_status->plane_states[stream_status->plane_count] = NULL; return true; } /** * dc_rem_all_planes_for_stream - Remove planes attached to the target stream. * * @dc: Current dc state. * @stream: Target stream, which we want to remove the attached plans. * @context: New context. * * Return: * Return true if DC was able to remove all planes from the target * stream, otherwise, return false. 
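 * Removal stops and false is returned as soon as dc_remove_plane_from_context()
 * fails for any plane.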
*/ bool dc_rem_all_planes_for_stream( const struct dc *dc, struct dc_stream_state *stream, struct dc_state *context) { int i, old_plane_count; struct dc_stream_status *stream_status = NULL; struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; for (i = 0; i < context->stream_count; i++) if (context->streams[i] == stream) { stream_status = &context->stream_status[i]; break; } if (stream_status == NULL) { dm_error("Existing stream %p not found!\n", stream); return false; } old_plane_count = stream_status->plane_count; for (i = 0; i < old_plane_count; i++) del_planes[i] = stream_status->plane_states[i]; for (i = 0; i < old_plane_count; i++) if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context)) return false; return true; } static bool add_all_planes_for_stream( const struct dc *dc, struct dc_stream_state *stream, const struct dc_validation_set set[], int set_count, struct dc_state *context) { int i, j; for (i = 0; i < set_count; i++) if (set[i].stream == stream) break; if (i == set_count) { dm_error("Stream %p not found in set!\n", stream); return false; } for (j = 0; j < set[i].plane_count; j++) if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context)) return false; return true; } bool dc_add_all_planes_for_stream( const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state * const *plane_states, int plane_count, struct dc_state *context) { struct dc_validation_set set; int i; set.stream = stream; set.plane_count = plane_count; for (i = 0; i < plane_count; i++) set.plane_states[i] = plane_states[i]; return add_all_planes_for_stream(dc, stream, &set, 1, context); } bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) { if (cur_stream == NULL) return true; /* If output color space is changed, need to reprogram info frames */ if (cur_stream->output_color_space != new_stream->output_color_space) return true; return memcmp( &cur_stream->timing, &new_stream->timing, sizeof(struct dc_crtc_timing)) != 0; } static bool are_stream_backends_same( struct dc_stream_state *stream_a, struct dc_stream_state *stream_b) { if (stream_a == stream_b) return true; if (stream_a == NULL || stream_b == NULL) return false; if (dc_is_timing_changed(stream_a, stream_b)) return false; if (stream_a->signal != stream_b->signal) return false; if (stream_a->dpms_off != stream_b->dpms_off) return false; return true; } /* * dc_is_stream_unchanged() - Compare two stream states for equivalence. * * Checks if there a difference between the two states * that would require a mode change. * * Does not compare cursor position or attributes. */ bool dc_is_stream_unchanged( struct dc_stream_state *old_stream, struct dc_stream_state *stream) { if (!are_stream_backends_same(old_stream, stream)) return false; if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; /*compare audio info*/ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) return false; return true; } /* * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams. 
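 *
 * Returns true when the two streams share identical src and dst rectangles (or are
 * the same stream), false otherwise.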
*/ bool dc_is_stream_scaling_unchanged(struct dc_stream_state *old_stream, struct dc_stream_state *stream) { if (old_stream == stream) return true; if (old_stream == NULL || stream == NULL) return false; if (memcmp(&old_stream->src, &stream->src, sizeof(struct rect)) != 0) return false; if (memcmp(&old_stream->dst, &stream->dst, sizeof(struct rect)) != 0) return false; return true; } static void update_stream_engine_usage( struct resource_context *res_ctx, const struct resource_pool *pool, struct stream_encoder *stream_enc, bool acquired) { int i; for (i = 0; i < pool->stream_enc_count; i++) { if (pool->stream_enc[i] == stream_enc) res_ctx->is_stream_enc_acquired[i] = acquired; } } static void update_hpo_dp_stream_engine_usage( struct resource_context *res_ctx, const struct resource_pool *pool, struct hpo_dp_stream_encoder *hpo_dp_stream_enc, bool acquired) { int i; for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { if (pool->hpo_dp_stream_enc[i] == hpo_dp_stream_enc) res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired; } } static inline int find_acquired_hpo_dp_link_enc_for_link( const struct resource_context *res_ctx, const struct dc_link *link) { int i; for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_to_link_idx); i++) if (res_ctx->hpo_dp_link_enc_ref_cnts[i] > 0 && res_ctx->hpo_dp_link_enc_to_link_idx[i] == link->link_index) return i; return -1; } static inline int find_free_hpo_dp_link_enc(const struct resource_context *res_ctx, const struct resource_pool *pool) { int i; for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts); i++) if (res_ctx->hpo_dp_link_enc_ref_cnts[i] == 0) break; return (i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts) && i < pool->hpo_dp_link_enc_count) ? i : -1; } static inline void acquire_hpo_dp_link_enc( struct resource_context *res_ctx, unsigned int link_index, int enc_index) { res_ctx->hpo_dp_link_enc_to_link_idx[enc_index] = link_index; res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] = 1; } static inline void retain_hpo_dp_link_enc( struct resource_context *res_ctx, int enc_index) { res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]++; } static inline void release_hpo_dp_link_enc( struct resource_context *res_ctx, int enc_index) { ASSERT(res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] > 0); res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]--; } static bool add_hpo_dp_link_enc_to_ctx(struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream) { int enc_index; enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link); if (enc_index >= 0) { retain_hpo_dp_link_enc(res_ctx, enc_index); } else { enc_index = find_free_hpo_dp_link_enc(res_ctx, pool); if (enc_index >= 0) acquire_hpo_dp_link_enc(res_ctx, stream->link->link_index, enc_index); } if (enc_index >= 0) pipe_ctx->link_res.hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index]; return pipe_ctx->link_res.hpo_dp_link_enc != NULL; } static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream) { int enc_index; enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link); if (enc_index >= 0) { release_hpo_dp_link_enc(res_ctx, enc_index); pipe_ctx->link_res.hpo_dp_link_enc = NULL; } } /* TODO: release audio object */ void update_audio_usage( struct resource_context *res_ctx, const struct resource_pool *pool, struct audio *audio, bool acquired) { int i; for (i = 0; i < pool->audio_count; i++) { if (pool->audios[i] == audio) 
res_ctx->is_audio_acquired[i] = acquired; } } static int acquire_first_free_pipe( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; for (i = 0; i < pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; pipe_ctx->stream_res.tg = pool->timing_generators[i]; pipe_ctx->plane_res.mi = pool->mis[i]; pipe_ctx->plane_res.hubp = pool->hubps[i]; pipe_ctx->plane_res.ipp = pool->ipps[i]; pipe_ctx->plane_res.xfm = pool->transforms[i]; pipe_ctx->plane_res.dpp = pool->dpps[i]; pipe_ctx->stream_res.opp = pool->opps[i]; if (pool->dpps[i]) pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst; pipe_ctx->pipe_idx = i; if (i >= pool->timing_generator_count) { int tg_inst = pool->timing_generator_count - 1; pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst]; pipe_ctx->stream_res.opp = pool->opps[tg_inst]; } pipe_ctx->stream = stream; return i; } } return -1; } static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { if (!res_ctx->is_hpo_dp_stream_enc_acquired[i] && pool->hpo_dp_stream_enc[i]) { return pool->hpo_dp_stream_enc[i]; } } return NULL; } static struct audio *find_first_free_audio( struct resource_context *res_ctx, const struct resource_pool *pool, enum engine_id id, enum dce_version dc_version) { int i, available_audio_count; available_audio_count = pool->audio_count; for (i = 0; i < available_audio_count; i++) { if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) { /*we have enough audio endpoint, find the matching inst*/ if (id != i) continue; return pool->audios[i]; } } /* use engine id to find free audio */ if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) { return pool->audios[id]; } /*not found the matching one, first come first serve*/ for (i = 0; i < available_audio_count; i++) { if (res_ctx->is_audio_acquired[i] == false) { return pool->audios[i]; } } return NULL; } /* * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state. */ enum dc_status dc_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream) { enum dc_status res; DC_LOGGER_INIT(dc->ctx->logger); if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); return DC_ERROR_UNEXPECTED; } new_ctx->streams[new_ctx->stream_count] = stream; dc_stream_retain(stream); new_ctx->stream_count++; res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream); if (res != DC_OK) DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); return res; } /* * dc_remove_stream_from_ctx() - Remove a stream from a dc_state. 
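 *
 * @dc: DC instance the stream belongs to
 * @new_ctx: context to remove the stream from
 * @stream: stream to be removed; its OTG master pipe and any chained ODM pipes are
 * released as part of the removal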
*/ enum dc_status dc_remove_stream_from_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream) { int i; struct dc_context *dc_ctx = dc->ctx; struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(&new_ctx->res_ctx, stream); struct pipe_ctx *odm_pipe; if (!del_pipe) { DC_ERROR("Pipe not found for stream %p !\n", stream); return DC_ERROR_UNEXPECTED; } odm_pipe = del_pipe->next_odm_pipe; /* Release primary pipe */ ASSERT(del_pipe->stream_res.stream_enc); update_stream_engine_usage( &new_ctx->res_ctx, dc->res_pool, del_pipe->stream_res.stream_enc, false); if (dc->link_srv->dp_is_128b_132b_signal(del_pipe)) { update_hpo_dp_stream_engine_usage( &new_ctx->res_ctx, dc->res_pool, del_pipe->stream_res.hpo_dp_stream_enc, false); remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream); } if (del_pipe->stream_res.audio) update_audio_usage( &new_ctx->res_ctx, dc->res_pool, del_pipe->stream_res.audio, false); resource_unreference_clock_source(&new_ctx->res_ctx, dc->res_pool, del_pipe->clock_source); if (dc->res_pool->funcs->remove_stream_from_ctx) dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream); while (odm_pipe) { struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe; memset(odm_pipe, 0, sizeof(*odm_pipe)); odm_pipe = next_odm_pipe; } memset(del_pipe, 0, sizeof(*del_pipe)); for (i = 0; i < new_ctx->stream_count; i++) if (new_ctx->streams[i] == stream) break; if (new_ctx->streams[i] != stream) { DC_ERROR("Context doesn't have stream %p !\n", stream); return DC_ERROR_UNEXPECTED; } dc_stream_release(new_ctx->streams[i]); new_ctx->stream_count--; /* Trim back arrays */ for (; i < new_ctx->stream_count; i++) { new_ctx->streams[i] = new_ctx->streams[i + 1]; new_ctx->stream_status[i] = new_ctx->stream_status[i + 1]; } new_ctx->streams[new_ctx->stream_count] = NULL; memset( &new_ctx->stream_status[new_ctx->stream_count], 0, sizeof(new_ctx->stream_status[0])); return DC_OK; } static struct dc_stream_state *find_pll_sharable_stream( struct dc_stream_state *stream_needs_pll, struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { struct dc_stream_state *stream_has_pll = context->streams[i]; /* We are looking for non dp, non virtual stream */ if (resource_are_streams_timing_synchronizable( stream_needs_pll, stream_has_pll) && !dc_is_dp_signal(stream_has_pll->signal) && stream_has_pll->link->connector_signal != SIGNAL_TYPE_VIRTUAL) return stream_has_pll; } return NULL; } static int get_norm_pix_clk(const struct dc_crtc_timing *timing) { uint32_t pix_clk = timing->pix_clk_100hz; uint32_t normalized_pix_clk = pix_clk; if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) pix_clk /= 2; if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (timing->display_color_depth) { case COLOR_DEPTH_666: case COLOR_DEPTH_888: normalized_pix_clk = pix_clk; break; case COLOR_DEPTH_101010: normalized_pix_clk = (pix_clk * 30) / 24; break; case COLOR_DEPTH_121212: normalized_pix_clk = (pix_clk * 36) / 24; break; case COLOR_DEPTH_161616: normalized_pix_clk = (pix_clk * 48) / 24; break; default: ASSERT(0); break; } } return normalized_pix_clk; } static void calculate_phy_pix_clks(struct dc_stream_state *stream) { /* update actual pixel clock on all streams */ if (dc_is_hdmi_signal(stream->signal)) stream->phy_pix_clk = get_norm_pix_clk( &stream->timing) / 10; else stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) stream->phy_pix_clk *= 2; } static int 
acquire_resource_from_hw_enabled_state( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { struct dc_link *link = stream->link; unsigned int i, inst, tg_inst = 0; uint32_t numPipes = 1; uint32_t id_src[4] = {0}; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return -1; inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (inst == ENGINE_ID_UNKNOWN) return -1; for (i = 0; i < pool->stream_enc_count; i++) { if (pool->stream_enc[i]->id == inst) { tg_inst = pool->stream_enc[i]->funcs->dig_source_otg( pool->stream_enc[i]); break; } } // tg_inst not found if (i == pool->stream_enc_count) return -1; if (tg_inst >= pool->timing_generator_count) return -1; if (!res_ctx->pipe_ctx[tg_inst].stream) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst]; pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst]; id_src[0] = tg_inst; if (pipe_ctx->stream_res.tg->funcs->get_optc_source) pipe_ctx->stream_res.tg->funcs->get_optc_source(pipe_ctx->stream_res.tg, &numPipes, &id_src[0], &id_src[1]); if (id_src[0] == 0xf && id_src[1] == 0xf) { id_src[0] = tg_inst; numPipes = 1; } for (i = 0; i < numPipes; i++) { //Check if src id invalid if (id_src[i] == 0xf) return -1; pipe_ctx = &res_ctx->pipe_ctx[id_src[i]]; pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst]; pipe_ctx->plane_res.mi = pool->mis[id_src[i]]; pipe_ctx->plane_res.hubp = pool->hubps[id_src[i]]; pipe_ctx->plane_res.ipp = pool->ipps[id_src[i]]; pipe_ctx->plane_res.xfm = pool->transforms[id_src[i]]; pipe_ctx->plane_res.dpp = pool->dpps[id_src[i]]; pipe_ctx->stream_res.opp = pool->opps[id_src[i]]; if (pool->dpps[id_src[i]]) { pipe_ctx->plane_res.mpcc_inst = pool->dpps[id_src[i]]->inst; if (pool->mpc->funcs->read_mpcc_state) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s); if (s.dpp_id < MAX_MPCC) pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id; if (s.bot_mpcc_id < MAX_MPCC) pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot = &pool->mpc->mpcc_array[s.bot_mpcc_id]; if (s.opp_id < MAX_OPP) pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id; } } pipe_ctx->pipe_idx = id_src[i]; if (id_src[i] >= pool->timing_generator_count) { id_src[i] = pool->timing_generator_count - 1; pipe_ctx->stream_res.tg = pool->timing_generators[id_src[i]]; pipe_ctx->stream_res.opp = pool->opps[id_src[i]]; } pipe_ctx->stream = stream; } if (numPipes == 2) { stream->apply_boot_odm_mode = dm_odm_combine_policy_2to1; res_ctx->pipe_ctx[id_src[0]].next_odm_pipe = &res_ctx->pipe_ctx[id_src[1]]; res_ctx->pipe_ctx[id_src[0]].prev_odm_pipe = NULL; res_ctx->pipe_ctx[id_src[1]].next_odm_pipe = NULL; res_ctx->pipe_ctx[id_src[1]].prev_odm_pipe = &res_ctx->pipe_ctx[id_src[0]]; } else stream->apply_boot_odm_mode = dm_odm_combine_mode_disabled; return id_src[0]; } return -1; } static void mark_seamless_boot_stream( const struct dc *dc, struct dc_stream_state *stream) { struct dc_bios *dcb = dc->ctx->dc_bios; if (dc->config.allow_seamless_boot_optimization && !dcb->funcs->is_accelerated_mode(dcb)) { if (dc_validate_boot_timing(dc, stream->sink, &stream->timing)) stream->apply_seamless_boot_optimization = true; } } enum dc_status resource_map_pool_resources( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { const struct resource_pool *pool = dc->res_pool; int i; struct dc_context *dc_ctx = dc->ctx; struct pipe_ctx 
*pipe_ctx = NULL; int pipe_idx = -1; calculate_phy_pix_clks(stream); mark_seamless_boot_stream(dc, stream); if (stream->apply_seamless_boot_optimization) { pipe_idx = acquire_resource_from_hw_enabled_state( &context->res_ctx, pool, stream); if (pipe_idx < 0) /* hw resource was assigned to other stream */ stream->apply_seamless_boot_optimization = false; } if (pipe_idx < 0) /* acquire new resources */ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); if (pipe_idx < 0) pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL) return DC_NO_CONTROLLER_RESOURCE; pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; pipe_ctx->stream_res.stream_enc = dc->res_pool->funcs->find_first_free_match_stream_enc_for_link( &context->res_ctx, pool, stream); if (!pipe_ctx->stream_res.stream_enc) return DC_NO_STREAM_ENC_RESOURCE; update_stream_engine_usage( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc, true); /* Allocate DP HPO Stream Encoder based on signal, hw capabilities * and link settings */ if (dc_is_dp_signal(stream->signal)) { if (!dc->link_srv->dp_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) return DC_FAIL_DP_LINK_BANDWIDTH; if (dc->link_srv->dp_get_encoding_format( &pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { pipe_ctx->stream_res.hpo_dp_stream_enc = find_first_free_match_hpo_dp_stream_enc_for_link( &context->res_ctx, pool, stream); if (!pipe_ctx->stream_res.hpo_dp_stream_enc) return DC_NO_STREAM_ENC_RESOURCE; update_hpo_dp_stream_engine_usage( &context->res_ctx, pool, pipe_ctx->stream_res.hpo_dp_stream_enc, true); if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, pool, pipe_ctx, stream)) return DC_NO_LINK_ENC_RESOURCE; } } /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); /* * Audio assigned in order first come first get. * There are asics which has number of audio * resources less then number of pipes */ if (pipe_ctx->stream_res.audio) update_audio_usage(&context->res_ctx, pool, pipe_ctx->stream_res.audio, true); } /* Add ABM to the resource if on EDP */ if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal)) { if (pool->abm) pipe_ctx->stream_res.abm = pool->abm; else pipe_ctx->stream_res.abm = pool->multiple_abms[pipe_ctx->stream_res.tg->inst]; } for (i = 0; i < context->stream_count; i++) if (context->streams[i] == stream) { context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst; context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst; context->stream_status[i].audio_inst = pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1; return DC_OK; } DC_ERROR("Stream %p not found in new ctx!\n", stream); return DC_ERROR_UNEXPECTED; } /** * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state * * @dc: copy out of dc->current_state * @dst_ctx: copy into this * * This function makes a shallow copy of the current DC state and increments * refcounts on existing streams and planes. 
*/ void dc_resource_state_copy_construct_current( const struct dc *dc, struct dc_state *dst_ctx) { dc_resource_state_copy_construct(dc->current_state, dst_ctx); } void dc_resource_state_construct( const struct dc *dc, struct dc_state *dst_ctx) { dst_ctx->clk_mgr = dc->clk_mgr; /* Initialise DIG link encoder resource tracking variables. */ link_enc_cfg_init(dc, dst_ctx); } bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) { if (dc->res_pool == NULL) return false; return dc->res_pool->res_cap->num_dsc > 0; } static bool planes_changed_for_existing_stream(struct dc_state *context, struct dc_stream_state *stream, const struct dc_validation_set set[], int set_count) { int i, j; struct dc_stream_status *stream_status = NULL; for (i = 0; i < context->stream_count; i++) { if (context->streams[i] == stream) { stream_status = &context->stream_status[i]; break; } } if (!stream_status) ASSERT(0); for (i = 0; i < set_count; i++) if (set[i].stream == stream) break; if (i == set_count) ASSERT(0); if (set[i].plane_count != stream_status->plane_count) return true; for (j = 0; j < set[i].plane_count; j++) if (set[i].plane_states[j] != stream_status->plane_states[j]) return true; return false; } /** * dc_validate_with_context - Validate and update the potential new stream in the context object * * @dc: Used to get the current state status * @set: An array of dc_validation_set with all the current streams referenced * @set_count: Total number of streams * @context: New context * @fast_validate: Enable or disable fast validation * * This function updates the potential new stream in the context object. It * creates multiple lists for the add, remove, and unchanged streams. In * particular, if the unchanged streams have a plane that changed, it is * necessary to remove all planes from the unchanged streams. In summary, this * function is responsible for validating the new context. * * Return: * In case of success, return DC_OK (1), otherwise, return a DC error. */ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, bool fast_validate) { struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 }; struct dc_stream_state *del_streams[MAX_PIPES] = { 0 }; struct dc_stream_state *add_streams[MAX_PIPES] = { 0 }; int old_stream_count = context->stream_count; enum dc_status res = DC_ERROR_UNEXPECTED; int unchanged_streams_count = 0; int del_streams_count = 0; int add_streams_count = 0; bool found = false; int i, j, k; DC_LOGGER_INIT(dc->ctx->logger); /* First build a list of streams to be removed from the current context */ for (i = 0; i < old_stream_count; i++) { struct dc_stream_state *stream = context->streams[i]; for (j = 0; j < set_count; j++) { if (stream == set[j].stream) { found = true; break; } } if (!found) del_streams[del_streams_count++] = stream; found = false; } /* Second, build a list of new streams */ for (i = 0; i < set_count; i++) { struct dc_stream_state *stream = set[i].stream; for (j = 0; j < old_stream_count; j++) { if (stream == context->streams[j]) { found = true; break; } } if (!found) add_streams[add_streams_count++] = stream; found = false; } /* Build a list of unchanged streams which is necessary for handling * plane changes such as planes being added, removed, or updated.
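 * A stream is treated as unchanged when it is present in the validation set but
 * appears in neither the delete list nor the add list built above.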
*/ for (i = 0; i < set_count; i++) { /* Check if stream is part of the delete list */ for (j = 0; j < del_streams_count; j++) { if (set[i].stream == del_streams[j]) { found = true; break; } } if (!found) { /* Check if stream is part of the add list */ for (j = 0; j < add_streams_count; j++) { if (set[i].stream == add_streams[j]) { found = true; break; } } } if (!found) unchanged_streams[unchanged_streams_count++] = set[i].stream; found = false; } /* Remove all planes for unchanged streams if planes changed */ for (i = 0; i < unchanged_streams_count; i++) { if (planes_changed_for_existing_stream(context, unchanged_streams[i], set, set_count)) { if (!dc_rem_all_planes_for_stream(dc, unchanged_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; goto fail; } } } /* Remove all planes for removed streams and then remove the streams */ for (i = 0; i < del_streams_count; i++) { /* Need to cpy the dwb data from the old stream in order to efc to work */ if (del_streams[i]->num_wb_info > 0) { for (j = 0; j < add_streams_count; j++) { if (del_streams[i]->sink == add_streams[j]->sink) { add_streams[j]->num_wb_info = del_streams[i]->num_wb_info; for (k = 0; k < del_streams[i]->num_wb_info; k++) add_streams[j]->writeback_info[k] = del_streams[i]->writeback_info[k]; } } } if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; goto fail; } res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } /* Swap seamless boot stream to pipe 0 (if needed) to ensure pipe_ctx * matches. This may change in the future if seamless_boot_stream can be * multiple. */ for (i = 0; i < add_streams_count; i++) { mark_seamless_boot_stream(dc, add_streams[i]); if (add_streams[i]->apply_seamless_boot_optimization && i != 0) { struct dc_stream_state *temp = add_streams[0]; add_streams[0] = add_streams[i]; add_streams[i] = temp; break; } } /* Add new streams and then add all planes for the new stream */ for (i = 0; i < add_streams_count; i++) { calculate_phy_pix_clks(add_streams[i]); res = dc_add_stream_to_ctx(dc, context, add_streams[i]); if (res != DC_OK) goto fail; if (!add_all_planes_for_stream(dc, add_streams[i], set, set_count, context)) { res = DC_FAIL_ATTACH_SURFACES; goto fail; } } /* Add all planes for unchanged streams if planes changed */ for (i = 0; i < unchanged_streams_count; i++) { if (planes_changed_for_existing_stream(context, unchanged_streams[i], set, set_count)) { if (!add_all_planes_for_stream(dc, unchanged_streams[i], set, set_count, context)) { res = DC_FAIL_ATTACH_SURFACES; goto fail; } } } res = dc_validate_global_state(dc, context, fast_validate); fail: if (res != DC_OK) DC_LOG_WARNING("%s:resource validation failed, dc_status:%d\n", __func__, res); return res; } /** * dc_validate_global_state() - Determine if hardware can support a given state * * @dc: dc struct for this driver * @new_ctx: state to be validated * @fast_validate: set to true if only yes/no to support matters * * Checks hardware resource availability and bandwidth requirement. * * Return: * DC_OK if the result can be programmed. Otherwise, an error code. 
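 * DC_FAIL_BANDWIDTH_VALIDATE if the bandwidth validation step fails.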
*/ enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, bool fast_validate) { enum dc_status result = DC_ERROR_UNEXPECTED; int i, j; if (!new_ctx) return DC_ERROR_UNEXPECTED; if (dc->res_pool->funcs->validate_global) { result = dc->res_pool->funcs->validate_global(dc, new_ctx); if (result != DC_OK) return result; } for (i = 0; i < new_ctx->stream_count; i++) { struct dc_stream_state *stream = new_ctx->streams[i]; for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[j]; if (pipe_ctx->stream != stream) continue; if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state); if (result != DC_OK) return result; } /* Switch to dp clock source only if there is * no non dp stream that shares the same timing * with the dp stream. */ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !find_pll_sharable_stream(stream, new_ctx)) { resource_unreference_clock_source( &new_ctx->res_ctx, dc->res_pool, pipe_ctx->clock_source); pipe_ctx->clock_source = dc->res_pool->dp_clock_source; resource_reference_clock_source( &new_ctx->res_ctx, dc->res_pool, pipe_ctx->clock_source); } } } result = resource_build_scaling_params_for_context(dc, new_ctx); if (result == DC_OK) if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; /* * Only update link encoder to stream assignment after bandwidth validation passed. * TODO: Split out assignment and validation. */ if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) dc->res_pool->funcs->link_encs_assign( dc, new_ctx, new_ctx->streams, new_ctx->stream_count); return result; } static void patch_gamut_packet_checksum( struct dc_info_packet *gamut_packet) { /* For gamut we recalc checksum */ if (gamut_packet->valid) { uint8_t chk_sum = 0; uint8_t *ptr; uint8_t i; /*start of the Gamut data. */ ptr = &gamut_packet->sb[3]; for (i = 0; i <= gamut_packet->sb[1]; i++) chk_sum += ptr[i]; gamut_packet->sb[2] = (uint8_t) (0x100 - chk_sum); } } static void set_avi_info_frame( struct dc_info_packet *info_packet, struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; uint32_t pixel_encoding = 0; enum scanning_type scan_type = SCANNING_TYPE_NODATA; enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA; bool itc = false; uint8_t itc_value = 0; uint8_t cn0_cn1 = 0; unsigned int cn0_cn1_value = 0; uint8_t *check_sum = NULL; uint8_t byte_index = 0; union hdmi_info_packet hdmi_info; union display_content_support support = {0}; unsigned int vic = pipe_ctx->stream->timing.vic; unsigned int rid = pipe_ctx->stream->timing.rid; unsigned int fr_ind = pipe_ctx->stream->timing.fr_index; enum dc_timing_3d_format format; memset(&hdmi_info, 0, sizeof(union hdmi_info_packet)); color_space = pipe_ctx->stream->output_color_space; if (color_space == COLOR_SPACE_UNKNOWN) color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? 
COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709; /* Initialize header */ hdmi_info.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI; /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall * not be used in HDMI 2.0 (Section 10.1) */ hdmi_info.bits.header.version = 2; hdmi_info.bits.header.length = HDMI_AVI_INFOFRAME_SIZE; /* * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built * according to HDMI 2.0 spec (Section 10.1) */ switch (stream->timing.pixel_encoding) { case PIXEL_ENCODING_YCBCR422: pixel_encoding = 1; break; case PIXEL_ENCODING_YCBCR444: pixel_encoding = 2; break; case PIXEL_ENCODING_YCBCR420: pixel_encoding = 3; break; case PIXEL_ENCODING_RGB: default: pixel_encoding = 0; } /* Y0_Y1_Y2 : The pixel encoding */ /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */ hdmi_info.bits.Y0_Y1_Y2 = pixel_encoding; /* A0 = 1 Active Format Information valid */ hdmi_info.bits.A0 = ACTIVE_FORMAT_VALID; /* B0, B1 = 3; Bar info data is valid */ hdmi_info.bits.B0_B1 = BAR_INFO_BOTH_VALID; hdmi_info.bits.SC0_SC1 = PICTURE_SCALING_UNIFORM; /* S0, S1 : Underscan / Overscan */ /* TODO: un-hardcode scan type */ scan_type = SCANNING_TYPE_UNDERSCAN; hdmi_info.bits.S0_S1 = scan_type; /* C0, C1 : Colorimetry */ switch (color_space) { case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601; break; case COLOR_SPACE_2020_RGB_FULLRANGE: case COLOR_SPACE_2020_RGB_LIMITEDRANGE: case COLOR_SPACE_2020_YCBCR: hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR; hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; break; case COLOR_SPACE_ADOBERGB: hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB; hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; break; case COLOR_SPACE_SRGB: default: hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA; break; } if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { hdmi_info.bits.EC0_EC2 = 0; hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; } /* TODO: un-hardcode aspect ratio */ aspect = stream->timing.aspect_ratio; switch (aspect) { case ASPECT_RATIO_4_3: case ASPECT_RATIO_16_9: hdmi_info.bits.M0_M1 = aspect; break; case ASPECT_RATIO_NO_DATA: case ASPECT_RATIO_64_27: case ASPECT_RATIO_256_135: default: hdmi_info.bits.M0_M1 = 0; } /* Active Format Aspect ratio - same as Picture Aspect Ratio. 
*/ hdmi_info.bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE; /* TODO: un-hardcode cn0_cn1 and itc */ cn0_cn1 = 0; cn0_cn1_value = 0; itc = true; itc_value = 1; support = stream->content_support; if (itc) { if (!support.bits.valid_content_type) { cn0_cn1_value = 0; } else { if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GRAPHICS) { if (support.bits.graphics_content == 1) { cn0_cn1_value = 0; } } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_PHOTO) { if (support.bits.photo_content == 1) { cn0_cn1_value = 1; } else { cn0_cn1_value = 0; itc_value = 0; } } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_CINEMA) { if (support.bits.cinema_content == 1) { cn0_cn1_value = 2; } else { cn0_cn1_value = 0; itc_value = 0; } } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GAME) { if (support.bits.game_content == 1) { cn0_cn1_value = 3; } else { cn0_cn1_value = 0; itc_value = 0; } } } hdmi_info.bits.CN0_CN1 = cn0_cn1_value; hdmi_info.bits.ITC = itc_value; } if (stream->qs_bit == 1) { if (color_space == COLOR_SPACE_SRGB || color_space == COLOR_SPACE_2020_RGB_FULLRANGE) hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; else if (color_space == COLOR_SPACE_SRGB_LIMITED || color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; else hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; } else hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; /* TODO : We should handle YCC quantization */ /* but we do not have matrix calculation */ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; ///VIC if (pipe_ctx->stream->timing.hdmi_vic != 0) vic = 0; format = stream->timing.timing_3d_format; /*todo, add 3DStereo support*/ if (format != TIMING_3D_FORMAT_NONE) { // Based on HDMI specs hdmi vic needs to be converted to cea vic when 3D is enabled switch (pipe_ctx->stream->timing.hdmi_vic) { case 1: vic = 95; break; case 2: vic = 94; break; case 3: vic = 93; break; case 4: vic = 98; break; default: break; } } /* If VIC >= 128, the Source shall use AVI InfoFrame Version 3*/ hdmi_info.bits.VIC0_VIC7 = vic; if (vic >= 128) hdmi_info.bits.header.version = 3; /* If (C1, C0)=(1, 1) and (EC2, EC1, EC0)=(1, 1, 1), * the Source shall use 20 AVI InfoFrame Version 4 */ if (hdmi_info.bits.C0_C1 == COLORIMETRY_EXTENDED && hdmi_info.bits.EC0_EC2 == COLORIMETRYEX_RESERVED) { hdmi_info.bits.header.version = 4; hdmi_info.bits.header.length = 14; } if (rid != 0 && fr_ind != 0) { hdmi_info.bits.header.version = 5; hdmi_info.bits.header.length = 15; hdmi_info.bits.FR0_FR3 = fr_ind & 0xF; hdmi_info.bits.FR4 = (fr_ind >> 4) & 0x1; hdmi_info.bits.RID0_RID5 = rid; } /* pixel repetition * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel * repetition start from 1 */ hdmi_info.bits.PR0_PR3 = 0; /* Bar Info * barTop: Line Number of End of Top Bar. * barBottom: Line Number of Start of Bottom Bar. * barLeft: Pixel Number of End of Left Bar. * barRight: Pixel Number of Start of Right Bar. 
*/ hdmi_info.bits.bar_top = stream->timing.v_border_top; hdmi_info.bits.bar_bottom = (stream->timing.v_total - stream->timing.v_border_bottom + 1); hdmi_info.bits.bar_left = stream->timing.h_border_left; hdmi_info.bits.bar_right = (stream->timing.h_total - stream->timing.h_border_right + 1); /* Additional Colorimetry Extension * Used in conduction with C0-C1 and EC0-EC2 * 0 = DCI-P3 RGB (D65) * 1 = DCI-P3 RGB (theater) */ hdmi_info.bits.ACE0_ACE3 = 0; /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */ check_sum = &hdmi_info.packet_raw_data.sb[0]; *check_sum = HDMI_INFOFRAME_TYPE_AVI + hdmi_info.bits.header.length + hdmi_info.bits.header.version; for (byte_index = 1; byte_index <= hdmi_info.bits.header.length; byte_index++) *check_sum += hdmi_info.packet_raw_data.sb[byte_index]; /* one byte complement */ *check_sum = (uint8_t) (0x100 - *check_sum); /* Store in hw_path_mode */ info_packet->hb0 = hdmi_info.packet_raw_data.hb0; info_packet->hb1 = hdmi_info.packet_raw_data.hb1; info_packet->hb2 = hdmi_info.packet_raw_data.hb2; for (byte_index = 0; byte_index < sizeof(hdmi_info.packet_raw_data.sb); byte_index++) info_packet->sb[byte_index] = hdmi_info.packet_raw_data.sb[byte_index]; info_packet->valid = true; } static void set_vendor_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { /* SPD info packet for FreeSync */ /* Check if Freesync is supported. Return if false. If true, * set the corresponding bit in the info packet */ if (!stream->vsp_infopacket.valid) return; *info_packet = stream->vsp_infopacket; } static void set_spd_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { /* SPD info packet for FreeSync */ /* Check if Freesync is supported. Return if false. If true, * set the corresponding bit in the info packet */ if (!stream->vrr_infopacket.valid) return; *info_packet = stream->vrr_infopacket; } static void set_hdr_static_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { /* HDR Static Metadata info packet for HDR10 */ if (!stream->hdr_static_metadata.valid || stream->use_dynamic_meta) return; *info_packet = stream->hdr_static_metadata; } static void set_vsc_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { if (!stream->vsc_infopacket.valid) return; *info_packet = stream->vsc_infopacket; } static void set_hfvs_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { if (!stream->hfvsif_infopacket.valid) return; *info_packet = stream->hfvsif_infopacket; } static void adaptive_sync_override_dp_info_packets_sdp_line_num( const struct dc_crtc_timing *timing, struct enc_sdp_line_num *sdp_line_num, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param) { uint32_t asic_blank_start = 0; uint32_t asic_blank_end = 0; uint32_t v_update = 0; const struct dc_crtc_timing *tg = timing; /* blank_start = frame end - front porch */ asic_blank_start = tg->v_total - tg->v_front_porch; /* blank_end = blank_start - active */ asic_blank_end = (asic_blank_start - tg->v_border_bottom - tg->v_addressable - tg->v_border_top); if (pipe_dlg_param->vstartup_start > asic_blank_end) { v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end)); sdp_line_num->adaptive_sync_line_num_valid = true; sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1); } else { sdp_line_num->adaptive_sync_line_num_valid = false; sdp_line_num->adaptive_sync_line_num = 0; } } static void set_adaptive_sync_info_packet( struct dc_info_packet 
*info_packet, const struct dc_stream_state *stream, struct encoder_info_frame *info_frame, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param) { if (!stream->adaptive_sync_infopacket.valid) return; adaptive_sync_override_dp_info_packets_sdp_line_num( &stream->timing, &info_frame->sdp_line_num, pipe_dlg_param); *info_packet = stream->adaptive_sync_infopacket; } static void set_vtem_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { if (!stream->vtem_infopacket.valid) return; *info_packet = stream->vtem_infopacket; } void dc_resource_state_destruct(struct dc_state *context) { int i, j; for (i = 0; i < context->stream_count; i++) { for (j = 0; j < context->stream_status[i].plane_count; j++) dc_plane_state_release( context->stream_status[i].plane_states[j]); context->stream_status[i].plane_count = 0; dc_stream_release(context->streams[i]); context->streams[i] = NULL; } context->stream_count = 0; } void dc_resource_state_copy_construct( const struct dc_state *src_ctx, struct dc_state *dst_ctx) { int i, j; struct kref refcount = dst_ctx->refcount; *dst_ctx = *src_ctx; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i]; if (cur_pipe->top_pipe) cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; if (cur_pipe->bottom_pipe) cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; if (cur_pipe->next_odm_pipe) cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; if (cur_pipe->prev_odm_pipe) cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; } for (i = 0; i < dst_ctx->stream_count; i++) { dc_stream_retain(dst_ctx->streams[i]); for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++) dc_plane_state_retain( dst_ctx->stream_status[i].plane_states[j]); } /* context refcount should not be overridden */ dst_ctx->refcount = refcount; } struct clock_source *dc_resource_find_first_free_pll( struct resource_context *res_ctx, const struct resource_pool *pool) { int i; for (i = 0; i < pool->clk_src_count; ++i) { if (res_ctx->clock_source_ref_count[i] == 0) return pool->clock_sources[i]; } return NULL; } void resource_build_info_frame(struct pipe_ctx *pipe_ctx) { enum signal_type signal = SIGNAL_TYPE_NONE; struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame; /* default all packets to invalid */ info->avi.valid = false; info->gamut.valid = false; info->vendor.valid = false; info->spd.valid = false; info->hdrsmd.valid = false; info->vsc.valid = false; info->hfvsif.valid = false; info->vtem.valid = false; info->adaptive_sync.valid = false; signal = pipe_ctx->stream->signal; /* HDMi and DP have different info packets*/ if (dc_is_hdmi_signal(signal)) { set_avi_info_frame(&info->avi, pipe_ctx); set_vendor_info_packet(&info->vendor, pipe_ctx->stream); set_hfvs_info_packet(&info->hfvsif, pipe_ctx->stream); set_vtem_info_packet(&info->vtem, pipe_ctx->stream); set_spd_info_packet(&info->spd, pipe_ctx->stream); set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream); } else if (dc_is_dp_signal(signal)) { set_vsc_info_packet(&info->vsc, pipe_ctx->stream); set_spd_info_packet(&info->spd, pipe_ctx->stream); set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream); set_adaptive_sync_info_packet(&info->adaptive_sync, pipe_ctx->stream, info, &pipe_ctx->pipe_dlg_param); } patch_gamut_packet_checksum(&info->gamut); } enum dc_status resource_map_clock_resources( const struct dc *dc, struct 
dc_state *context, struct dc_stream_state *stream) { /* acquire new resources */ const struct resource_pool *pool = dc->res_pool; struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; if (dc_is_dp_signal(pipe_ctx->stream->signal) || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL) pipe_ctx->clock_source = pool->dp_clock_source; else { pipe_ctx->clock_source = NULL; if (!dc->config.disable_disp_pll_sharing) pipe_ctx->clock_source = resource_find_used_clk_src_for_sharing( &context->res_ctx, pipe_ctx); if (pipe_ctx->clock_source == NULL) pipe_ctx->clock_source = dc_resource_find_first_free_pll( &context->res_ctx, pool); } if (pipe_ctx->clock_source == NULL) return DC_NO_CLOCK_SOURCE_RESOURCE; resource_reference_clock_source( &context->res_ctx, pool, pipe_ctx->clock_source); return DC_OK; } /* * Note: We need to disable output if clock sources change, * since bios does optimization and doesn't apply if changing * PHY when not already disabled. */ bool pipe_need_reprogram( struct pipe_ctx *pipe_ctx_old, struct pipe_ctx *pipe_ctx) { if (!pipe_ctx_old->stream) return false; if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink) return true; if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal) return true; if (pipe_ctx_old->stream_res.audio != pipe_ctx->stream_res.audio) return true; if (pipe_ctx_old->clock_source != pipe_ctx->clock_source && pipe_ctx_old->stream != pipe_ctx->stream) return true; if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc) return true; if (dc_is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream)) return true; if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off) return true; if (false == pipe_ctx_old->stream->link->link_state_valid && false == pipe_ctx_old->stream->dpms_off) return true; if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc) return true; if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc) return true; if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc) return true; /* DIG link encoder resource assignment for stream changed. 
*/ if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) { bool need_reprogram = false; struct dc *dc = pipe_ctx_old->stream->ctx->dc; struct link_encoder *link_enc_prev = link_enc_cfg_get_link_enc_used_by_stream_current(dc, pipe_ctx_old->stream); if (link_enc_prev != pipe_ctx->stream->link_enc) need_reprogram = true; return need_reprogram; } return false; } void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, struct bit_depth_reduction_params *fmt_bit_depth) { enum dc_dither_option option = stream->dither_option; enum dc_pixel_encoding pixel_encoding = stream->timing.pixel_encoding; memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth)); if (option == DITHER_OPTION_DEFAULT) { switch (stream->timing.display_color_depth) { case COLOR_DEPTH_666: option = DITHER_OPTION_SPATIAL6; break; case COLOR_DEPTH_888: option = DITHER_OPTION_SPATIAL8; break; case COLOR_DEPTH_101010: option = DITHER_OPTION_SPATIAL10; break; default: option = DITHER_OPTION_DISABLE; } } if (option == DITHER_OPTION_DISABLE) return; if (option == DITHER_OPTION_TRUN6) { fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; fmt_bit_depth->flags.TRUNCATE_DEPTH = 0; } else if (option == DITHER_OPTION_TRUN8 || option == DITHER_OPTION_TRUN8_SPATIAL6 || option == DITHER_OPTION_TRUN8_FM6) { fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; fmt_bit_depth->flags.TRUNCATE_DEPTH = 1; } else if (option == DITHER_OPTION_TRUN10 || option == DITHER_OPTION_TRUN10_SPATIAL6 || option == DITHER_OPTION_TRUN10_SPATIAL8 || option == DITHER_OPTION_TRUN10_FM8 || option == DITHER_OPTION_TRUN10_FM6 || option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; } /* special case - Formatter can only reduce by 4 bits at most. * When reducing from 12 to 6 bits, * HW recommends we use trunc with round mode * (if we did nothing, trunc to 10 bits would be used) * note that any 12->10 bit reduction is ignored prior to DCE8, * as the input was 10 bits. */ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || option == DITHER_OPTION_SPATIAL6 || option == DITHER_OPTION_FM6) { fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; fmt_bit_depth->flags.TRUNCATE_MODE = 1; } /* spatial dither * note that spatial modes 1-3 are never used */ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || option == DITHER_OPTION_SPATIAL6 || option == DITHER_OPTION_TRUN10_SPATIAL6 || option == DITHER_OPTION_TRUN8_SPATIAL6) { fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0; fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; fmt_bit_depth->flags.RGB_RANDOM = (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM || option == DITHER_OPTION_SPATIAL8 || option == DITHER_OPTION_SPATIAL8_FM6 || option == DITHER_OPTION_TRUN10_SPATIAL8 || option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1; fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; fmt_bit_depth->flags.RGB_RANDOM = (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM || option == DITHER_OPTION_SPATIAL10 || option == DITHER_OPTION_SPATIAL10_FM8 || option == DITHER_OPTION_SPATIAL10_FM6) { fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2; fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; fmt_bit_depth->flags.RGB_RANDOM = (pixel_encoding == PIXEL_ENCODING_RGB) ? 
1 : 0; } if (option == DITHER_OPTION_SPATIAL6 || option == DITHER_OPTION_SPATIAL8 || option == DITHER_OPTION_SPATIAL10) { fmt_bit_depth->flags.FRAME_RANDOM = 0; } else { fmt_bit_depth->flags.FRAME_RANDOM = 1; } ////////////////////// //// temporal dither ////////////////////// if (option == DITHER_OPTION_FM6 || option == DITHER_OPTION_SPATIAL8_FM6 || option == DITHER_OPTION_SPATIAL10_FM6 || option == DITHER_OPTION_TRUN10_FM6 || option == DITHER_OPTION_TRUN8_FM6 || option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0; } else if (option == DITHER_OPTION_FM8 || option == DITHER_OPTION_SPATIAL10_FM8 || option == DITHER_OPTION_TRUN10_FM8) { fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1; } else if (option == DITHER_OPTION_FM10) { fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2; } fmt_bit_depth->pixel_encoding = pixel_encoding; } enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) { struct dc_link *link = stream->link; struct timing_generator *tg = dc->res_pool->timing_generators[0]; enum dc_status res = DC_OK; calculate_phy_pix_clks(stream); if (!tg->funcs->validate_timing(tg, &stream->timing)) res = DC_FAIL_CONTROLLER_VALIDATE; if (res == DC_OK) { if (link->ep_type == DISPLAY_ENDPOINT_PHY && !link->link_enc->funcs->validate_output_with_stream( link->link_enc, stream)) res = DC_FAIL_ENC_VALIDATE; } /* TODO: validate audio ASIC caps, encoder */ if (res == DC_OK) res = dc->link_srv->validate_mode_timing(stream, link, &stream->timing); return res; } enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state) { enum dc_status res = DC_OK; /* check if surface has invalid dimensions */ if (plane_state->src_rect.width == 0 || plane_state->src_rect.height == 0 || plane_state->dst_rect.width == 0 || plane_state->dst_rect.height == 0) return DC_FAIL_SURFACE_VALIDATE; /* TODO For now validates pixel format only */ if (dc->res_pool->funcs->validate_plane) return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps); return res; } unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format) { switch (format) { case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: return 8; case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: return 12; case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: case SURFACE_PIXEL_FORMAT_GRPH_RGB565: case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: return 16; case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: case SURFACE_PIXEL_FORMAT_GRPH_RGBE: case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: return 32; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: return 64; default: ASSERT_CRITICAL(false); return -1; } } static unsigned int get_max_audio_sample_rate(struct audio_mode *modes) { if (modes) { if (modes->sample_rates.rate.RATE_192) return 192000; if (modes->sample_rates.rate.RATE_176_4) return 176400; if (modes->sample_rates.rate.RATE_96) return 96000; if (modes->sample_rates.rate.RATE_88_2) return 88200; if (modes->sample_rates.rate.RATE_48) return 
48000; if (modes->sample_rates.rate.RATE_44_1) return 44100; if (modes->sample_rates.rate.RATE_32) return 32000; } /*original logic when no audio info*/ return 441000; } void get_audio_check(struct audio_info *aud_modes, struct audio_check *audio_chk) { unsigned int i; unsigned int max_sample_rate = 0; if (aud_modes) { audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/ audio_chk->max_audiosample_rate = 0; for (i = 0; i < aud_modes->mode_count; i++) { max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]); if (audio_chk->max_audiosample_rate < max_sample_rate) audio_chk->max_audiosample_rate = max_sample_rate; /*dts takes the same as type 2: AP = 0.25*/ } /*check which one take more bandwidth*/ if (audio_chk->max_audiosample_rate > 192000) audio_chk->audio_packet_type = 0x9;/*AP =1*/ audio_chk->acat = 0;/*not support*/ } } static struct hpo_dp_link_encoder *get_temp_hpo_dp_link_enc( const struct resource_context *res_ctx, const struct resource_pool *const pool, const struct dc_link *link) { struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL; int enc_index; enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, link); if (enc_index < 0) enc_index = find_free_hpo_dp_link_enc(res_ctx, pool); if (enc_index >= 0) hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index]; return hpo_dp_link_enc; } bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings) { const struct dc *dc = link->dc; const struct resource_context *res_ctx = &dc->current_state->res_ctx; memset(link_res, 0, sizeof(*link_res)); if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx, dc->res_pool, link); if (!link_res->hpo_dp_link_enc) return false; } return true; } void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context) { int i, j; struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd; /* If pipe backend is reset, need to reset pipe syncd status */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (!resource_is_pipe_type(pipe_ctx_old, OTG_MASTER)) continue; if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { /* Reset all the syncd pipes from the disabled pipe */ for (j = 0; j < dc->res_pool->pipe_count; j++) { pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j]; if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) || !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd)) SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j); } } } } void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, struct dc_state *context, uint8_t disabled_master_pipe_idx) { int i; struct pipe_ctx *pipe_ctx, *pipe_ctx_check; pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx]; if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) || !IS_PIPE_SYNCD_VALID(pipe_ctx)) SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx); /* for the pipe disabled, check if any slave pipe exists and assert */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe_ctx_check = &context->res_ctx.pipe_ctx[i]; if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) && IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) { struct pipe_ctx *first_pipe = pipe_ctx_check; while (first_pipe->prev_odm_pipe) first_pipe = first_pipe->prev_odm_pipe; /* When ODM combine is enabled, 
this case is expected. If the disabled pipe * is part of the ODM tree, then we should not print an error. * */ if (first_pipe->pipe_idx == disabled_master_pipe_idx) continue; DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", i, disabled_master_pipe_idx); } } } void reset_sync_context_for_pipe(const struct dc *dc, struct dc_state *context, uint8_t pipe_idx) { int i; struct pipe_ctx *pipe_ctx_reset; /* reset the otg sync context for the pipe and its slave pipes if any */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe_ctx_reset = &context->res_ctx.pipe_ctx[i]; if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) && IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx)) SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i); } } uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter) { /* TODO - get transmitter to phy idx mapping from DMUB */ uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A; if (dc->ctx->dce_version == DCN_VERSION_3_1 && dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { switch (transmitter) { case TRANSMITTER_UNIPHY_A: phy_idx = 0; break; case TRANSMITTER_UNIPHY_B: phy_idx = 1; break; case TRANSMITTER_UNIPHY_C: phy_idx = 5; break; case TRANSMITTER_UNIPHY_D: phy_idx = 6; break; case TRANSMITTER_UNIPHY_E: phy_idx = 4; break; default: phy_idx = 0; break; } } return phy_idx; } const struct link_hwss *get_link_hwss(const struct dc_link *link, const struct link_resource *link_res) { /* Link_hwss is only accessible by getter function instead of accessing * by pointers in dc with the intent to protect against breaking polymorphism. */ if (can_use_hpo_dp_link_hwss(link, link_res)) /* TODO: some assumes that if decided link settings is 128b/132b * channel coding format hpo_dp_link_enc should be used. * Others believe that if hpo_dp_link_enc is available in link * resource then hpo_dp_link_enc must be used. This bound between * hpo_dp_link_enc != NULL and decided link settings is loosely coupled * with a premise that both hpo_dp_link_enc pointer and decided link * settings are determined based on single policy function like * "decide_link_settings" from upper layer. This "convention" * cannot be maintained and enforced at current level. * Therefore a refactor is due so we can enforce a strong bound * between those two parameters at this level. * * To put it simple, we want to make enforcement at low level so that * we will not return link hwss if caller plans to do 8b/10b * with an hpo encoder. Or we can return a very dummy one that doesn't * do work for all functions */ return (requires_fixed_vs_pe_retimer_hpo_link_hwss(link) ? get_hpo_fixed_vs_pe_retimer_dp_link_hwss() : get_hpo_dp_link_hwss()); else if (can_use_dpia_link_hwss(link, link_res)) return get_dpia_link_hwss(); else if (can_use_dio_link_hwss(link, link_res)) return (requires_fixed_vs_pe_retimer_dio_link_hwss(link)) ? get_dio_fixed_vs_pe_retimer_link_hwss() : get_dio_link_hwss(); else return get_virtual_link_hwss(); } bool is_h_timing_divisible_by_2(struct dc_stream_state *stream) { bool divisible = false; uint16_t h_blank_start = 0; uint16_t h_blank_end = 0; if (stream) { h_blank_start = stream->timing.h_total - stream->timing.h_front_porch; h_blank_end = h_blank_start - stream->timing.h_addressable; /* HTOTAL, Hblank start/end, and Hsync start/end all must be * divisible by 2 in order for the horizontal timing params * to be considered divisible by 2. Hsync start is always 0. 
*/ divisible = (stream->timing.h_total % 2 == 0) && (h_blank_start % 2 == 0) && (h_blank_end % 2 == 0) && (stream->timing.h_sync_width % 2 == 0); } return divisible; } bool dc_resource_acquire_secondary_pipe_for_mpc_odm( const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm) { int pipe_idx = sec_pipe->pipe_idx; struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev; const struct resource_pool *pool = dc->res_pool; sec_top = sec_pipe->top_pipe; sec_bottom = sec_pipe->bottom_pipe; sec_next = sec_pipe->next_odm_pipe; sec_prev = sec_pipe->prev_odm_pipe; *sec_pipe = *pri_pipe; sec_pipe->top_pipe = sec_top; sec_pipe->bottom_pipe = sec_bottom; sec_pipe->next_odm_pipe = sec_next; sec_pipe->prev_odm_pipe = sec_prev; sec_pipe->pipe_idx = pipe_idx; sec_pipe->plane_res.mi = pool->mis[pipe_idx]; sec_pipe->plane_res.hubp = pool->hubps[pipe_idx]; sec_pipe->plane_res.ipp = pool->ipps[pipe_idx]; sec_pipe->plane_res.xfm = pool->transforms[pipe_idx]; sec_pipe->plane_res.dpp = pool->dpps[pipe_idx]; sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst; sec_pipe->stream_res.dsc = NULL; if (odm) { if (!sec_pipe->top_pipe) sec_pipe->stream_res.opp = pool->opps[pipe_idx]; else sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp; if (sec_pipe->stream->timing.flags.DSC == 1) { #if defined(CONFIG_DRM_AMD_DC_FP) dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx); #endif ASSERT(sec_pipe->stream_res.dsc); if (sec_pipe->stream_res.dsc == NULL) return false; } #if defined(CONFIG_DRM_AMD_DC_FP) dcn20_build_mapped_resource(dc, state, sec_pipe->stream); #endif } return true; } enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { if (dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) { pipe_ctx->stream_res.hpo_dp_stream_enc = find_first_free_match_hpo_dp_stream_enc_for_link( &context->res_ctx, dc->res_pool, pipe_ctx->stream); if (!pipe_ctx->stream_res.hpo_dp_stream_enc) return DC_NO_STREAM_ENC_RESOURCE; update_hpo_dp_stream_engine_usage( &context->res_ctx, dc->res_pool, pipe_ctx->stream_res.hpo_dp_stream_enc, true); } if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) { if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream)) return DC_NO_LINK_ENC_RESOURCE; } } else { if (pipe_ctx->stream_res.hpo_dp_stream_enc) { update_hpo_dp_stream_engine_usage( &context->res_ctx, dc->res_pool, pipe_ctx->stream_res.hpo_dp_stream_enc, false); pipe_ctx->stream_res.hpo_dp_stream_enc = NULL; } if (pipe_ctx->link_res.hpo_dp_link_enc) remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream); } return DC_OK; }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
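/*
 * Editor's note (illustrative sketch, not part of the kernel source above):
 * set_avi_info_frame() and patch_gamut_packet_checksum() in dc_resource.c
 * both finish by computing the HDMI InfoFrame checksum: the sum of the
 * header bytes (type, version, length), the payload bytes, and the checksum
 * byte itself must be 0 modulo 256. The minimal userspace sketch below
 * reproduces only that arithmetic; the sample values and the name
 * avi_payload are hypothetical and do not appear in the driver.
 */
#include <stdint.h>
#include <stdio.h>

/* One-byte complement checksum over header + payload, as used for AVI/gamut packets. */
static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
                                  uint8_t length, const uint8_t *payload)
{
        uint8_t sum = type + version + length;  /* uint8_t arithmetic wraps modulo 256 */
        uint8_t i;

        for (i = 0; i < length; i++)
                sum += payload[i];

        return (uint8_t)(0x100 - sum);
}

int main(void)
{
        /* Hypothetical 13-byte payload for a version 2 AVI InfoFrame. */
        uint8_t avi_payload[13] = { 0x50, 0xa8, 0x00, 0x10, 0x00 };
        uint8_t chk = infoframe_checksum(0x82, 0x02, 0x0d, avi_payload);

        printf("checksum byte: 0x%02x\n", chk);
        return 0;
}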
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* DC interface (public) */ #include "dm_services.h" #include "dc.h" /* DC core (private) */ #include "core_types.h" #include "transform.h" #include "dpp.h" /******************************************************************************* * Private functions ******************************************************************************/ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; plane_state->gamma_correction = dc_create_gamma(); if (plane_state->gamma_correction != NULL) plane_state->gamma_correction->is_identity = true; plane_state->in_transfer_func = dc_create_transfer_func(); if (plane_state->in_transfer_func != NULL) { plane_state->in_transfer_func->type = TF_TYPE_BYPASS; } plane_state->in_shaper_func = dc_create_transfer_func(); if (plane_state->in_shaper_func != NULL) { plane_state->in_shaper_func->type = TF_TYPE_BYPASS; } plane_state->lut3d_func = dc_create_3dlut_func(); plane_state->blend_tf = dc_create_transfer_func(); if (plane_state->blend_tf != NULL) { plane_state->blend_tf->type = TF_TYPE_BYPASS; } plane_state->pre_multiplied_alpha = true; } static void dc_plane_destruct(struct dc_plane_state *plane_state) { if (plane_state->gamma_correction != NULL) { dc_gamma_release(&plane_state->gamma_correction); } if (plane_state->in_transfer_func != NULL) { dc_transfer_func_release( plane_state->in_transfer_func); plane_state->in_transfer_func = NULL; } if (plane_state->in_shaper_func != NULL) { dc_transfer_func_release( plane_state->in_shaper_func); plane_state->in_shaper_func = NULL; } if (plane_state->lut3d_func != NULL) { dc_3dlut_func_release( plane_state->lut3d_func); plane_state->lut3d_func = NULL; } if (plane_state->blend_tf != NULL) { dc_transfer_func_release( plane_state->blend_tf); plane_state->blend_tf = NULL; } } /******************************************************************************* * Public functions ******************************************************************************/ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, uint32_t controller_id) { plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1; /*register_flip_interrupt(surface);*/ } struct dc_plane_state *dc_create_plane_state(struct dc *dc) { struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), GFP_KERNEL); if (NULL == plane_state) return NULL; 
kref_init(&plane_state->refcount); dc_plane_construct(dc->ctx, plane_state); return plane_state; } /* ***************************************************************************** * Function: dc_plane_get_status * * @brief * Looks up the pipe context of plane_state and updates the pending status * of the pipe context. Then returns plane_state->status * * @param [in] plane_state: pointer to the plane_state to get the status of ***************************************************************************** */ const struct dc_plane_status *dc_plane_get_status( const struct dc_plane_state *plane_state) { const struct dc_plane_status *plane_status; struct dc *dc; int i; if (!plane_state || !plane_state->ctx || !plane_state->ctx->dc) { ASSERT(0); return NULL; /* remove this if above assert never hit */ } plane_status = &plane_state->status; dc = plane_state->ctx->dc; if (dc->current_state == NULL) return NULL; /* Find the current plane state and set its pending bit to false */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; pipe_ctx->plane_state->status.is_flip_pending = false; break; } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; dc->hwss.update_pending_status(pipe_ctx); } return plane_status; } void dc_plane_state_retain(struct dc_plane_state *plane_state) { kref_get(&plane_state->refcount); } static void dc_plane_state_free(struct kref *kref) { struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); dc_plane_destruct(plane_state); kvfree(plane_state); } void dc_plane_state_release(struct dc_plane_state *plane_state) { kref_put(&plane_state->refcount, dc_plane_state_free); } void dc_gamma_retain(struct dc_gamma *gamma) { kref_get(&gamma->refcount); } static void dc_gamma_free(struct kref *kref) { struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount); kvfree(gamma); } void dc_gamma_release(struct dc_gamma **gamma) { kref_put(&(*gamma)->refcount, dc_gamma_free); *gamma = NULL; } struct dc_gamma *dc_create_gamma(void) { struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL); if (gamma == NULL) goto alloc_fail; kref_init(&gamma->refcount); return gamma; alloc_fail: return NULL; } void dc_transfer_func_retain(struct dc_transfer_func *tf) { kref_get(&tf->refcount); } static void dc_transfer_func_free(struct kref *kref) { struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount); kvfree(tf); } void dc_transfer_func_release(struct dc_transfer_func *tf) { kref_put(&tf->refcount, dc_transfer_func_free); } struct dc_transfer_func *dc_create_transfer_func(void) { struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL); if (tf == NULL) goto alloc_fail; kref_init(&tf->refcount); return tf; alloc_fail: return NULL; } static void dc_3dlut_func_free(struct kref *kref) { struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount); kvfree(lut); } struct dc_3dlut *dc_create_3dlut_func(void) { struct dc_3dlut *lut = kvzalloc(sizeof(*lut), GFP_KERNEL); if (lut == NULL) goto alloc_fail; kref_init(&lut->refcount); lut->state.raw = 0; return lut; alloc_fail: return NULL; } void dc_3dlut_func_release(struct dc_3dlut *lut) { kref_put(&lut->refcount, dc_3dlut_func_free); } void dc_3dlut_func_retain(struct dc_3dlut *lut) { kref_get(&lut->refcount); }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
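/*
 * Editor's note (illustrative sketch, not part of the kernel source above):
 * dc_surface.c manages every colour-pipeline object (plane state, gamma,
 * transfer function, 3D LUT) with the same create/retain/release pattern
 * built on struct kref: the allocator initialises the count to 1, retain
 * increments it, and release decrements it and frees the object when it
 * reaches zero. The userspace analogue below shows only that lifecycle;
 * struct demo_obj and its functions are placeholders standing in for the
 * kref/kvzalloc machinery, not driver APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_obj {
        unsigned int refcount;
        int payload;
};

static struct demo_obj *demo_obj_create(int payload)
{
        struct demo_obj *obj = calloc(1, sizeof(*obj));

        if (!obj)
                return NULL;
        obj->refcount = 1;      /* mirrors kref_init() in dc_create_plane_state() */
        obj->payload = payload;
        return obj;
}

static void demo_obj_retain(struct demo_obj *obj)
{
        obj->refcount++;        /* mirrors kref_get() in dc_plane_state_retain() */
}

static void demo_obj_release(struct demo_obj *obj)
{
        if (--obj->refcount == 0) {     /* mirrors kref_put() invoking the free callback */
                printf("freeing object with payload %d\n", obj->payload);
                free(obj);
        }
}

int main(void)
{
        struct demo_obj *obj = demo_obj_create(42);

        demo_obj_retain(obj);   /* a second owner, e.g. a pipe context, takes a reference */
        demo_obj_release(obj);  /* first owner drops its reference */
        demo_obj_release(obj);  /* last reference: the object is freed here */
        return 0;
}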
/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ /* FILE POLICY AND INTENDED USAGE: * This file provides single entrance to link functionality declared in dc * public headers. The file is intended to be used as a thin translation layer * that directly calls link internal functions without adding new functional * behavior. * * When exporting a new link related dc function, add function declaration in * dc.h with detail interface documentation, then add function implementation * in this file which calls link functions. */ #include "link.h" #include "dce/dce_i2c.h" struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index) { return dc->links[link_index]; } void dc_get_edp_links(const struct dc *dc, struct dc_link **edp_links, int *edp_num) { int i; *edp_num = 0; for (i = 0; i < dc->link_count; i++) { // report any eDP links, even unconnected DDI's if (!dc->links[i]) continue; if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) { edp_links[*edp_num] = dc->links[i]; if (++(*edp_num) == MAX_NUM_EDP) return; } } } bool dc_get_edp_link_panel_inst(const struct dc *dc, const struct dc_link *link, unsigned int *inst_out) { struct dc_link *edp_links[MAX_NUM_EDP]; int edp_num, i; *inst_out = 0; if (link->connector_signal != SIGNAL_TYPE_EDP) return false; dc_get_edp_links(dc, edp_links, &edp_num); for (i = 0; i < edp_num; i++) { if (link == edp_links[i]) break; (*inst_out)++; } return true; } bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) { return link->dc->link_srv->detect_link(link, reason); } bool dc_link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type) { return link->dc->link_srv->detect_connection_type(link, type); } const struct dc_link_status *dc_link_get_status(const struct dc_link *link) { return link->dc->link_srv->get_status(link); } /* return true if the connected receiver supports the hdcp version */ bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal) { return link->dc->link_srv->is_hdcp1x_supported(link, signal); } bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal) { return link->dc->link_srv->is_hdcp2x_supported(link, signal); } void dc_link_clear_dprx_states(struct dc_link *link) { link->dc->link_srv->clear_dprx_states(link); } bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link) { return link->dc->link_srv->reset_cur_dp_mst_topology(link); } uint32_t dc_link_bandwidth_kbps( const struct dc_link *link, const 
struct dc_link_settings *link_settings) { return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings); } void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) { dc->link_srv->get_cur_res_map(dc, map); } void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) { dc->link_srv->restore_res_map(dc, map); } bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx) { struct dc_link *link = pipe_ctx->stream->link; return link->dc->link_srv->update_dsc_config(pipe_ctx); } bool dc_is_oem_i2c_device_present( struct dc *dc, size_t slave_address) { if (dc->res_pool->oem_device) return dce_i2c_oem_device_present( dc->res_pool, dc->res_pool->oem_device, slave_address); return false; } bool dc_submit_i2c( struct dc *dc, uint32_t link_index, struct i2c_command *cmd) { struct dc_link *link = dc->links[link_index]; struct ddc_service *ddc = link->ddc; return dce_i2c_submit_command( dc->res_pool, ddc->ddc_pin, cmd); } bool dc_submit_i2c_oem( struct dc *dc, struct i2c_command *cmd) { struct ddc_service *ddc = dc->res_pool->oem_device; if (ddc) return dce_i2c_submit_command( dc->res_pool, ddc->ddc_pin, cmd); return false; } void dc_link_dp_handle_automated_test(struct dc_link *link) { link->dc->link_srv->dp_handle_automated_test(link); } bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) { return link->dc->link_srv->dp_set_test_pattern(link, test_pattern, test_pattern_color_space, p_link_settings, p_custom_pattern, cust_pattern_size); } void dc_link_set_drive_settings(struct dc *dc, struct link_training_settings *lt_settings, struct dc_link *link) { struct link_resource link_res; dc->link_srv->get_cur_link_res(link, &link_res); dc->link_srv->dp_set_drive_settings(link, &link_res, lt_settings); } void dc_link_set_preferred_link_settings(struct dc *dc, struct dc_link_settings *link_setting, struct dc_link *link) { dc->link_srv->dp_set_preferred_link_settings(dc, link_setting, link); } void dc_link_set_preferred_training_settings(struct dc *dc, struct dc_link_settings *link_setting, struct dc_link_training_overrides *lt_overrides, struct dc_link *link, bool skip_immediate_retrain) { dc->link_srv->dp_set_preferred_training_settings(dc, link_setting, lt_overrides, link, skip_immediate_retrain); } bool dc_dp_trace_is_initialized(struct dc_link *link) { return link->dc->link_srv->dp_trace_is_initialized(link); } void dc_dp_trace_set_is_logged_flag(struct dc_link *link, bool in_detection, bool is_logged) { link->dc->link_srv->dp_trace_set_is_logged_flag(link, in_detection, is_logged); } bool dc_dp_trace_is_logged(struct dc_link *link, bool in_detection) { return link->dc->link_srv->dp_trace_is_logged(link, in_detection); } unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link, bool in_detection) { return link->dc->link_srv->dp_trace_get_lt_end_timestamp(link, in_detection); } const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link, bool in_detection) { return link->dc->link_srv->dp_trace_get_lt_counts(link, in_detection); } unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link) { return link->dc->link_srv->dp_trace_get_link_loss_count(link); } struct dc_sink *dc_link_add_remote_sink( struct dc_link *link, const uint8_t *edid, int len, struct dc_sink_init_data *init_data) { return 
link->dc->link_srv->add_remote_sink(link, edid, len, init_data); } void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) { link->dc->link_srv->remove_remote_sink(link, sink); } int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) { const struct dc *dc = ddc->link->dc; return dc->link_srv->aux_transfer_raw( ddc, payload, operation_result); } uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(const struct dc *dc, uint8_t bw) { return dc->link_srv->bw_kbps_from_raw_frl_link_rate_data(bw); } bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) { return link->dc->link_srv->edp_decide_link_settings(link, link_setting, req_bw); } bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) { return link->dc->link_srv->dp_get_max_link_enc_cap(link, max_link_enc_cap); } enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format( const struct dc_link *link) { return link->dc->link_srv->mst_decide_link_encoding_format(link); } const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link) { return link->dc->link_srv->dp_get_verified_link_cap(link); } enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link) { if (dc_is_dp_signal(link->connector_signal)) { if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE && link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE) return DC_LINK_ENCODING_HDMI_TMDS; else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == DP_8b_10b_ENCODING) return DC_LINK_ENCODING_DP_8b_10b; else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) return DC_LINK_ENCODING_DP_128b_132b; } else if (dc_is_hdmi_signal(link->connector_signal)) { } return DC_LINK_ENCODING_UNSPECIFIED; } bool dc_link_is_dp_sink_present(struct dc_link *link) { return link->dc->link_srv->dp_is_sink_present(link); } bool dc_link_is_fec_supported(const struct dc_link *link) { return link->dc->link_srv->dp_is_fec_supported(link); } void dc_link_overwrite_extended_receiver_cap( struct dc_link *link) { link->dc->link_srv->dp_overwrite_extended_receiver_cap(link); } bool dc_link_should_enable_fec(const struct dc_link *link) { return link->dc->link_srv->dp_should_enable_fec(link); } int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link( struct dc_link *link, int peak_bw) { return link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw); } void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result) { link->dc->link_srv->dpia_handle_bw_alloc_response(link, bw, result); } bool dc_link_check_link_loss_status( struct dc_link *link, union hpd_irq_data *hpd_irq_dpcd_data) { return link->dc->link_srv->dp_parse_link_loss_status(link, hpd_irq_dpcd_data); } bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) { return link->dc->link_srv->dp_should_allow_hpd_rx_irq(link); } void dc_link_dp_handle_link_loss(struct dc_link *link) { link->dc->link_srv->dp_handle_link_loss(link); } enum dc_status dc_link_dp_read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) { return link->dc->link_srv->dp_read_hpd_rx_irq_data(link, irq_data); } bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, bool defer_handling, bool 
*has_left_work) { return link->dc->link_srv->dp_handle_hpd_rx_irq(link, out_hpd_irq_dpcd_data, out_link_loss, defer_handling, has_left_work); } void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on) { link->dc->link_srv->dpcd_write_rx_power_ctrl(link, on); } enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting) { return link->dc->link_srv->dp_decide_lttpr_mode(link, link_setting); } void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) { link->dc->link_srv->edp_panel_backlight_power_on(link, wait_for_hpd); } int dc_link_get_backlight_level(const struct dc_link *link) { return link->dc->link_srv->edp_get_backlight_level(link); } bool dc_link_get_backlight_level_nits(struct dc_link *link, uint32_t *backlight_millinits_avg, uint32_t *backlight_millinits_peak) { return link->dc->link_srv->edp_get_backlight_level_nits(link, backlight_millinits_avg, backlight_millinits_peak); } bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { return link->dc->link_srv->edp_set_backlight_level(link, backlight_pwm_u16_16, frame_ramp); } bool dc_link_set_backlight_level_nits(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, uint32_t transition_time_in_ms) { return link->dc->link_srv->edp_set_backlight_level_nits(link, isHDR, backlight_millinits, transition_time_in_ms); } int dc_link_get_target_backlight_pwm(const struct dc_link *link) { return link->dc->link_srv->edp_get_target_backlight_pwm(link); } bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state) { return link->dc->link_srv->edp_get_psr_state(link, state); } bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active, bool wait, bool force_static, const unsigned int *power_opts) { return link->dc->link_srv->edp_set_psr_allow_active(link, allow_active, wait, force_static, power_opts); } bool dc_link_setup_psr(struct dc_link *link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context) { return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context); } bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state) { return link->dc->link_srv->edp_get_replay_state(link, state); } bool dc_link_wait_for_t12(struct dc_link *link) { return link->dc->link_srv->edp_wait_for_t12(link); } bool dc_link_get_hpd_state(struct dc_link *link) { return link->dc->link_srv->get_hpd_state(link); } void dc_link_enable_hpd(const struct dc_link *link) { link->dc->link_srv->enable_hpd(link); } void dc_link_disable_hpd(const struct dc_link *link) { link->dc->link_srv->disable_hpd(link); } void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) { link->dc->link_srv->enable_hpd_filter(link, enable); } bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count) { return dc->link_srv->validate_dpia_bandwidth(streams, count); }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
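/*
 * Editor's note (illustrative sketch, not part of the kernel source above):
 * dc_link_exports.c is, per its own file policy comment, a thin translation
 * layer: each public dc_link_* entry point forwards to a function pointer on
 * the link service (dc->link_srv) without adding behaviour. The self-contained
 * sketch below shows that delegation pattern with a hypothetical service
 * table; struct demo_link_service and its fields are placeholders, not the
 * driver's real link_service interface.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_link_service {
        bool (*detect_link)(int link_index);
};

struct demo_dc {
        const struct demo_link_service *link_srv;
};

/* Public wrapper: no logic of its own, it only delegates to the service. */
static bool demo_dc_link_detect(const struct demo_dc *dc, int link_index)
{
        return dc->link_srv->detect_link(link_index);
}

/* One concrete backend implementation of the service. */
static bool demo_detect_link_impl(int link_index)
{
        printf("detecting link %d\n", link_index);
        return true;
}

static const struct demo_link_service demo_service = {
        .detect_link = demo_detect_link_impl,
};

int main(void)
{
        struct demo_dc dc = { .link_srv = &demo_service };

        return demo_dc_link_detect(&dc, 0) ? 0 : 1;
}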
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "core_types.h" #include "timing_generator.h" #include "hw_sequencer.h" #include "hw_sequencer_private.h" #include "basics/dc_common.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) /* used as index in array of black_color_format */ enum black_color_format { BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, BLACK_COLOR_FORMAT_RGB_LIMITED, BLACK_COLOR_FORMAT_YUV_TV, BLACK_COLOR_FORMAT_YUV_CV, BLACK_COLOR_FORMAT_YUV_SUPER_AA, BLACK_COLOR_FORMAT_DEBUG, }; enum dc_color_space_type { COLOR_SPACE_RGB_TYPE, COLOR_SPACE_RGB_LIMITED_TYPE, COLOR_SPACE_YCBCR601_TYPE, COLOR_SPACE_YCBCR709_TYPE, COLOR_SPACE_YCBCR2020_TYPE, COLOR_SPACE_YCBCR601_LIMITED_TYPE, COLOR_SPACE_YCBCR709_LIMITED_TYPE, COLOR_SPACE_YCBCR709_BLACK_TYPE, }; static const struct tg_color black_color_format[] = { /* BlackColorFormat_RGB_FullRange */ {0, 0, 0}, /* BlackColorFormat_RGB_Limited */ {0x40, 0x40, 0x40}, /* BlackColorFormat_YUV_TV */ {0x200, 0x40, 0x200}, /* BlackColorFormat_YUV_CV */ {0x1f4, 0x40, 0x1f4}, /* BlackColorFormat_YUV_SuperAA */ {0x1a2, 0x20, 0x1a2}, /* visual confirm debug */ {0xff, 0xff, 0}, }; struct out_csc_color_matrix_type { enum dc_color_space_type color_space_type; uint16_t regval[12]; }; static const struct out_csc_color_matrix_type output_csc_matrix[] = { { COLOR_SPACE_RGB_TYPE, { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, { COLOR_SPACE_RGB_LIMITED_TYPE, { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} }, { COLOR_SPACE_YCBCR601_TYPE, { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45, 0xF6B7, 0xE04, 0x1004} }, { COLOR_SPACE_YCBCR709_TYPE, { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA, 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} }, /* TODO: correct values below */ { COLOR_SPACE_YCBCR601_LIMITED_TYPE, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991, 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} }, { COLOR_SPACE_YCBCR709_LIMITED_TYPE, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3, 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }, { COLOR_SPACE_YCBCR2020_TYPE, { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2, 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} }, { COLOR_SPACE_YCBCR709_BLACK_TYPE, { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} }, }; static bool is_rgb_type( enum dc_color_space color_space) { bool ret = false; if (color_space == COLOR_SPACE_SRGB || color_space == 
COLOR_SPACE_XR_RGB || color_space == COLOR_SPACE_MSREF_SCRGB || color_space == COLOR_SPACE_2020_RGB_FULLRANGE || color_space == COLOR_SPACE_ADOBERGB || color_space == COLOR_SPACE_DCIP3 || color_space == COLOR_SPACE_DOLBYVISION) ret = true; return ret; } static bool is_rgb_limited_type( enum dc_color_space color_space) { bool ret = false; if (color_space == COLOR_SPACE_SRGB_LIMITED || color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) ret = true; return ret; } static bool is_ycbcr601_type( enum dc_color_space color_space) { bool ret = false; if (color_space == COLOR_SPACE_YCBCR601 || color_space == COLOR_SPACE_XV_YCC_601) ret = true; return ret; } static bool is_ycbcr601_limited_type( enum dc_color_space color_space) { bool ret = false; if (color_space == COLOR_SPACE_YCBCR601_LIMITED) ret = true; return ret; } static bool is_ycbcr709_type( enum dc_color_space color_space) { bool ret = false; if (color_space == COLOR_SPACE_YCBCR709 || color_space == COLOR_SPACE_XV_YCC_709) ret = true; return ret; } static bool is_ycbcr2020_type( enum dc_color_space color_space) { bool ret = false; if (color_space == COLOR_SPACE_2020_YCBCR) ret = true; return ret; } static bool is_ycbcr709_limited_type( enum dc_color_space color_space) { bool ret = false; if (color_space == COLOR_SPACE_YCBCR709_LIMITED) ret = true; return ret; } static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space) { enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE; if (is_rgb_type(color_space)) type = COLOR_SPACE_RGB_TYPE; else if (is_rgb_limited_type(color_space)) type = COLOR_SPACE_RGB_LIMITED_TYPE; else if (is_ycbcr601_type(color_space)) type = COLOR_SPACE_YCBCR601_TYPE; else if (is_ycbcr709_type(color_space)) type = COLOR_SPACE_YCBCR709_TYPE; else if (is_ycbcr601_limited_type(color_space)) type = COLOR_SPACE_YCBCR601_LIMITED_TYPE; else if (is_ycbcr709_limited_type(color_space)) type = COLOR_SPACE_YCBCR709_LIMITED_TYPE; else if (is_ycbcr2020_type(color_space)) type = COLOR_SPACE_YCBCR2020_TYPE; else if (color_space == COLOR_SPACE_YCBCR709) type = COLOR_SPACE_YCBCR709_BLACK_TYPE; else if (color_space == COLOR_SPACE_YCBCR709_BLACK) type = COLOR_SPACE_YCBCR709_BLACK_TYPE; return type; } const uint16_t *find_color_matrix(enum dc_color_space color_space, uint32_t *array_size) { int i; enum dc_color_space_type type; const uint16_t *val = NULL; int arr_size = NUM_ELEMENTS(output_csc_matrix); type = get_color_space_type(color_space); for (i = 0; i < arr_size; i++) if (output_csc_matrix[i].color_space_type == type) { val = output_csc_matrix[i].regval; *array_size = 12; break; } return val; } void color_space_to_black_color( const struct dc *dc, enum dc_color_space colorspace, struct tg_color *black_color) { switch (colorspace) { case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_BLACK: case COLOR_SPACE_YCBCR601_LIMITED: case COLOR_SPACE_YCBCR709_LIMITED: case COLOR_SPACE_2020_YCBCR: *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV]; break; case COLOR_SPACE_SRGB_LIMITED: *black_color = black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED]; break; /** * Remove default and add case for all color space * so when we forget to add new color space * compiler will give a warning */ case COLOR_SPACE_UNKNOWN: case COLOR_SPACE_SRGB: case COLOR_SPACE_XR_RGB: case COLOR_SPACE_MSREF_SCRGB: case COLOR_SPACE_XV_YCC_709: case COLOR_SPACE_XV_YCC_601: case COLOR_SPACE_2020_RGB_FULLRANGE: case COLOR_SPACE_2020_RGB_LIMITEDRANGE: case COLOR_SPACE_ADOBERGB: case COLOR_SPACE_DCIP3: case 
COLOR_SPACE_DISPLAYNATIVE: case COLOR_SPACE_DOLBYVISION: case COLOR_SPACE_APPCTRL: case COLOR_SPACE_CUSTOMPOINTS: /* fefault is sRGB black (full range). */ *black_color = black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE]; /* default is sRGB black 0. */ break; } } bool hwss_wait_for_blank_complete( struct timing_generator *tg) { int counter; /* Not applicable if the pipe is not primary, save 300ms of boot time */ if (!tg->funcs->is_blanked) return true; for (counter = 0; counter < 100; counter++) { if (tg->funcs->is_blanked(tg)) break; msleep(1); } if (counter == 100) { dm_error("DC: failed to blank crtc!\n"); return false; } return true; } void get_mpctree_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) { const struct tg_color pipe_colors[6] = { {MAX_TG_COLOR_VALUE, 0, 0}, /* red */ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, /* orange */ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */ {0, MAX_TG_COLOR_VALUE, 0}, /* green */ {0, 0, MAX_TG_COLOR_VALUE}, /* blue */ {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, /* purple */ }; struct pipe_ctx *top_pipe = pipe_ctx; while (top_pipe->top_pipe) top_pipe = top_pipe->top_pipe; *color = pipe_colors[top_pipe->pipe_idx]; } void get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; switch (pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB8888: /* set border color to red */ color->color_r_cr = color_value; if (pipe_ctx->plane_state->layer_index > 0) { /* set border color to pink */ color->color_b_cb = color_value; color->color_g_y = color_value * 0.5; } break; case PIXEL_FORMAT_ARGB2101010: /* set border color to blue */ color->color_b_cb = color_value; if (pipe_ctx->plane_state->layer_index > 0) { /* set border color to cyan */ color->color_g_y = color_value; } break; case PIXEL_FORMAT_420BPP8: /* set border color to green */ color->color_g_y = color_value; break; case PIXEL_FORMAT_420BPP10: /* set border color to yellow */ color->color_g_y = color_value; color->color_r_cr = color_value; break; case PIXEL_FORMAT_FP16: /* set border color to white */ color->color_r_cr = color_value; color->color_b_cb = color_value; color->color_g_y = color_value; if (pipe_ctx->plane_state->layer_index > 0) { /* set border color to orange */ color->color_g_y = 0.22 * color_value; color->color_b_cb = 0; } break; default: break; } } void get_hdr_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; bool is_sdr = false; /* Determine the overscan color based on the top-most (desktop) plane's context */ struct pipe_ctx *top_pipe_ctx = pipe_ctx; while (top_pipe_ctx->top_pipe != NULL) top_pipe_ctx = top_pipe_ctx->top_pipe; switch (top_pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB2101010: if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { /* HDR10, ARGB2101010 - set border color to red */ color->color_r_cr = color_value; } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 ARGB2101010 - set border color to pink */ color->color_r_cr = color_value; color->color_b_cb = color_value; } else is_sdr = true; break; case PIXEL_FORMAT_FP16: if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { /* HDR10, FP16 - set border color to blue */ color->color_b_cb = color_value; } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 HDR - 
set border color to green */ color->color_g_y = color_value; } else is_sdr = true; break; default: is_sdr = true; break; } if (is_sdr) { /* SDR - set border color to Gray */ color->color_r_cr = color_value/2; color->color_b_cb = color_value/2; color->color_g_y = color_value/2; } } void get_subvp_visual_confirm_color( struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; bool enable_subvp = false; int i; if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) return; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { /* SubVP enable - red */ color->color_g_y = 0; color->color_b_cb = 0; color->color_r_cr = color_value; enable_subvp = true; if (pipe_ctx->stream == pipe->stream) return; break; } } if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { color->color_r_cr = 0; if (pipe_ctx->stream->allow_freesync == 1) { /* SubVP enable and DRR on - green */ color->color_b_cb = 0; color->color_g_y = color_value; } else { /* SubVP enable and No DRR - blue */ color->color_g_y = 0; color->color_b_cb = color_value; } } } void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], int *num_steps, struct pipe_ctx *pipe_ctx) { struct dc_plane_state *plane = pipe_ctx->plane_state; struct dc_stream_state *stream = pipe_ctx->stream; struct dce_hwseq *hws = dc->hwseq; struct pipe_ctx *current_pipe = NULL; struct pipe_ctx *current_mpc_pipe = NULL; unsigned int i = 0; *num_steps = 0; // Initialize to 0 if (!plane || !stream) return; if (dc->hwss.subvp_pipe_control_lock_fast) { block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } if (dc->hwss.pipe_control_lock) { block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; block_sequence[*num_steps].params.pipe_control_lock_params.lock = true; block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; (*num_steps)++; } for (i = 0; i < dmub_cmd_count; i++) { block_sequence[*num_steps].params.send_dmcub_cmd_params.ctx = dc->ctx; block_sequence[*num_steps].params.send_dmcub_cmd_params.cmd = &(dc_dmub_cmd[i].dmub_cmd); block_sequence[*num_steps].params.send_dmcub_cmd_params.wait_type = dc_dmub_cmd[i].wait_type; block_sequence[*num_steps].func = DMUB_SEND_DMCUB_CMD; (*num_steps)++; } current_pipe = pipe_ctx; while (current_pipe) { current_mpc_pipe = current_pipe; while (current_mpc_pipe) { if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) { block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; (*num_steps)++; } if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) { 
block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc; block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips; block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; (*num_steps)++; } if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { block_sequence[*num_steps].params.update_plane_addr_params.dc = dc; block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR; (*num_steps)++; } if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) { block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc; block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state; block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; (*num_steps)++; } if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) { block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP; (*num_steps)++; } if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) { block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = DPP_SETUP_DPP; (*num_steps)++; } if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) { block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; (*num_steps)++; } if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) { block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc; block_sequence[*num_steps].params.set_output_transfer_func_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].params.set_output_transfer_func_params.stream = current_mpc_pipe->stream; block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; (*num_steps)++; } if (current_mpc_pipe->stream->update_flags.bits.out_csc) { block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpc = dc->res_pool->mpc; block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst; block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.power_on = true; block_sequence[*num_steps].func = MPC_POWER_ON_MPC_MEM_PWR; (*num_steps)++; if (current_mpc_pipe->stream->csc_color_matrix.enable_adjustment == true) { block_sequence[*num_steps].params.set_output_csc_params.mpc = dc->res_pool->mpc; block_sequence[*num_steps].params.set_output_csc_params.opp_id = current_mpc_pipe->stream_res.opp->inst; block_sequence[*num_steps].params.set_output_csc_params.regval = current_mpc_pipe->stream->csc_color_matrix.matrix; block_sequence[*num_steps].params.set_output_csc_params.ocsc_mode = MPC_OUTPUT_CSC_COEF_A; block_sequence[*num_steps].func = MPC_SET_OUTPUT_CSC; (*num_steps)++; } else { block_sequence[*num_steps].params.set_ocsc_default_params.mpc = dc->res_pool->mpc; block_sequence[*num_steps].params.set_ocsc_default_params.opp_id = current_mpc_pipe->stream_res.opp->inst; 
block_sequence[*num_steps].params.set_ocsc_default_params.color_space = current_mpc_pipe->stream->output_color_space; block_sequence[*num_steps].params.set_ocsc_default_params.ocsc_mode = MPC_OUTPUT_CSC_COEF_A; block_sequence[*num_steps].func = MPC_SET_OCSC_DEFAULT; (*num_steps)++; } } current_mpc_pipe = current_mpc_pipe->bottom_pipe; } current_pipe = current_pipe->next_odm_pipe; } if (dc->hwss.pipe_control_lock) { block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; block_sequence[*num_steps].params.pipe_control_lock_params.lock = false; block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; (*num_steps)++; } if (dc->hwss.subvp_pipe_control_lock_fast) { block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } current_pipe = pipe_ctx; while (current_pipe) { current_mpc_pipe = current_pipe; while (current_mpc_pipe) { if (!current_mpc_pipe->bottom_pipe && !current_mpc_pipe->next_odm_pipe && current_mpc_pipe->stream && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.bits.addr_update && !current_mpc_pipe->plane_state->skip_manual_trigger) { block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; (*num_steps)++; } current_mpc_pipe = current_mpc_pipe->bottom_pipe; } current_pipe = current_pipe->next_odm_pipe; } } void hwss_execute_sequence(struct dc *dc, struct block_sequence block_sequence[], int num_steps) { unsigned int i; union block_sequence_params *params; struct dce_hwseq *hws = dc->hwseq; for (i = 0; i < num_steps; i++) { params = &(block_sequence[i].params); switch (block_sequence[i].func) { case DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST: dc->hwss.subvp_pipe_control_lock_fast(params); break; case OPTC_PIPE_CONTROL_LOCK: dc->hwss.pipe_control_lock(params->pipe_control_lock_params.dc, params->pipe_control_lock_params.pipe_ctx, params->pipe_control_lock_params.lock); break; case HUBP_SET_FLIP_CONTROL_GSL: dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, params->set_flip_control_gsl_params.flip_immediate); break; case HUBP_PROGRAM_TRIPLEBUFFER: dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, params->program_triplebuffer_params.pipe_ctx, params->program_triplebuffer_params.enableTripleBuffer); break; case HUBP_UPDATE_PLANE_ADDR: dc->hwss.update_plane_addr(params->update_plane_addr_params.dc, params->update_plane_addr_params.pipe_ctx); break; case DPP_SET_INPUT_TRANSFER_FUNC: hws->funcs.set_input_transfer_func(params->set_input_transfer_func_params.dc, params->set_input_transfer_func_params.pipe_ctx, params->set_input_transfer_func_params.plane_state); break; case DPP_PROGRAM_GAMUT_REMAP: dc->hwss.program_gamut_remap(params->program_gamut_remap_params.pipe_ctx); break; case DPP_SETUP_DPP: hwss_setup_dpp(params); break; case DPP_PROGRAM_BIAS_AND_SCALE: hwss_program_bias_and_scale(params); break; case OPTC_PROGRAM_MANUAL_TRIGGER: hwss_program_manual_trigger(params); break; case DPP_SET_OUTPUT_TRANSFER_FUNC: hws->funcs.set_output_transfer_func(params->set_output_transfer_func_params.dc, params->set_output_transfer_func_params.pipe_ctx, 
params->set_output_transfer_func_params.stream); break; case MPC_UPDATE_VISUAL_CONFIRM: dc->hwss.update_visual_confirm_color(params->update_visual_confirm_params.dc, params->update_visual_confirm_params.pipe_ctx, params->update_visual_confirm_params.mpcc_id); break; case MPC_POWER_ON_MPC_MEM_PWR: hwss_power_on_mpc_mem_pwr(params); break; case MPC_SET_OUTPUT_CSC: hwss_set_output_csc(params); break; case MPC_SET_OCSC_DEFAULT: hwss_set_ocsc_default(params); break; case DMUB_SEND_DMCUB_CMD: hwss_send_dmcub_cmd(params); break; default: ASSERT(false); break; } } } void hwss_send_dmcub_cmd(union block_sequence_params *params) { struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd; enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type; dm_execute_dmub_cmd(ctx, cmd, wait_type); } void hwss_program_manual_trigger(union block_sequence_params *params) { struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); } void hwss_setup_dpp(union block_sequence_params *params) { struct pipe_ctx *pipe_ctx = params->setup_dpp_params.pipe_ctx; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; if (dpp && dpp->funcs->dpp_setup) { // program the input csc dpp->funcs->dpp_setup(dpp, plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, plane_state->color_space, NULL); } } void hwss_program_bias_and_scale(union block_sequence_params *params) { struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_bias_and_scale bns_params = {0}; //TODO :for CNVC set scale and bias registers if necessary build_prescale_params(&bns_params, plane_state); if (dpp->funcs->dpp_program_bias_and_scale) dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params) { struct mpc *mpc = params->power_on_mpc_mem_pwr_params.mpc; int mpcc_id = params->power_on_mpc_mem_pwr_params.mpcc_id; bool power_on = params->power_on_mpc_mem_pwr_params.power_on; if (mpc->funcs->power_on_mpc_mem_pwr) mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, power_on); } void hwss_set_output_csc(union block_sequence_params *params) { struct mpc *mpc = params->set_output_csc_params.mpc; int opp_id = params->set_output_csc_params.opp_id; const uint16_t *matrix = params->set_output_csc_params.regval; enum mpc_output_csc_mode ocsc_mode = params->set_output_csc_params.ocsc_mode; if (mpc->funcs->set_output_csc != NULL) mpc->funcs->set_output_csc(mpc, opp_id, matrix, ocsc_mode); } void hwss_set_ocsc_default(union block_sequence_params *params) { struct mpc *mpc = params->set_ocsc_default_params.mpc; int opp_id = params->set_ocsc_default_params.opp_id; enum dc_color_space colorspace = params->set_ocsc_default_params.color_space; enum mpc_output_csc_mode ocsc_mode = params->set_ocsc_default_params.ocsc_mode; if (mpc->funcs->set_ocsc_default != NULL) mpc->funcs->set_ocsc_default(mpc, opp_id, colorspace, ocsc_mode); } void get_mclk_switch_visual_confirm_color( struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; if (!dc->ctx || 
!dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) return; if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) { /* MCLK switching is supported */ if (!pipe_ctx->has_vactive_margin) { /* In Vblank - yellow */ color->color_r_cr = color_value; color->color_g_y = color_value; if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { /* FPO + Vblank - cyan */ color->color_r_cr = 0; color->color_g_y = color_value; color->color_b_cb = color_value; } } else { /* In Vactive - pink */ color->color_r_cr = color_value; color->color_b_cb = color_value; } /* SubVP */ get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); } } void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; /* Determine the overscan color based on the bottom-most plane's context */ struct pipe_ctx *bottom_pipe_ctx = pipe_ctx; while (bottom_pipe_ctx->bottom_pipe != NULL) bottom_pipe_ctx = bottom_pipe_ctx->bottom_pipe; switch (bottom_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) { case DC_SW_LINEAR: /* LINEAR Surface - set border color to red */ color->color_r_cr = color_value; break; default: break; } }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
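The fast-update path in dc_hw_sequencer.c above follows a record-then-replay pattern: hwss_build_fast_sequence() appends each hardware step as a function id plus a params union into block_sequence[], and hwss_execute_sequence() later dispatches every recorded step through a single switch. The following is a minimal, self-contained sketch of that pattern; the step names and payload fields are invented for illustration and are not the driver's real types.

#include <stdio.h>

/* Hypothetical step ids, standing in for the driver's block sequence function enum. */
enum demo_step_func { DEMO_LOCK, DEMO_FLIP, DEMO_UNLOCK };

/* Hypothetical per-step payloads, standing in for union block_sequence_params. */
union demo_params {
	struct { int pipe; int lock; } lock_params;
	struct { int pipe; unsigned long addr; } flip_params;
};

struct demo_step {
	enum demo_step_func func;
	union demo_params params;
};

/* Build phase: append steps and advance *num_steps, as the driver does. */
static void demo_build(struct demo_step seq[], int *num_steps, int pipe, unsigned long addr)
{
	*num_steps = 0;

	seq[*num_steps].func = DEMO_LOCK;
	seq[*num_steps].params.lock_params.pipe = pipe;
	seq[*num_steps].params.lock_params.lock = 1;
	(*num_steps)++;

	seq[*num_steps].func = DEMO_FLIP;
	seq[*num_steps].params.flip_params.pipe = pipe;
	seq[*num_steps].params.flip_params.addr = addr;
	(*num_steps)++;

	seq[*num_steps].func = DEMO_UNLOCK;
	seq[*num_steps].params.lock_params.pipe = pipe;
	seq[*num_steps].params.lock_params.lock = 0;
	(*num_steps)++;
}

/* Execute phase: one switch replays every recorded step in order. */
static void demo_execute(const struct demo_step seq[], int num_steps)
{
	int i;

	for (i = 0; i < num_steps; i++) {
		switch (seq[i].func) {
		case DEMO_LOCK:
		case DEMO_UNLOCK:
			printf("pipe %d lock=%d\n",
			       seq[i].params.lock_params.pipe,
			       seq[i].params.lock_params.lock);
			break;
		case DEMO_FLIP:
			printf("pipe %d flip to 0x%lx\n",
			       seq[i].params.flip_params.pipe,
			       seq[i].params.flip_params.addr);
			break;
		}
	}
}

int main(void)
{
	struct demo_step seq[8];
	int num_steps;

	demo_build(seq, &num_steps, 0, 0x100000ul);
	demo_execute(seq, num_steps);
	return 0;
}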
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "basics/dc_common.h" #include "dc.h" #include "core_types.h" #include "resource.h" #include "ipp.h" #include "timing_generator.h" #include "dc_dmub_srv.h" #define DC_LOGGER dc->ctx->logger /******************************************************************************* * Private functions ******************************************************************************/ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) { if (sink->sink_signal == SIGNAL_TYPE_NONE) stream->signal = stream->link->connector_signal; else stream->signal = sink->sink_signal; if (dc_is_dvi_signal(stream->signal)) { if (stream->ctx->dc->caps.dual_link_dvi && (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK && sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK; else stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; } } static bool dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; stream->sink = dc_sink_data; dc_sink_retain(dc_sink_data); stream->ctx = dc_sink_data->ctx; stream->link = dc_sink_data->link; stream->sink_patches = dc_sink_data->edid_caps.panel_patch; stream->converter_disable_audio = dc_sink_data->converter_disable_audio; stream->qs_bit = dc_sink_data->edid_caps.qs_bit; stream->qy_bit = dc_sink_data->edid_caps.qy_bit; /* Copy audio modes */ /* TODO - Remove this translation */ for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) { stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count; stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code; stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate; stream->audio_info.modes[i].sample_size = dc_sink_data->edid_caps.audio_modes[i].sample_size; } stream->audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count; stream->audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency; stream->audio_info.video_latency = dc_sink_data->edid_caps.video_latency; memmove( stream->audio_info.display_name, dc_sink_data->edid_caps.display_name, AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); stream->audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id; stream->audio_info.product_id = dc_sink_data->edid_caps.product_id; stream->audio_info.flags.all = 
dc_sink_data->edid_caps.speaker_flags; if (dc_sink_data->dc_container_id != NULL) { struct dc_container_id *dc_container_id = dc_sink_data->dc_container_id; stream->audio_info.port_id[0] = dc_container_id->portId[0]; stream->audio_info.port_id[1] = dc_container_id->portId[1]; } else { /* TODO - WindowDM has implemented, other DMs need Unhardcode port_id */ stream->audio_info.port_id[0] = 0x5558859e; stream->audio_info.port_id[1] = 0xd989449; } /* EDID CAP translation for HDMI 2.0 */ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg)); stream->timing.dsc_cfg.num_slices_h = 0; stream->timing.dsc_cfg.num_slices_v = 0; stream->timing.dsc_cfg.bits_per_pixel = 128; stream->timing.dsc_cfg.block_pred_enable = 1; stream->timing.dsc_cfg.linebuf_depth = 9; stream->timing.dsc_cfg.version_minor = 2; stream->timing.dsc_cfg.ycbcr422_simple = 0; update_stream_signal(stream, dc_sink_data); stream->out_transfer_func = dc_create_transfer_func(); if (stream->out_transfer_func == NULL) { dc_sink_release(dc_sink_data); return false; } stream->out_transfer_func->type = TF_TYPE_BYPASS; stream->stream_id = stream->ctx->dc_stream_id_count; stream->ctx->dc_stream_id_count++; return true; } static void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); if (stream->out_transfer_func != NULL) { dc_transfer_func_release(stream->out_transfer_func); stream->out_transfer_func = NULL; } } void dc_stream_retain(struct dc_stream_state *stream) { kref_get(&stream->refcount); } static void dc_stream_free(struct kref *kref) { struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount); dc_stream_destruct(stream); kfree(stream); } void dc_stream_release(struct dc_stream_state *stream) { if (stream != NULL) { kref_put(&stream->refcount, dc_stream_free); } } struct dc_stream_state *dc_create_stream_for_sink( struct dc_sink *sink) { struct dc_stream_state *stream; if (sink == NULL) return NULL; stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); if (stream == NULL) goto alloc_fail; if (dc_stream_construct(stream, sink) == false) goto construct_fail; kref_init(&stream->refcount); return stream; construct_fail: kfree(stream); alloc_fail: return NULL; } struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) { struct dc_stream_state *new_stream; new_stream = kmemdup(stream, sizeof(struct dc_stream_state), GFP_KERNEL); if (!new_stream) return NULL; if (new_stream->sink) dc_sink_retain(new_stream->sink); if (new_stream->out_transfer_func) dc_transfer_func_retain(new_stream->out_transfer_func); new_stream->stream_id = new_stream->ctx->dc_stream_id_count; new_stream->ctx->dc_stream_id_count++; /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign) new_stream->link_enc = NULL; kref_init(&new_stream->refcount); return new_stream; } /** * dc_stream_get_status_from_state - Get stream status from given dc state * @state: DC state to find the stream status in * @stream: The stream to get the stream status for * * The given stream is expected to exist in the given dc state. Otherwise, NULL * will be returned. 
*/ struct dc_stream_status *dc_stream_get_status_from_state( struct dc_state *state, struct dc_stream_state *stream) { uint8_t i; if (state == NULL) return NULL; for (i = 0; i < state->stream_count; i++) { if (stream == state->streams[i]) return &state->stream_status[i]; } return NULL; } /** * dc_stream_get_status() - Get current stream status of the given stream state * @stream: The stream to get the stream status for. * * The given stream is expected to exist in dc->current_state. Otherwise, NULL * will be returned. */ struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; return dc_stream_get_status_from_state(dc->current_state, stream); } static void program_cursor_attributes( struct dc *dc, struct dc_stream_state *stream, const struct dc_cursor_attributes *attributes) { int i; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; if (!stream) return; res_ctx = &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; if (pipe_ctx->stream != stream) continue; if (!pipe_to_program) { pipe_to_program = pipe_ctx; dc->hwss.cursor_lock(dc, pipe_to_program, true); if (pipe_to_program->next_odm_pipe) dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); } dc->hwss.set_cursor_attribute(pipe_ctx); if (dc->ctx->dmub_srv) dc_send_update_cursor_info_to_dmu(pipe_ctx, i); if (dc->hwss.set_cursor_sdr_white_level) dc->hwss.set_cursor_sdr_white_level(pipe_ctx); } if (pipe_to_program) { dc->hwss.cursor_lock(dc, pipe_to_program, false); if (pipe_to_program->next_odm_pipe) dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); } } #ifndef TRIM_FSFT /* * dc_optimize_timing_for_fsft() - dc to optimize timing */ bool dc_optimize_timing_for_fsft( struct dc_stream_state *pStream, unsigned int max_input_rate_in_khz) { struct dc *dc; dc = pStream->ctx->dc; return (dc->hwss.optimize_timing_for_fsft && dc->hwss.optimize_timing_for_fsft(dc, &pStream->timing, max_input_rate_in_khz)); } #endif static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream) { uint32_t refresh_rate; struct dc *dc = stream->ctx->dc; refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 + stream->timing.v_total * stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, stream->timing.v_total); refresh_rate = div_u64(refresh_rate, stream->timing.h_total); /* If there's any stream that fits the SubVP high refresh criteria, * we must return true. This is because cursor updates are asynchronous * with full updates, so we could transition into a SubVP config and * remain in HW cursor mode if there's no cursor update which will * then cause corruption. 
*/ if ((refresh_rate >= 120 && refresh_rate <= 175 && stream->timing.v_addressable >= 1440 && stream->timing.v_addressable <= 2160) && (dc->current_state->stream_count > 1 || (dc->current_state->stream_count == 1 && !stream->allow_freesync))) return true; return false; } /* * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address */ bool dc_stream_set_cursor_attributes( struct dc_stream_state *stream, const struct dc_cursor_attributes *attributes) { struct dc *dc; bool reset_idle_optimizations = false; if (NULL == stream) { dm_error("DC: dc_stream is NULL!\n"); return false; } if (NULL == attributes) { dm_error("DC: attributes is NULL!\n"); return false; } if (attributes->address.quad_part == 0) { dm_output_to_console("DC: Cursor address is 0!\n"); return false; } dc = stream->ctx->dc; /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4. * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case: * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs) * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz */ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) { if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream)) return false; if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return false; else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return false; } stream->cursor_attributes = *attributes; dc_z10_restore(dc); /* disable idle optimizations while updating cursor */ if (dc->idle_optimizations_allowed) { dc_allow_idle_optimizations(dc, false); reset_idle_optimizations = true; } program_cursor_attributes(dc, stream, attributes); /* re-enable idle optimizations if necessary */ if (reset_idle_optimizations) dc_allow_idle_optimizations(dc, true); return true; } static void program_cursor_position( struct dc *dc, struct dc_stream_state *stream, const struct dc_cursor_position *position) { int i; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; if (!stream) return; res_ctx = &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || !pipe_ctx->plane_state || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp)) continue; if (!pipe_to_program) { pipe_to_program = pipe_ctx; dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_position(pipe_ctx); if (dc->ctx->dmub_srv) dc_send_update_cursor_info_to_dmu(pipe_ctx, i); } if (pipe_to_program) dc->hwss.cursor_lock(dc, pipe_to_program, false); } bool dc_stream_set_cursor_position( struct dc_stream_state *stream, const struct dc_cursor_position *position) { struct dc *dc; bool reset_idle_optimizations = false; if (NULL == stream) { dm_error("DC: dc_stream is NULL!\n"); return false; } if (NULL == position) { dm_error("DC: cursor position is NULL!\n"); return false; } dc = stream->ctx->dc; 
dc_z10_restore(dc); /* disable idle optimizations if enabling cursor */ if (dc->idle_optimizations_allowed && (!stream->cursor_position.enable || dc->debug.exit_idle_opt_for_cursor_updates) && position->enable) { dc_allow_idle_optimizations(dc, false); reset_idle_optimizations = true; } stream->cursor_position = *position; program_cursor_position(dc, stream, position); /* re-enable idle optimizations if necessary */ if (reset_idle_optimizations) dc_allow_idle_optimizations(dc, true); return true; } bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info) { bool isDrc = false; int i = 0; struct dwbc *dwb; if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; } if (wb_info == NULL) { dm_error("DC: dc_writeback_info is NULL!\n"); return false; } if (wb_info->dwb_pipe_inst >= MAX_DWB_PIPES) { dm_error("DC: writeback pipe is invalid!\n"); return false; } wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; dwb->dwb_is_drc = false; /* recalculate and apply DML parameters */ for (i = 0; i < stream->num_wb_info; i++) { /*dynamic update*/ if (stream->writeback_info[i].wb_enabled && stream->writeback_info[i].dwb_pipe_inst == wb_info->dwb_pipe_inst) { stream->writeback_info[i] = *wb_info; isDrc = true; } } if (!isDrc) { ASSERT(stream->num_wb_info + 1 <= MAX_DWB_PIPES); stream->writeback_info[stream->num_wb_info++] = *wb_info; } if (dc->hwss.enable_writeback) { struct dc_stream_status *stream_status = dc_stream_get_status(stream); struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; dwb->otg_inst = stream_status->primary_otg_inst; } return true; } bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) { int i = 0, j = 0; if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; } if (dwb_pipe_inst >= MAX_DWB_PIPES) { dm_error("DC: writeback pipe is invalid!\n"); return false; } if (stream->num_wb_info > MAX_DWB_PIPES) { dm_error("DC: num_wb_info is invalid!\n"); return false; } // stream->writeback_info[dwb_pipe_inst].wb_enabled = false; for (i = 0; i < stream->num_wb_info; i++) { /*dynamic update*/ if (stream->writeback_info[i].wb_enabled && stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) { stream->writeback_info[i].wb_enabled = false; } } /* remove writeback info for disabled writeback pipes from stream */ for (i = 0, j = 0; i < stream->num_wb_info; i++) { if (stream->writeback_info[i].wb_enabled) { if (j < i) /* trim the array */ memcpy(&stream->writeback_info[j], &stream->writeback_info[i], sizeof(struct dc_writeback_info)); j++; } } stream->num_wb_info = j; return true; } bool dc_stream_warmup_writeback(struct dc *dc, int num_dwb, struct dc_writeback_info *wb_info) { if (dc->hwss.mmhubbub_warmup) return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); else return false; } uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) { uint8_t i; struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; if (res_ctx->pipe_ctx[i].stream != stream) continue; return tg->funcs->get_frame_count(tg); } return 0; } bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream, const uint8_t *custom_sdp_message, unsigned int sdp_message_size) { int i; struct dc *dc; struct resource_context *res_ctx; if (stream == NULL) { dm_error("DC: 
dc_stream is NULL!\n"); return false; } dc = stream->ctx->dc; res_ctx = &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; if (pipe_ctx->stream != stream) continue; if (dc->hwss.send_immediate_sdp_message != NULL) dc->hwss.send_immediate_sdp_message(pipe_ctx, custom_sdp_message, sdp_message_size); else DC_LOG_WARNING("%s:send_immediate_sdp_message not implemented on this ASIC\n", __func__); } return true; } bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, uint32_t *v_blank_start, uint32_t *v_blank_end, uint32_t *h_position, uint32_t *v_position) { uint8_t i; bool ret = false; struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; if (res_ctx->pipe_ctx[i].stream != stream) continue; tg->funcs->get_scanoutpos(tg, v_blank_start, v_blank_end, h_position, v_position); ret = true; break; } return ret; } bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) { struct pipe_ctx *pipe = NULL; int i; if (!dc->hwss.dmdata_status_done) return false; for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream) break; } /* Stream not found, by default we'll assume HUBP fetched dm data */ if (i == MAX_PIPES) return true; return dc->hwss.dmdata_status_done(pipe); } bool dc_stream_set_dynamic_metadata(struct dc *dc, struct dc_stream_state *stream, struct dc_dmdata_attributes *attr) { struct pipe_ctx *pipe_ctx = NULL; struct hubp *hubp; int i; /* Dynamic metadata is only supported on HDMI or DP */ if (!dc_is_hdmi_signal(stream->signal) && !dc_is_dp_signal(stream->signal)) return false; /* Check hardware support */ if (!dc->hwss.program_dmdata_engine) return false; for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream == stream) break; } if (i == MAX_PIPES) return false; hubp = pipe_ctx->plane_res.hubp; if (hubp == NULL) return false; pipe_ctx->stream->dmdata_address = attr->address; dc->hwss.program_dmdata_engine(pipe_ctx); if (hubp->funcs->dmdata_set_attributes != NULL && pipe_ctx->stream->dmdata_address.quad_part != 0) { hubp->funcs->dmdata_set_attributes(hubp, attr); } return true; } enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc, struct dc_state *state, struct dc_stream_state *stream) { if (dc->res_pool->funcs->add_dsc_to_stream_resource) { return dc->res_pool->funcs->add_dsc_to_stream_resource(dc, state, stream); } else { return DC_NO_DSC_RESOURCE; } } struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream) { int i = 0; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &stream->ctx->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream) return pipe; } return NULL; } void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) { DC_LOG_DC( "core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n", stream, stream->src.x, stream->src.y, stream->src.width, stream->src.height, stream->dst.x, stream->dst.y, stream->dst.width, stream->dst.height, stream->output_color_space); DC_LOG_DC( "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n", stream->timing.pix_clk_100hz / 10, stream->timing.h_total, stream->timing.v_total, stream->timing.pixel_encoding, stream->timing.display_color_depth); DC_LOG_DC( "\tlink: %d\n", stream->link->link_index); 
DC_LOG_DC( "\tdsc: %d, mst_pbn: %d\n", stream->timing.flags.DSC, stream->timing.dsc_cfg.mst_pbn); if (stream->sink) { if (stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && stream->sink->sink_signal != SIGNAL_TYPE_NONE) { DC_LOG_DC( "\tdispname: %s signal: %x\n", stream->sink->edid_caps.display_name, stream->signal); } } }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
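Several checks in dc_stream.c above (the SubVP high-refresh candidate test and the software-cursor fallback) derive a refresh rate from the stream timing: the pixel clock, stored in units of 100 Hz in pix_clk_100hz, divided by the total pixels per frame (h_total * v_total). The sketch below shows that arithmetic with hypothetical timing numbers for a 3840x2160@60 mode; it is a simplified model of the intent, not the driver's exact rounding sequence.

#include <stdio.h>
#include <stdint.h>

/* Ceiling-divide form, in the spirit of the SubVP candidate check: rounds the rate up. */
static uint32_t refresh_hz_ceil(uint64_t pix_clk_100hz, uint32_t h_total, uint32_t v_total)
{
	uint64_t pixels_per_frame = (uint64_t)h_total * v_total;

	/* pixel clock in Hz = pix_clk_100hz * 100; one frame consumes h_total * v_total pixels */
	return (uint32_t)((pix_clk_100hz * 100 + pixels_per_frame - 1) / pixels_per_frame);
}

int main(void)
{
	/* Hypothetical CTA-861 timing for 3840x2160@60: 594 MHz pixel clock, 4400x2250 total. */
	uint64_t pix_clk_100hz = 5940000;   /* 594,000,000 Hz expressed in 100 Hz units */
	uint32_t h_total = 4400, v_total = 2250;

	printf("refresh ~ %u Hz\n", (unsigned)refresh_hz_ceil(pix_clk_100hz, h_total, v_total));
	return 0;
}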
/* * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ /* * dc_debug.c * * Created on: Nov 3, 2016 * Author: yonsun */ #include "dm_services.h" #include "dc.h" #include "core_status.h" #include "core_types.h" #include "resource.h" #define DC_LOGGER_INIT(logger) #define SURFACE_TRACE(...) do {\ if (dc->debug.surface_trace) \ DC_LOG_IF_TRACE(__VA_ARGS__); \ } while (0) #define TIMING_TRACE(...) do {\ if (dc->debug.timing_trace) \ DC_LOG_SYNC(__VA_ARGS__); \ } while (0) #define CLOCK_TRACE(...) do {\ if (dc->debug.clock_trace) \ DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \ } while (0) void pre_surface_trace( struct dc *dc, const struct dc_plane_state *const *plane_states, int surface_count) { int i; DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < surface_count; i++) { const struct dc_plane_state *plane_state = plane_states[i]; SURFACE_TRACE("Planes %d:\n", i); SURFACE_TRACE( "plane_state->visible = %d;\n" "plane_state->flip_immediate = %d;\n" "plane_state->address.type = %d;\n" "plane_state->address.grph.addr.quad_part = 0x%llX;\n" "plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n" "plane_state->scaling_quality.h_taps = %d;\n" "plane_state->scaling_quality.v_taps = %d;\n" "plane_state->scaling_quality.h_taps_c = %d;\n" "plane_state->scaling_quality.v_taps_c = %d;\n", plane_state->visible, plane_state->flip_immediate, plane_state->address.type, plane_state->address.grph.addr.quad_part, plane_state->address.grph.meta_addr.quad_part, plane_state->scaling_quality.h_taps, plane_state->scaling_quality.v_taps, plane_state->scaling_quality.h_taps_c, plane_state->scaling_quality.v_taps_c); SURFACE_TRACE( "plane_state->src_rect.x = %d;\n" "plane_state->src_rect.y = %d;\n" "plane_state->src_rect.width = %d;\n" "plane_state->src_rect.height = %d;\n" "plane_state->dst_rect.x = %d;\n" "plane_state->dst_rect.y = %d;\n" "plane_state->dst_rect.width = %d;\n" "plane_state->dst_rect.height = %d;\n" "plane_state->clip_rect.x = %d;\n" "plane_state->clip_rect.y = %d;\n" "plane_state->clip_rect.width = %d;\n" "plane_state->clip_rect.height = %d;\n", plane_state->src_rect.x, plane_state->src_rect.y, plane_state->src_rect.width, plane_state->src_rect.height, plane_state->dst_rect.x, plane_state->dst_rect.y, plane_state->dst_rect.width, plane_state->dst_rect.height, plane_state->clip_rect.x, plane_state->clip_rect.y, plane_state->clip_rect.width, plane_state->clip_rect.height); SURFACE_TRACE( "plane_state->plane_size.surface_size.x = %d;\n" 
"plane_state->plane_size.surface_size.y = %d;\n" "plane_state->plane_size.surface_size.width = %d;\n" "plane_state->plane_size.surface_size.height = %d;\n" "plane_state->plane_size.surface_pitch = %d;\n", plane_state->plane_size.surface_size.x, plane_state->plane_size.surface_size.y, plane_state->plane_size.surface_size.width, plane_state->plane_size.surface_size.height, plane_state->plane_size.surface_pitch); SURFACE_TRACE( "plane_state->tiling_info.gfx8.num_banks = %d;\n" "plane_state->tiling_info.gfx8.bank_width = %d;\n" "plane_state->tiling_info.gfx8.bank_width_c = %d;\n" "plane_state->tiling_info.gfx8.bank_height = %d;\n" "plane_state->tiling_info.gfx8.bank_height_c = %d;\n" "plane_state->tiling_info.gfx8.tile_aspect = %d;\n" "plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n" "plane_state->tiling_info.gfx8.tile_split = %d;\n" "plane_state->tiling_info.gfx8.tile_split_c = %d;\n" "plane_state->tiling_info.gfx8.tile_mode = %d;\n" "plane_state->tiling_info.gfx8.tile_mode_c = %d;\n", plane_state->tiling_info.gfx8.num_banks, plane_state->tiling_info.gfx8.bank_width, plane_state->tiling_info.gfx8.bank_width_c, plane_state->tiling_info.gfx8.bank_height, plane_state->tiling_info.gfx8.bank_height_c, plane_state->tiling_info.gfx8.tile_aspect, plane_state->tiling_info.gfx8.tile_aspect_c, plane_state->tiling_info.gfx8.tile_split, plane_state->tiling_info.gfx8.tile_split_c, plane_state->tiling_info.gfx8.tile_mode, plane_state->tiling_info.gfx8.tile_mode_c); SURFACE_TRACE( "plane_state->tiling_info.gfx8.pipe_config = %d;\n" "plane_state->tiling_info.gfx8.array_mode = %d;\n" "plane_state->color_space = %d;\n" "plane_state->dcc.enable = %d;\n" "plane_state->format = %d;\n" "plane_state->rotation = %d;\n" "plane_state->stereo_format = %d;\n", plane_state->tiling_info.gfx8.pipe_config, plane_state->tiling_info.gfx8.array_mode, plane_state->color_space, plane_state->dcc.enable, plane_state->format, plane_state->rotation, plane_state->stereo_format); SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n", plane_state->tiling_info.gfx9.swizzle); SURFACE_TRACE("\n"); } SURFACE_TRACE("\n"); } void update_surface_trace( struct dc *dc, const struct dc_surface_update *updates, int surface_count) { int i; DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < surface_count; i++) { const struct dc_surface_update *update = &updates[i]; SURFACE_TRACE("Update %d\n", i); if (update->flip_addr) { SURFACE_TRACE("flip_addr->address.type = %d;\n" "flip_addr->address.grph.addr.quad_part = 0x%llX;\n" "flip_addr->address.grph.meta_addr.quad_part = 0x%llX;\n" "flip_addr->flip_immediate = %d;\n", update->flip_addr->address.type, update->flip_addr->address.grph.addr.quad_part, update->flip_addr->address.grph.meta_addr.quad_part, update->flip_addr->flip_immediate); } if (update->plane_info) { SURFACE_TRACE( "plane_info->color_space = %d;\n" "plane_info->format = %d;\n" "plane_info->plane_size.surface_pitch = %d;\n" "plane_info->plane_size.surface_size.height = %d;\n" "plane_info->plane_size.surface_size.width = %d;\n" "plane_info->plane_size.surface_size.x = %d;\n" "plane_info->plane_size.surface_size.y = %d;\n" "plane_info->rotation = %d;\n" "plane_info->stereo_format = %d;\n", update->plane_info->color_space, update->plane_info->format, update->plane_info->plane_size.surface_pitch, update->plane_info->plane_size.surface_size.height, update->plane_info->plane_size.surface_size.width, update->plane_info->plane_size.surface_size.x, update->plane_info->plane_size.surface_size.y, update->plane_info->rotation, 
update->plane_info->stereo_format); SURFACE_TRACE( "plane_info->tiling_info.gfx8.num_banks = %d;\n" "plane_info->tiling_info.gfx8.bank_width = %d;\n" "plane_info->tiling_info.gfx8.bank_width_c = %d;\n" "plane_info->tiling_info.gfx8.bank_height = %d;\n" "plane_info->tiling_info.gfx8.bank_height_c = %d;\n" "plane_info->tiling_info.gfx8.tile_aspect = %d;\n" "plane_info->tiling_info.gfx8.tile_aspect_c = %d;\n" "plane_info->tiling_info.gfx8.tile_split = %d;\n" "plane_info->tiling_info.gfx8.tile_split_c = %d;\n" "plane_info->tiling_info.gfx8.tile_mode = %d;\n" "plane_info->tiling_info.gfx8.tile_mode_c = %d;\n", update->plane_info->tiling_info.gfx8.num_banks, update->plane_info->tiling_info.gfx8.bank_width, update->plane_info->tiling_info.gfx8.bank_width_c, update->plane_info->tiling_info.gfx8.bank_height, update->plane_info->tiling_info.gfx8.bank_height_c, update->plane_info->tiling_info.gfx8.tile_aspect, update->plane_info->tiling_info.gfx8.tile_aspect_c, update->plane_info->tiling_info.gfx8.tile_split, update->plane_info->tiling_info.gfx8.tile_split_c, update->plane_info->tiling_info.gfx8.tile_mode, update->plane_info->tiling_info.gfx8.tile_mode_c); SURFACE_TRACE( "plane_info->tiling_info.gfx8.pipe_config = %d;\n" "plane_info->tiling_info.gfx8.array_mode = %d;\n" "plane_info->visible = %d;\n" "plane_info->per_pixel_alpha = %d;\n", update->plane_info->tiling_info.gfx8.pipe_config, update->plane_info->tiling_info.gfx8.array_mode, update->plane_info->visible, update->plane_info->per_pixel_alpha); SURFACE_TRACE("surface->tiling_info.gfx9.swizzle = %d;\n", update->plane_info->tiling_info.gfx9.swizzle); } if (update->scaling_info) { SURFACE_TRACE( "scaling_info->src_rect.x = %d;\n" "scaling_info->src_rect.y = %d;\n" "scaling_info->src_rect.width = %d;\n" "scaling_info->src_rect.height = %d;\n" "scaling_info->dst_rect.x = %d;\n" "scaling_info->dst_rect.y = %d;\n" "scaling_info->dst_rect.width = %d;\n" "scaling_info->dst_rect.height = %d;\n" "scaling_info->clip_rect.x = %d;\n" "scaling_info->clip_rect.y = %d;\n" "scaling_info->clip_rect.width = %d;\n" "scaling_info->clip_rect.height = %d;\n" "scaling_info->scaling_quality.h_taps = %d;\n" "scaling_info->scaling_quality.v_taps = %d;\n" "scaling_info->scaling_quality.h_taps_c = %d;\n" "scaling_info->scaling_quality.v_taps_c = %d;\n", update->scaling_info->src_rect.x, update->scaling_info->src_rect.y, update->scaling_info->src_rect.width, update->scaling_info->src_rect.height, update->scaling_info->dst_rect.x, update->scaling_info->dst_rect.y, update->scaling_info->dst_rect.width, update->scaling_info->dst_rect.height, update->scaling_info->clip_rect.x, update->scaling_info->clip_rect.y, update->scaling_info->clip_rect.width, update->scaling_info->clip_rect.height, update->scaling_info->scaling_quality.h_taps, update->scaling_info->scaling_quality.v_taps, update->scaling_info->scaling_quality.h_taps_c, update->scaling_info->scaling_quality.v_taps_c); } SURFACE_TRACE("\n"); } SURFACE_TRACE("\n"); } void post_surface_trace(struct dc *dc) { DC_LOGGER_INIT(dc->ctx->logger); SURFACE_TRACE("post surface process.\n"); } void context_timing_trace( struct dc *dc, struct resource_context *res_ctx) { int i; int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; struct crtc_position position; unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; /* get_position() returns CRTC vertical/horizontal counter * hence not applicable for 
underlay pipe */ if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) continue; pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); h_pos[i] = position.horizontal_count; v_pos[i] = position.vertical_count; } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) continue; TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", pipe_ctx->stream_res.tg->inst, pipe_ctx->stream->timing.h_total, pipe_ctx->stream->timing.v_total, h_pos[i], v_pos[i]); } } void context_clock_trace( struct dc *dc, struct dc_state *context) { DC_LOGGER_INIT(dc->ctx->logger); CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", context->bw_ctx.bw.dcn.clk.dispclk_khz, context->bw_ctx.bw.dcn.clk.dppclk_khz, context->bw_ctx.bw.dcn.clk.dcfclk_khz, context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz, context->bw_ctx.bw.dcn.clk.fclk_khz, context->bw_ctx.bw.dcn.clk.socclk_khz); CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", context->bw_ctx.bw.dcn.clk.dispclk_khz, context->bw_ctx.bw.dcn.clk.dppclk_khz, context->bw_ctx.bw.dcn.clk.dcfclk_khz, context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz, context->bw_ctx.bw.dcn.clk.fclk_khz, context->bw_ctx.bw.dcn.clk.socclk_khz); } /** * dc_status_to_str - convert dc_status to a human readable string * @status: dc_status to be converted * * Return: * A string describing the DC status. */ char *dc_status_to_str(enum dc_status status) { switch (status) { case DC_OK: return "DC OK"; case DC_NO_CONTROLLER_RESOURCE: return "No controller resource"; case DC_NO_STREAM_ENC_RESOURCE: return "No stream encoder"; case DC_NO_CLOCK_SOURCE_RESOURCE: return "No clock source"; case DC_FAIL_CONTROLLER_VALIDATE: return "Controller validation failure"; case DC_FAIL_ENC_VALIDATE: return "Encoder validation failure"; case DC_FAIL_ATTACH_SURFACES: return "Surfaces attachment failure"; case DC_FAIL_DETACH_SURFACES: return "Surfaces detachment failure"; case DC_FAIL_SURFACE_VALIDATE: return "Surface validation failure"; case DC_NO_DP_LINK_BANDWIDTH: return "No DP link bandwidth"; case DC_EXCEED_DONGLE_CAP: return "Exceed dongle capability"; case DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED: return "Unsupported pixel format"; case DC_FAIL_BANDWIDTH_VALIDATE: return "Bandwidth validation failure (BW and Watermark)"; case DC_FAIL_SCALING: return "Scaling failure"; case DC_FAIL_DP_LINK_TRAINING: return "DP link training failure"; case DC_FAIL_DSC_VALIDATE: return "DSC validation failure"; case DC_NO_DSC_RESOURCE: return "No DSC resource"; case DC_FAIL_UNSUPPORTED_1: return "Unsupported"; case DC_FAIL_CLK_EXCEED_MAX: return "Clk exceed max failure"; case DC_FAIL_CLK_BELOW_MIN: return "Fail clk below minimum"; case DC_FAIL_CLK_BELOW_CFG_REQUIRED: return "Fail clk below required CFG (hard_min in PPLIB)"; case DC_NOT_SUPPORTED: return "The operation is not supported."; case DC_UNSUPPORTED_VALUE: return "The value specified is not supported."; case DC_NO_LINK_ENC_RESOURCE: return "No link encoder resource"; case DC_FAIL_DP_PAYLOAD_ALLOCATION: return "Fail dp payload allocation"; case DC_FAIL_DP_LINK_BANDWIDTH: return "Insufficient DP link bandwidth"; case DC_ERROR_UNEXPECTED: return "Unexpected error"; } return "Unexpected status error"; }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_debug.c
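dc_debug.c above wraps all of its logging in variadic macros (SURFACE_TRACE, TIMING_TRACE, CLOCK_TRACE) that are always compiled in but gated at run time on per-category debug flags, so the trace arguments are only evaluated when the matching flag is set. A minimal standalone sketch of that gating pattern, with a made-up flag structure and plain printf standing in for the logger:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical debug knobs, standing in for the per-category trace flags. */
struct demo_debug {
	bool surface_trace;
};

static struct demo_debug demo_dbg = { .surface_trace = true };

/* Gated variadic trace macro: the printf call, and therefore its arguments,
 * only runs when the matching debug flag is set. */
#define DEMO_SURFACE_TRACE(...) do {		\
	if (demo_dbg.surface_trace)		\
		printf(__VA_ARGS__);		\
} while (0)

int main(void)
{
	DEMO_SURFACE_TRACE("plane %d: visible=%d\n", 0, 1);

	demo_dbg.surface_trace = false;
	DEMO_SURFACE_TRACE("this line is suppressed\n");
	return 0;
}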
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "vm_helper.h" #include "dc.h" void vm_helper_mark_vmid_used(struct vm_helper *vm_helper, unsigned int pos, uint8_t hubp_idx) { struct vmid_usage *vmids = &vm_helper->hubp_vmid_usage[hubp_idx]; vmids->vmid_usage[0] = vmids->vmid_usage[1]; vmids->vmid_usage[1] = 1 << pos; } int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config) { int num_vmids = 0; /* Call HWSS to setup HUBBUB for address config */ if (dc->hwss.init_sys_ctx) { num_vmids = dc->hwss.init_sys_ctx(dc->hwseq, dc, pa_config); /* Pre-init system aperture start/end for all HUBP instances (if not gating?) * or cache system aperture if using power gating */ memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config)); dc->vm_pa_config.valid = true; dc_z10_save_init(dc); } return num_vmids; } void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid) { dc->hwss.init_vm_ctx(dc->hwseq, dc, va_config, vmid); } int dc_get_vmid_use_vector(struct dc *dc) { int i; int in_use = 0; for (i = 0; i < MAX_HUBP; i++) in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0] | dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1]; return in_use; } void vm_helper_init(struct vm_helper *vm_helper, unsigned int num_vmid) { vm_helper->num_vmid = num_vmid; memset(vm_helper->hubp_vmid_usage, 0, sizeof(vm_helper->hubp_vmid_usage[0]) * MAX_HUBP); }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
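dc_vm_helper.c above tracks VMID usage as one-hot bitmasks: each HUBP keeps its current and previous VMID mask, and dc_get_vmid_use_vector() ORs all of them into one in-use vector. The sketch below models that bookkeeping with hypothetical sizes and names; the per-HUBP entry is updated through a pointer so the recorded history persists.

#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_HUBP 4

/* Two-deep history of one-hot VMID masks per HUBP, mirroring struct vmid_usage. */
struct demo_vmid_usage {
	uint32_t vmid_usage[2];  /* [0] = previous mask, [1] = current mask */
};

static struct demo_vmid_usage demo_usage[DEMO_MAX_HUBP];

/* Mark VMID 'pos' as used by 'hubp_idx': shift the history and record 1 << pos. */
static void demo_mark_vmid_used(unsigned int pos, unsigned int hubp_idx)
{
	struct demo_vmid_usage *u = &demo_usage[hubp_idx];

	u->vmid_usage[0] = u->vmid_usage[1];
	u->vmid_usage[1] = 1u << pos;
}

/* OR every HUBP's current and previous mask into one in-use vector. */
static uint32_t demo_vmid_use_vector(void)
{
	uint32_t in_use = 0;
	int i;

	for (i = 0; i < DEMO_MAX_HUBP; i++)
		in_use |= demo_usage[i].vmid_usage[0] | demo_usage[i].vmid_usage[1];
	return in_use;
}

int main(void)
{
	demo_mark_vmid_used(2, 0);  /* HUBP0 uses VMID 2 */
	demo_mark_vmid_used(5, 0);  /* HUBP0 moves to VMID 5; VMID 2 kept as history */
	demo_mark_vmid_used(1, 3);  /* HUBP3 uses VMID 1 */

	printf("in-use vector: 0x%x\n", (unsigned)demo_vmid_use_vector());  /* prints 0x26 */
	return 0;
}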
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD */ #include "dc/dc_stat.h" #include "dmub/dmub_srv_stat.h" #include "dc_dmub_srv.h" /** * DOC: DC STAT Interface * * These interfaces are called without acquiring DAL and DC locks. * Hence, there are limitations on what these interfaces can access. Only * variables exclusively defined for these interfaces can be modified. */ /** * dc_stat_get_dmub_notification * * Calls dmub layer to retrieve dmub notification * * @dc: dc structure * @notify: dmub notification structure * * Returns * None */ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification *notify) { /** * This function is called without dal and dc locks, so * we shall not modify any dc, dc_dmub_srv or dmub variables * except variables exclusively accessed by this function */ struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; enum dmub_status status; status = dmub_srv_stat_get_notification(dmub, notify); ASSERT(status == DMUB_STATUS_OK); /* For HPD/HPD RX, convert dpia port index into link index */ if (notify->type == DMUB_NOTIFICATION_HPD || notify->type == DMUB_NOTIFICATION_HPD_IRQ || notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { notify->link_index = get_link_index_from_dpia_port_index(dc, notify->link_index); } } /** * dc_stat_get_dmub_dataout * * Calls dmub layer to retrieve dmub gpint dataout * * @dc: dc structure * @dataout: dmub gpint dataout * * Returns * None */ void dc_stat_get_dmub_dataout(const struct dc *dc, uint32_t *dataout) { struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; enum dmub_status status; status = dmub_srv_get_gpint_dataout(dmub, dataout); ASSERT(status == DMUB_STATUS_OK); }
linux-master
drivers/gpu/drm/amd/display/dc/core/dc_stat.c
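A small standalone sketch of the post-processing rule in dc_stat_get_dmub_notification() above: only notification types that report a DPIA port index (HPD, HPD IRQ, DPIA notification, SET_CONFIG reply) have link_index remapped to a DC link index; other types pass through untouched. The enum values and the mapping function here are illustrative stand-ins, not the DMUB definitions.

#include <stdbool.h>
#include <stdint.h>

enum sketch_notify_type {
	SKETCH_NOTIFY_AUX_REPLY,
	SKETCH_NOTIFY_HPD,
	SKETCH_NOTIFY_HPD_IRQ,
	SKETCH_NOTIFY_SET_CONFIG_REPLY,
	SKETCH_NOTIFY_DPIA,
};

struct sketch_notification {
	enum sketch_notify_type type;
	uint8_t link_index; /* DPIA port index on entry, link index on exit */
};

/* hypothetical stand-in for get_link_index_from_dpia_port_index() */
static uint8_t sketch_dpia_port_to_link(uint8_t dpia_port)
{
	return dpia_port; /* identity mapping, for the sketch only */
}

static void sketch_post_process(struct sketch_notification *notify)
{
	bool uses_dpia_port = notify->type == SKETCH_NOTIFY_HPD ||
			      notify->type == SKETCH_NOTIFY_HPD_IRQ ||
			      notify->type == SKETCH_NOTIFY_DPIA ||
			      notify->type == SKETCH_NOTIFY_SET_CONFIG_REPLY;

	if (uses_dpia_port)
		notify->link_index = sketch_dpia_port_to_link(notify->link_index);
}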
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce_i2c.h" #include "dce_i2c_sw.h" #include "include/gpio_service_interface.h" #define SCL false #define SDA true void dce_i2c_sw_construct( struct dce_i2c_sw *dce_i2c_sw, struct dc_context *ctx) { dce_i2c_sw->ctx = ctx; } static inline bool read_bit_from_ddc( struct ddc *ddc, bool data_nor_clock) { uint32_t value = 0; if (data_nor_clock) dal_gpio_get_value(ddc->pin_data, &value); else dal_gpio_get_value(ddc->pin_clock, &value); return (value != 0); } static inline void write_bit_to_ddc( struct ddc *ddc, bool data_nor_clock, bool bit) { uint32_t value = bit ? 1 : 0; if (data_nor_clock) dal_gpio_set_value(ddc->pin_data, value); else dal_gpio_set_value(ddc->pin_clock, value); } static void release_engine_dce_sw( struct resource_pool *pool, struct dce_i2c_sw *dce_i2c_sw) { dal_ddc_close(dce_i2c_sw->ddc); dce_i2c_sw->ddc = NULL; } static bool wait_for_scl_high_sw( struct dc_context *ctx, struct ddc *ddc, uint16_t clock_delay_div_4) { uint32_t scl_retry = 0; uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4; udelay(clock_delay_div_4); do { if (read_bit_from_ddc(ddc, SCL)) return true; udelay(clock_delay_div_4); ++scl_retry; } while (scl_retry <= scl_retry_max); return false; } static bool write_byte_sw( struct dc_context *ctx, struct ddc *ddc_handle, uint16_t clock_delay_div_4, uint8_t byte) { int32_t shift = 7; bool ack; /* bits are transmitted serially, starting from MSB */ do { udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1); udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SCL, true); if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) return false; write_bit_to_ddc(ddc_handle, SCL, false); --shift; } while (shift >= 0); /* The display sends ACK by preventing the SDA from going high * after the SCL pulse we use to send our last data bit. 
* If the SDA goes high after that bit, it's a NACK */ udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SDA, true); udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SCL, true); if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) return false; /* read ACK bit */ ack = !read_bit_from_ddc(ddc_handle, SDA); udelay(clock_delay_div_4 << 1); write_bit_to_ddc(ddc_handle, SCL, false); udelay(clock_delay_div_4 << 1); return ack; } static bool read_byte_sw( struct dc_context *ctx, struct ddc *ddc_handle, uint16_t clock_delay_div_4, uint8_t *byte, bool more) { int32_t shift = 7; uint8_t data = 0; /* The data bits are read from MSB to LSB; * bit is read while SCL is high */ do { write_bit_to_ddc(ddc_handle, SCL, true); if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) return false; if (read_bit_from_ddc(ddc_handle, SDA)) data |= (1 << shift); write_bit_to_ddc(ddc_handle, SCL, false); udelay(clock_delay_div_4 << 1); --shift; } while (shift >= 0); /* read only whole byte */ *byte = data; udelay(clock_delay_div_4); /* send the acknowledge bit: * SDA low means ACK, SDA high means NACK */ write_bit_to_ddc(ddc_handle, SDA, !more); udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SCL, true); if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) return false; write_bit_to_ddc(ddc_handle, SCL, false); udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SDA, true); udelay(clock_delay_div_4); return true; } static bool stop_sync_sw( struct dc_context *ctx, struct ddc *ddc_handle, uint16_t clock_delay_div_4) { uint32_t retry = 0; /* The I2C communications stop signal is: * the SDA going high from low, while the SCL is high. */ write_bit_to_ddc(ddc_handle, SCL, false); udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SDA, false); udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SCL, true); if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) return false; write_bit_to_ddc(ddc_handle, SDA, true); do { udelay(clock_delay_div_4); if (read_bit_from_ddc(ddc_handle, SDA)) return true; ++retry; } while (retry <= 2); return false; } static bool i2c_write_sw( struct dc_context *ctx, struct ddc *ddc_handle, uint16_t clock_delay_div_4, uint8_t address, uint32_t length, const uint8_t *data) { uint32_t i = 0; if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, address)) return false; while (i < length) { if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, data[i])) return false; ++i; } return true; } static bool i2c_read_sw( struct dc_context *ctx, struct ddc *ddc_handle, uint16_t clock_delay_div_4, uint8_t address, uint32_t length, uint8_t *data) { uint32_t i = 0; if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, address)) return false; while (i < length) { if (!read_byte_sw(ctx, ddc_handle, clock_delay_div_4, data + i, i < length - 1)) return false; ++i; } return true; } static bool start_sync_sw( struct dc_context *ctx, struct ddc *ddc_handle, uint16_t clock_delay_div_4) { uint32_t retry = 0; /* The I2C communications start signal is: * the SDA going low from high, while the SCL is high. 
*/ write_bit_to_ddc(ddc_handle, SCL, true); udelay(clock_delay_div_4); do { write_bit_to_ddc(ddc_handle, SDA, true); if (!read_bit_from_ddc(ddc_handle, SDA)) { ++retry; continue; } udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SCL, true); if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) break; write_bit_to_ddc(ddc_handle, SDA, false); udelay(clock_delay_div_4); write_bit_to_ddc(ddc_handle, SCL, false); udelay(clock_delay_div_4); return true; } while (retry <= I2C_SW_RETRIES); return false; } static void dce_i2c_sw_engine_set_speed( struct dce_i2c_sw *engine, uint32_t speed) { ASSERT(speed); engine->speed = speed ? speed : DCE_I2C_DEFAULT_I2C_SW_SPEED; engine->clock_delay = 1000 / engine->speed; if (engine->clock_delay < 12) engine->clock_delay = 12; } static bool dce_i2c_sw_engine_acquire_engine( struct dce_i2c_sw *engine, struct ddc *ddc) { enum gpio_result result; result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C); if (result != GPIO_RESULT_OK) return false; engine->ddc = ddc; return true; } bool dce_i2c_engine_acquire_sw( struct dce_i2c_sw *dce_i2c_sw, struct ddc *ddc_handle) { uint32_t counter = 0; bool result; do { result = dce_i2c_sw_engine_acquire_engine( dce_i2c_sw, ddc_handle); if (result) break; /* i2c_engine is busy by VBios, lets wait and retry */ udelay(10); ++counter; } while (counter < 2); return result; } static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine, struct i2c_request_transaction_data *req) { struct ddc *ddc = engine->ddc; uint16_t clock_delay_div_4 = engine->clock_delay >> 2; /* send sync (start / repeated start) */ bool result = start_sync_sw(engine->ctx, ddc, clock_delay_div_4); /* process payload */ if (result) { switch (req->action) { case DCE_I2C_TRANSACTION_ACTION_I2C_WRITE: case DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT: result = i2c_write_sw(engine->ctx, ddc, clock_delay_div_4, req->address, req->length, req->data); break; case DCE_I2C_TRANSACTION_ACTION_I2C_READ: case DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT: result = i2c_read_sw(engine->ctx, ddc, clock_delay_div_4, req->address, req->length, req->data); break; default: result = false; break; } } /* send stop if not 'mot' or operation failed */ if (!result || (req->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || (req->action == DCE_I2C_TRANSACTION_ACTION_I2C_READ)) if (!stop_sync_sw(engine->ctx, ddc, clock_delay_div_4)) result = false; req->status = result ? I2C_CHANNEL_OPERATION_SUCCEEDED : I2C_CHANNEL_OPERATION_FAILED; } static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine, struct i2c_payload *payload, bool middle_of_transaction) { struct i2c_request_transaction_data request; if (!payload->write) request.action = middle_of_transaction ? DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_READ; else request.action = middle_of_transaction ? 
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; request.address = (uint8_t) ((payload->address << 1) | !payload->write); request.length = payload->length; request.data = payload->data; dce_i2c_sw_engine_submit_channel_request(engine, &request); if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) || (request.status == I2C_CHANNEL_OPERATION_FAILED)) return false; return true; } bool dce_i2c_submit_command_sw( struct resource_pool *pool, struct ddc *ddc, struct i2c_command *cmd, struct dce_i2c_sw *dce_i2c_sw) { uint8_t index_of_payload = 0; bool result; dce_i2c_sw_engine_set_speed(dce_i2c_sw, cmd->speed); result = true; while (index_of_payload < cmd->number_of_payloads) { bool mot = (index_of_payload != cmd->number_of_payloads - 1); struct i2c_payload *payload = cmd->payloads + index_of_payload; if (!dce_i2c_sw_engine_submit_payload( dce_i2c_sw, payload, mot)) { result = false; break; } ++index_of_payload; } release_engine_dce_sw(pool, dce_i2c_sw); return result; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
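A standalone sketch of the bit-banged I2C byte write that dce_i2c_sw.c above implements over the DDC GPIOs: bits go out MSB first while SCL is pulsed, clock stretching is honoured by waiting for SCL to actually read back high, and the ACK is the slave holding SDA low during the ninth clock. GPIO access and delays are abstracted behind a small ops table here; the real code drives dal_gpio_get_value()/dal_gpio_set_value() and udelay(). All sketch_* names are illustrative.

#include <stdbool.h>
#include <stdint.h>

struct sketch_i2c_ops {
	void (*set_sda)(bool high);
	void (*set_scl)(bool high);
	bool (*get_sda)(void);
	bool (*get_scl)(void);
	void (*delay)(void); /* roughly one quarter of the SCL period */
};

/* Wait for SCL to go high, tolerating slave clock stretching. */
static bool sketch_wait_scl_high(const struct sketch_i2c_ops *ops, int max_retries)
{
	int i;

	for (i = 0; i <= max_retries; i++) {
		ops->delay();
		if (ops->get_scl())
			return true;
	}
	return false;
}

/* Returns true if the slave ACKed the byte. */
static bool sketch_write_byte(const struct sketch_i2c_ops *ops, uint8_t byte)
{
	int shift;
	bool ack;

	for (shift = 7; shift >= 0; shift--) {
		ops->delay();
		ops->set_sda((byte >> shift) & 1); /* data changes while SCL is low */
		ops->delay();
		ops->set_scl(true);
		if (!sketch_wait_scl_high(ops, 1000))
			return false;
		ops->set_scl(false);
	}

	/* Ninth clock: release SDA and sample the ACK while SCL is high. */
	ops->delay();
	ops->set_sda(true);
	ops->delay();
	ops->set_scl(true);
	if (!sketch_wait_scl_high(ops, 1000))
		return false;
	ack = !ops->get_sda(); /* slave pulls SDA low to ACK */
	ops->set_scl(false);
	return ack;
}

The first byte written on the bus is the 7-bit slave address shifted left by one with the read/write flag in bit 0, which is what dce_i2c_sw_engine_submit_payload() above builds as (payload->address << 1) | !payload->write before handing the transaction to the read/write helpers.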
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "basics/conversion.h" #include "dce_opp.h" #include "reg_helper.h" #define REG(reg)\ (opp110->regs->reg) #undef FN #define FN(reg_name, field_name) \ opp110->opp_shift->field_name, opp110->opp_mask->field_name #define CTX \ opp110->base.ctx enum { MAX_PWL_ENTRY = 128, MAX_REGIONS_NUMBER = 16 }; enum { MAX_LUT_ENTRY = 256, MAX_NUMBER_OF_ENTRIES = 256 }; enum { OUTPUT_CSC_MATRIX_SIZE = 12 }; /* ***************************************************************************** * Function: regamma_config_regions_and_segments * * build regamma curve by using predefined hw points * uses interface parameters ,like EDID coeff. * * @param : parameters interface parameters * @return void * * @note * * @see * ***************************************************************************** */ /* * set_truncation * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp * 2) enable truncation * 3) HW remove 12bit FMT support for DCE11 power saving reason. */ static void set_truncation( struct dce110_opp *opp110, const struct bit_depth_reduction_params *params) { /*Disable truncation*/ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 0, FMT_TRUNCATE_DEPTH, 0, FMT_TRUNCATE_MODE, 0); if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) { /* 8bpc trunc on YCbCr422*/ if (params->flags.TRUNCATE_DEPTH == 1) REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1, FMT_TRUNCATE_DEPTH, 1, FMT_TRUNCATE_MODE, 0); else if (params->flags.TRUNCATE_DEPTH == 2) /* 10bpc trunc on YCbCr422*/ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1, FMT_TRUNCATE_DEPTH, 2, FMT_TRUNCATE_MODE, 0); return; } /* on other format-to do */ if (params->flags.TRUNCATE_ENABLED == 0) return; /*Set truncation depth and Enable truncation*/ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1, FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH, FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE); } #if defined(CONFIG_DRM_AMD_DC_SI) /* * dce60_set_truncation * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp * 2) enable truncation * 3) HW remove 12bit FMT support for DCE11 power saving reason. 
*/ static void dce60_set_truncation( struct dce110_opp *opp110, const struct bit_depth_reduction_params *params) { /* DCE6 has no FMT_TRUNCATE_MODE bit in FMT_BIT_DEPTH_CONTROL reg */ /*Disable truncation*/ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 0, FMT_TRUNCATE_DEPTH, 0); if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) { /* 8bpc trunc on YCbCr422*/ if (params->flags.TRUNCATE_DEPTH == 1) REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1, FMT_TRUNCATE_DEPTH, 1); else if (params->flags.TRUNCATE_DEPTH == 2) /* 10bpc trunc on YCbCr422*/ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1, FMT_TRUNCATE_DEPTH, 2); return; } /* on other format-to do */ if (params->flags.TRUNCATE_ENABLED == 0) return; /*Set truncation depth and Enable truncation*/ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1, FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH); } #endif /* * set_spatial_dither * 1) set spatial dithering mode: pattern of seed * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp * 3) set random seed * 4) set random mode * lfsr is reset every frame or not reset * RGB dithering method * 0: RGB data are all dithered with x^28+x^3+1 * 1: R data is dithered with x^28+x^3+1 * G data is dithered with x^28+X^9+1 * B data is dithered with x^28+x^13+1 * enable high pass filter or not * 5) enable spatical dithering */ static void set_spatial_dither( struct dce110_opp *opp110, const struct bit_depth_reduction_params *params) { /*Disable spatial (random) dithering*/ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 0, FMT_SPATIAL_DITHER_DEPTH, 0, FMT_SPATIAL_DITHER_MODE, 0); REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 0, FMT_FRAME_RANDOM_ENABLE, 0, FMT_RGB_RANDOM_ENABLE, 0); REG_UPDATE(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, 0); if (params->flags.SPATIAL_DITHER_ENABLED == 0) return; /* only use FRAME_COUNTER_MAX if frameRandom == 1*/ if (opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX && opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP) { if (params->flags.FRAME_RANDOM == 1) { if (params->flags.SPATIAL_DITHER_DEPTH == 0 || params->flags.SPATIAL_DITHER_DEPTH == 1) { REG_UPDATE_2(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2); } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) { REG_UPDATE_2(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1); } else return; } else { REG_UPDATE_2(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0); } } /* Set seed for random values for * spatial dithering for R,G,B channels */ REG_UPDATE(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, params->r_seed_value); REG_UPDATE(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, params->g_seed_value); REG_UPDATE(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, params->b_seed_value); /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero * offset for the R/Cr channel, lower 4LSB * is forced to zeros. Typically set to 0 * RGB and 0x80000 YCbCr. */ /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero * offset for the G/Y channel, lower 4LSB is * forced to zeros. Typically set to 0 RGB * and 0x80000 YCbCr. */ /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero * offset for the B/Cb channel, lower 4LSB is * forced to zeros. Typically set to 0 RGB and * 0x80000 YCbCr. 
*/ /* Disable High pass filter * Reset only at startup * Set RGB data dithered with x^28+x^3+1 */ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM, FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM, FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM); /* Set spatial dithering bit depth * Set spatial dithering mode * (default is Seed patterrn AAAA...) * Enable spatial dithering */ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH, FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE, FMT_SPATIAL_DITHER_EN, 1); } /* * SetTemporalDither (Frame Modulation) * 1) set temporal dither depth * 2) select pattern: from hard-coded pattern or programmable pattern * 3) select optimized strips for BGR or RGB LCD sub-pixel * 4) set s matrix * 5) set t matrix * 6) set grey level for 0.25, 0.5, 0.75 * 7) enable temporal dithering */ static void set_temporal_dither( struct dce110_opp *opp110, const struct bit_depth_reduction_params *params) { /*Disable temporal (frame modulation) dithering first*/ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, 0, FMT_TEMPORAL_DITHER_RESET, 0, FMT_TEMPORAL_DITHER_OFFSET, 0); REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, 0, FMT_TEMPORAL_LEVEL, 0); REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, 0, FMT_50FRC_SEL, 0, FMT_75FRC_SEL, 0); /* no 10bpc dither on DCE11*/ if (params->flags.FRAME_MODULATION_ENABLED == 0 || params->flags.FRAME_MODULATION_DEPTH == 2) return; /* Set temporal dithering depth*/ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, params->flags.FRAME_MODULATION_DEPTH, FMT_TEMPORAL_DITHER_RESET, 0, FMT_TEMPORAL_DITHER_OFFSET, 0); /*Select legacy pattern based on FRC and Temporal level*/ if (REG(FMT_TEMPORAL_DITHER_PATTERN_CONTROL)) { REG_WRITE(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, 0); /*Set s matrix*/ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, 0); /*Set t matrix*/ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, 0); } /*Select patterns for 0.25, 0.5 and 0.75 grey level*/ REG_UPDATE(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, params->flags.TEMPORAL_LEVEL); REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, params->flags.FRC25, FMT_50FRC_SEL, params->flags.FRC50, FMT_75FRC_SEL, params->flags.FRC75); /*Enable bit reduction by temporal (frame modulation) dithering*/ REG_UPDATE(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, 1); } /* * Set Clamping * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping) * 1 for 8 bpc * 2 for 10 bpc * 3 for 12 bpc * 7 for programable * 2) Enable clamp if Limited range requested */ void dce110_opp_set_clamping( struct dce110_opp *opp110, const struct clamping_and_pixel_encoding_params *params) { REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 0, FMT_CLAMP_COLOR_FORMAT, 0); switch (params->clamping_level) { case CLAMPING_FULL_RANGE: break; case CLAMPING_LIMITED_RANGE_8BPC: REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 1); break; case CLAMPING_LIMITED_RANGE_10BPC: REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 2); break; case CLAMPING_LIMITED_RANGE_12BPC: REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 3); break; case CLAMPING_LIMITED_RANGE_PROGRAMMABLE: /*Set clamp control*/ REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 7); /*set the defaults*/ REG_SET_2(FMT_CLAMP_COMPONENT_R, 0, FMT_CLAMP_LOWER_R, 0x10, FMT_CLAMP_UPPER_R, 0xFEF); 
REG_SET_2(FMT_CLAMP_COMPONENT_G, 0, FMT_CLAMP_LOWER_G, 0x10, FMT_CLAMP_UPPER_G, 0xFEF); REG_SET_2(FMT_CLAMP_COMPONENT_B, 0, FMT_CLAMP_LOWER_B, 0x10, FMT_CLAMP_UPPER_B, 0xFEF); break; default: break; } } #if defined(CONFIG_DRM_AMD_DC_SI) /* * Set Clamping for DCE6 parts * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping) * 1 for 8 bpc * 2 for 10 bpc * 3 for 12 bpc * 7 for programable * 2) Enable clamp if Limited range requested */ static void dce60_opp_set_clamping( struct dce110_opp *opp110, const struct clamping_and_pixel_encoding_params *params) { REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 0, FMT_CLAMP_COLOR_FORMAT, 0); switch (params->clamping_level) { case CLAMPING_FULL_RANGE: break; case CLAMPING_LIMITED_RANGE_8BPC: REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 1); break; case CLAMPING_LIMITED_RANGE_10BPC: REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 2); break; case CLAMPING_LIMITED_RANGE_12BPC: REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 3); break; case CLAMPING_LIMITED_RANGE_PROGRAMMABLE: /*Set clamp control*/ REG_SET_2(FMT_CLAMP_CNTL, 0, FMT_CLAMP_DATA_EN, 1, FMT_CLAMP_COLOR_FORMAT, 7); /* DCE6 does have FMT_CLAMP_COMPONENT_{R,G,B} registers */ break; default: break; } } #endif /* * set_pixel_encoding * * Set Pixel Encoding * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly * 1: YCbCr 4:2:2 */ static void set_pixel_encoding( struct dce110_opp *opp110, const struct clamping_and_pixel_encoding_params *params) { if (opp110->opp_mask->FMT_CBCR_BIT_REDUCTION_BYPASS) REG_UPDATE_3(FMT_CONTROL, FMT_PIXEL_ENCODING, 0, FMT_SUBSAMPLING_MODE, 0, FMT_CBCR_BIT_REDUCTION_BYPASS, 0); else REG_UPDATE_2(FMT_CONTROL, FMT_PIXEL_ENCODING, 0, FMT_SUBSAMPLING_MODE, 0); if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) { REG_UPDATE_2(FMT_CONTROL, FMT_PIXEL_ENCODING, 1, FMT_SUBSAMPLING_ORDER, 0); } if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) { REG_UPDATE_3(FMT_CONTROL, FMT_PIXEL_ENCODING, 2, FMT_SUBSAMPLING_MODE, 2, FMT_CBCR_BIT_REDUCTION_BYPASS, 1); } } #if defined(CONFIG_DRM_AMD_DC_SI) /* * dce60_set_pixel_encoding * DCE6 has no FMT_SUBSAMPLING_{MODE,ORDER} bits in FMT_CONTROL reg * Set Pixel Encoding * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly * 1: YCbCr 4:2:2 */ static void dce60_set_pixel_encoding( struct dce110_opp *opp110, const struct clamping_and_pixel_encoding_params *params) { if (opp110->opp_mask->FMT_CBCR_BIT_REDUCTION_BYPASS) REG_UPDATE_2(FMT_CONTROL, FMT_PIXEL_ENCODING, 0, FMT_CBCR_BIT_REDUCTION_BYPASS, 0); else REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) { REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1); } if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) { REG_UPDATE_2(FMT_CONTROL, FMT_PIXEL_ENCODING, 2, FMT_CBCR_BIT_REDUCTION_BYPASS, 1); } } #endif void dce110_opp_program_bit_depth_reduction( struct output_pixel_processor *opp, const struct bit_depth_reduction_params *params) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); set_truncation(opp110, params); set_spatial_dither(opp110, params); set_temporal_dither(opp110, params); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_opp_program_bit_depth_reduction( struct output_pixel_processor *opp, const struct bit_depth_reduction_params *params) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); dce60_set_truncation(opp110, params); set_spatial_dither(opp110, params); set_temporal_dither(opp110, params); } #endif void dce110_opp_program_clamping_and_pixel_encoding( struct 
output_pixel_processor *opp, const struct clamping_and_pixel_encoding_params *params) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); dce110_opp_set_clamping(opp110, params); set_pixel_encoding(opp110, params); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_opp_program_clamping_and_pixel_encoding( struct output_pixel_processor *opp, const struct clamping_and_pixel_encoding_params *params) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); dce60_opp_set_clamping(opp110, params); dce60_set_pixel_encoding(opp110, params); } #endif static void program_formatter_420_memory(struct output_pixel_processor *opp) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); uint32_t fmt_mem_cntl_value; /* Program source select*/ /* Use HW default source select for FMT_MEMORYx_CONTROL */ /* Use that value for FMT_SRC_SELECT as well*/ REG_GET(CONTROL, FMT420_MEM0_SOURCE_SEL, &fmt_mem_cntl_value); REG_UPDATE(FMT_CONTROL, FMT_SRC_SELECT, fmt_mem_cntl_value); /* Turn on the memory */ REG_UPDATE(CONTROL, FMT420_MEM0_PWR_FORCE, 0); } void dce110_opp_set_dyn_expansion( struct output_pixel_processor *opp, enum dc_color_space color_sp, enum dc_color_depth color_dpth, enum signal_type signal) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, 0, FMT_DYNAMIC_EXP_MODE, 0); /*00 - 10-bit -> 12-bit dynamic expansion*/ /*01 - 8-bit -> 12-bit dynamic expansion*/ if (signal == SIGNAL_TYPE_HDMI_TYPE_A || signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { switch (color_dpth) { case COLOR_DEPTH_888: REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, 1, FMT_DYNAMIC_EXP_MODE, 1); break; case COLOR_DEPTH_101010: REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, 1, FMT_DYNAMIC_EXP_MODE, 0); break; case COLOR_DEPTH_121212: REG_UPDATE_2( FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/ FMT_DYNAMIC_EXP_MODE, 0); break; default: break; } } } static void program_formatter_reset_dig_resync_fifo(struct output_pixel_processor *opp) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); /* clear previous phase lock status*/ REG_UPDATE(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED_CLEAR, 1); /* poll until FMT_420_PIXEL_PHASE_LOCKED become 1*/ REG_WAIT(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, 1, 10, 10); } void dce110_opp_program_fmt( struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth, struct clamping_and_pixel_encoding_params *clamping) { /* dithering is affected by <CrtcSourceSelect>, hence should be * programmed afterwards */ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) program_formatter_420_memory(opp); dce110_opp_program_bit_depth_reduction( opp, fmt_bit_depth); dce110_opp_program_clamping_and_pixel_encoding( opp, clamping); if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) program_formatter_reset_dig_resync_fifo(opp); return; } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_opp_program_fmt( struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth, struct clamping_and_pixel_encoding_params *clamping) { /* dithering is affected by <CrtcSourceSelect>, hence should be * programmed afterwards */ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) program_formatter_420_memory(opp); dce60_opp_program_bit_depth_reduction( opp, fmt_bit_depth); dce60_opp_program_clamping_and_pixel_encoding( opp, clamping); if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) program_formatter_reset_dig_resync_fifo(opp); return; } #endif 
/*****************************************/ /* Constructor, Destructor */ /*****************************************/ static const struct opp_funcs funcs = { .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion, .opp_destroy = dce110_opp_destroy, .opp_program_fmt = dce110_opp_program_fmt, .opp_program_bit_depth_reduction = dce110_opp_program_bit_depth_reduction }; #if defined(CONFIG_DRM_AMD_DC_SI) static const struct opp_funcs dce60_opp_funcs = { .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion, .opp_destroy = dce110_opp_destroy, .opp_program_fmt = dce60_opp_program_fmt, .opp_program_bit_depth_reduction = dce60_opp_program_bit_depth_reduction }; #endif void dce110_opp_construct(struct dce110_opp *opp110, struct dc_context *ctx, uint32_t inst, const struct dce_opp_registers *regs, const struct dce_opp_shift *opp_shift, const struct dce_opp_mask *opp_mask) { opp110->base.funcs = &funcs; opp110->base.ctx = ctx; opp110->base.inst = inst; opp110->regs = regs; opp110->opp_shift = opp_shift; opp110->opp_mask = opp_mask; } #if defined(CONFIG_DRM_AMD_DC_SI) void dce60_opp_construct(struct dce110_opp *opp110, struct dc_context *ctx, uint32_t inst, const struct dce_opp_registers *regs, const struct dce_opp_shift *opp_shift, const struct dce_opp_mask *opp_mask) { opp110->base.funcs = &dce60_opp_funcs; opp110->base.ctx = ctx; opp110->base.inst = inst; opp110->regs = regs; opp110->opp_shift = opp_shift; opp110->opp_mask = opp_mask; } #endif void dce110_opp_destroy(struct output_pixel_processor **opp) { if (*opp) kfree(FROM_DCE11_OPP(*opp)); *opp = NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
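A standalone sketch of the clamping selection in dce110_opp_set_clamping() above: the requested clamping level picks the FMT_CLAMP_COLOR_FORMAT code (1, 2 or 3 for limited-range 8/10/12 bpc, 7 for programmable bounds) and full range leaves clamping disabled. Enum and struct names here are illustrative, not the DC types.

#include <stdbool.h>
#include <stdint.h>

enum sketch_clamping_level {
	SKETCH_CLAMPING_FULL_RANGE,
	SKETCH_CLAMPING_LIMITED_8BPC,
	SKETCH_CLAMPING_LIMITED_10BPC,
	SKETCH_CLAMPING_LIMITED_12BPC,
	SKETCH_CLAMPING_LIMITED_PROGRAMMABLE,
};

struct sketch_clamp_cfg {
	bool enable;           /* value for FMT_CLAMP_DATA_EN */
	uint32_t color_format; /* value for FMT_CLAMP_COLOR_FORMAT */
};

static struct sketch_clamp_cfg sketch_pick_clamping(enum sketch_clamping_level level)
{
	struct sketch_clamp_cfg cfg = { .enable = false, .color_format = 0 };

	switch (level) {
	case SKETCH_CLAMPING_LIMITED_8BPC:
		cfg.enable = true;
		cfg.color_format = 1;
		break;
	case SKETCH_CLAMPING_LIMITED_10BPC:
		cfg.enable = true;
		cfg.color_format = 2;
		break;
	case SKETCH_CLAMPING_LIMITED_12BPC:
		cfg.enable = true;
		cfg.color_format = 3;
		break;
	case SKETCH_CLAMPING_LIMITED_PROGRAMMABLE:
		cfg.enable = true;
		cfg.color_format = 7; /* bounds then come from FMT_CLAMP_COMPONENT_{R,G,B} */
		break;
	case SKETCH_CLAMPING_FULL_RANGE:
	default:
		break; /* clamping stays disabled */
	}
	return cfg;
}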
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "dce_audio.h" #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" #define DCE_AUD(audio)\ container_of(audio, struct dce_audio, base) #define CTX \ aud->base.ctx #define DC_LOGGER_INIT() #define REG(reg)\ (aud->regs->reg) #undef FN #define FN(reg_name, field_name) \ aud->shifts->field_name, aud->masks->field_name #define IX_REG(reg)\ ix ## reg #define AZ_REG_READ(reg_name) \ read_indirect_azalia_reg(audio, IX_REG(reg_name)) #define AZ_REG_WRITE(reg_name, value) \ write_indirect_azalia_reg(audio, IX_REG(reg_name), value) static void write_indirect_azalia_reg(struct audio *audio, uint32_t reg_index, uint32_t reg_data) { struct dce_audio *aud = DCE_AUD(audio); /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0, AZALIA_ENDPOINT_REG_INDEX, reg_index); /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0, AZALIA_ENDPOINT_REG_DATA, reg_data); } static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index) { struct dce_audio *aud = DCE_AUD(audio); uint32_t value = 0; /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0, AZALIA_ENDPOINT_REG_INDEX, reg_index); /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA); return value; } static bool is_audio_format_supported( const struct audio_info *audio_info, enum audio_format_code audio_format_code, uint32_t *format_index) { uint32_t index; uint32_t max_channe_index = 0; bool found = false; if (audio_info == NULL) return found; /* pass through whole array */ for (index = 0; index < audio_info->mode_count; index++) { if (audio_info->modes[index].format_code == audio_format_code) { if (found) { /* format has multiply entries, choose one with * highst number of channels */ if (audio_info->modes[index].channel_count > audio_info->modes[max_channe_index].channel_count) { max_channe_index = index; } } else { /* format found, save it's index */ found = true; max_channe_index = index; } } } /* return index */ if (found && format_index != NULL) *format_index = max_channe_index; return found; } /*For HDMI, calculate if specified sample rates can fit into a given timing */ static void check_audio_bandwidth_hdmi( const struct audio_crtc_info *crtc_info, uint32_t channel_count, union audio_sample_rates *sample_rates) { uint32_t 
samples; uint32_t h_blank; bool limit_freq_to_48_khz = false; bool limit_freq_to_88_2_khz = false; bool limit_freq_to_96_khz = false; bool limit_freq_to_174_4_khz = false; if (!crtc_info) return; /* For two channels supported return whatever sink support,unmodified*/ if (channel_count > 2) { /* Based on HDMI spec 1.3 Table 7.5 */ if ((crtc_info->requested_pixel_clock_100Hz <= 270000) && (crtc_info->v_active <= 576) && !(crtc_info->interlaced) && !(crtc_info->pixel_repetition == 2 || crtc_info->pixel_repetition == 4)) { limit_freq_to_48_khz = true; } else if ((crtc_info->requested_pixel_clock_100Hz <= 270000) && (crtc_info->v_active <= 576) && (crtc_info->interlaced) && (crtc_info->pixel_repetition == 2)) { limit_freq_to_88_2_khz = true; } else if ((crtc_info->requested_pixel_clock_100Hz <= 540000) && (crtc_info->v_active <= 576) && !(crtc_info->interlaced)) { limit_freq_to_174_4_khz = true; } } /* Also do some calculation for the available Audio Bandwidth for the * 8 ch (i.e. for the Layout 1 => ch > 2) */ h_blank = crtc_info->h_total - crtc_info->h_active; if (crtc_info->pixel_repetition) h_blank *= crtc_info->pixel_repetition; /*based on HDMI spec 1.3 Table 7.5 */ h_blank -= 58; /*for Control Period */ h_blank -= 16; samples = h_blank * 10; /* Number of Audio Packets (multiplied by 10) per Line (for 8 ch number * of Audio samples per line multiplied by 10 - Layout 1) */ samples /= 32; samples *= crtc_info->v_active; /*Number of samples multiplied by 10, per second */ samples *= crtc_info->refresh_rate; /*Number of Audio samples per second */ samples /= 10; /* @todo do it after deep color is implemented * 8xx - deep color bandwidth scaling * Extra bandwidth is avaliable in deep color b/c link runs faster than * pixel rate. This has the effect of allowing more tmds characters to * be transmitted during blank */ switch (crtc_info->color_depth) { case COLOR_DEPTH_888: samples *= 4; break; case COLOR_DEPTH_101010: samples *= 5; break; case COLOR_DEPTH_121212: samples *= 6; break; default: samples *= 4; break; } samples /= 4; /*check limitation*/ if (samples < 88200) limit_freq_to_48_khz = true; else if (samples < 96000) limit_freq_to_88_2_khz = true; else if (samples < 176400) limit_freq_to_96_khz = true; else if (samples < 192000) limit_freq_to_174_4_khz = true; if (sample_rates != NULL) { /* limit frequencies */ if (limit_freq_to_174_4_khz) sample_rates->rate.RATE_192 = 0; if (limit_freq_to_96_khz) { sample_rates->rate.RATE_192 = 0; sample_rates->rate.RATE_176_4 = 0; } if (limit_freq_to_88_2_khz) { sample_rates->rate.RATE_192 = 0; sample_rates->rate.RATE_176_4 = 0; sample_rates->rate.RATE_96 = 0; } if (limit_freq_to_48_khz) { sample_rates->rate.RATE_192 = 0; sample_rates->rate.RATE_176_4 = 0; sample_rates->rate.RATE_96 = 0; sample_rates->rate.RATE_88_2 = 0; } } } /*For DP SST, calculate if specified sample rates can fit into a given timing */ static void check_audio_bandwidth_dpsst( const struct audio_crtc_info *crtc_info, uint32_t channel_count, union audio_sample_rates *sample_rates) { /* do nothing */ } /*For DP MST, calculate if specified sample rates can fit into a given timing */ static void check_audio_bandwidth_dpmst( const struct audio_crtc_info *crtc_info, uint32_t channel_count, union audio_sample_rates *sample_rates) { /* do nothing */ } static void check_audio_bandwidth( const struct audio_crtc_info *crtc_info, uint32_t channel_count, enum signal_type signal, union audio_sample_rates *sample_rates) { switch (signal) { case SIGNAL_TYPE_HDMI_TYPE_A: check_audio_bandwidth_hdmi( 
crtc_info, channel_count, sample_rates); break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: check_audio_bandwidth_dpsst( crtc_info, channel_count, sample_rates); break; case SIGNAL_TYPE_DISPLAY_PORT_MST: check_audio_bandwidth_dpmst( crtc_info, channel_count, sample_rates); break; default: break; } } /* expose/not expose HBR capability to Audio driver */ static void set_high_bit_rate_capable( struct audio *audio, bool capable) { uint32_t value = 0; /* set high bit rate audio capable*/ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR); set_reg_field_value(value, capable, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, HBR_CAPABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value); } /* set video latency in ms/2+1 */ static void set_video_latency( struct audio *audio, int latency_in_ms) { uint32_t value = 0; if ((latency_in_ms < 0) || (latency_in_ms > 255)) return; value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC); set_reg_field_value(value, latency_in_ms, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, VIDEO_LIPSYNC); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, value); } /* set audio latency in ms/2+1 */ static void set_audio_latency( struct audio *audio, int latency_in_ms) { uint32_t value = 0; if (latency_in_ms < 0) latency_in_ms = 0; if (latency_in_ms > 255) latency_in_ms = 255; value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC); set_reg_field_value(value, latency_in_ms, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, AUDIO_LIPSYNC); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, value); } void dce_aud_az_enable(struct audio *audio) { uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); DC_LOGGER_INIT(); set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, AUDIO_ENABLED); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n", audio->inst, value); } void dce_aud_az_disable(struct audio *audio) { uint32_t value; DC_LOGGER_INIT(); value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, AUDIO_ENABLED); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n", audio->inst, value); } void dce_aud_az_configure( struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, const struct audio_info *audio_info) { struct dce_audio *aud = DCE_AUD(audio); uint32_t speakers = audio_info->flags.info.ALLSPEAKERS; uint32_t value; uint32_t field = 0; enum audio_format_code audio_format_code; uint32_t format_index; uint32_t index; bool is_ac3_supported = false; union audio_sample_rates sample_rate; uint32_t strlen = 0; value = 
AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); /* Speaker Allocation */ /* uint32_t value; uint32_t field = 0;*/ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); set_reg_field_value(value, speakers, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, SPEAKER_ALLOCATION); /* LFE_PLAYBACK_LEVEL = LFEPBL * LFEPBL = 0 : Unknown or refer to other information * LFEPBL = 1 : 0dB playback * LFEPBL = 2 : +10dB playback * LFE_BL = 3 : Reserved */ set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, LFE_PLAYBACK_LEVEL); /* todo: according to reg spec LFE_PLAYBACK_LEVEL is read only. * why are we writing to it? DCE8 does not write this */ set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, HDMI_CONNECTION); set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, DP_CONNECTION); field = get_reg_field_value(value, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, EXTRA_CONNECTION_INFO); field &= ~0x1; set_reg_field_value(value, field, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, EXTRA_CONNECTION_INFO); /* set audio for output signal */ switch (signal) { case SIGNAL_TYPE_HDMI_TYPE_A: set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, HDMI_CONNECTION); break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, DP_CONNECTION); break; default: BREAK_TO_DEBUGGER(); break; } AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value); /* ACP Data - Supports AI */ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA); set_reg_field_value( value, audio_info->flags.info.SUPPORT_AI, AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA, SUPPORTS_AI); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA, value); /* Audio Descriptors */ /* pass through all formats */ for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT; format_index++) { audio_format_code = (AUDIO_FORMAT_CODE_FIRST + format_index); /* those are unsupported, skip programming */ if (audio_format_code == AUDIO_FORMAT_CODE_1BITAUDIO || audio_format_code == AUDIO_FORMAT_CODE_DST) continue; value = 0; /* check if supported */ if (is_audio_format_supported( audio_info, audio_format_code, &index)) { const struct audio_mode *audio_mode = &audio_info->modes[index]; union audio_sample_rates sample_rates = audio_mode->sample_rates; uint8_t byte2 = audio_mode->max_bit_rate; uint8_t channel_count = audio_mode->channel_count; /* adjust specific properties */ switch (audio_format_code) { case AUDIO_FORMAT_CODE_LINEARPCM: { check_audio_bandwidth( crtc_info, channel_count, signal, &sample_rates); byte2 = audio_mode->sample_size; set_reg_field_value(value, sample_rates.all, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, SUPPORTED_FREQUENCIES_STEREO); } break; case AUDIO_FORMAT_CODE_AC3: is_ac3_supported = true; break; case AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS: case AUDIO_FORMAT_CODE_DTS_HD: case AUDIO_FORMAT_CODE_MAT_MLP: case AUDIO_FORMAT_CODE_DST: case AUDIO_FORMAT_CODE_WMAPRO: byte2 = audio_mode->vendor_specific; break; default: break; } /* fill audio format data */ set_reg_field_value(value, channel_count - 1, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, MAX_CHANNELS); set_reg_field_value(value, sample_rates.all, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, SUPPORTED_FREQUENCIES); 
set_reg_field_value(value, byte2, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, DESCRIPTOR_BYTE_2); } /* if */ AZ_REG_WRITE( AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 + format_index, value); } /* for */ if (is_ac3_supported) /* todo: this reg global. why program global register? */ REG_WRITE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS, 0x05); /* check for 192khz/8-Ch support for HBR requirements */ sample_rate.all = 0; sample_rate.rate.RATE_192 = 1; check_audio_bandwidth( crtc_info, 8, signal, &sample_rate); set_high_bit_rate_capable(audio, sample_rate.rate.RATE_192); /* Audio and Video Lipsync */ set_video_latency(audio, audio_info->video_latency); set_audio_latency(audio, audio_info->audio_latency); value = 0; set_reg_field_value(value, audio_info->manufacture_id, AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0, MANUFACTURER_ID); set_reg_field_value(value, audio_info->product_id, AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0, PRODUCT_ID); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0, value); value = 0; /*get display name string length */ while (audio_info->display_name[strlen++] != '\0') { if (strlen >= MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS) break; } set_reg_field_value(value, strlen, AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1, SINK_DESCRIPTION_LEN); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1, value); DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n", audio->inst, value, audio_info->display_name); /* *write the port ID: *PORT_ID0 = display index *PORT_ID1 = 16bit BDF *(format MSB->LSB: 8bit Bus, 5bit Device, 3bit Function) */ value = 0; set_reg_field_value(value, audio_info->port_id[0], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, PORT_ID0); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, value); value = 0; set_reg_field_value(value, audio_info->port_id[1], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, PORT_ID1); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, value); /*write the 18 char monitor string */ value = 0; set_reg_field_value(value, audio_info->display_name[0], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, DESCRIPTION0); set_reg_field_value(value, audio_info->display_name[1], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, DESCRIPTION1); set_reg_field_value(value, audio_info->display_name[2], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, DESCRIPTION2); set_reg_field_value(value, audio_info->display_name[3], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, DESCRIPTION3); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, value); value = 0; set_reg_field_value(value, audio_info->display_name[4], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, DESCRIPTION4); set_reg_field_value(value, audio_info->display_name[5], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, DESCRIPTION5); set_reg_field_value(value, audio_info->display_name[6], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, DESCRIPTION6); set_reg_field_value(value, audio_info->display_name[7], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, DESCRIPTION7); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, value); value = 0; set_reg_field_value(value, audio_info->display_name[8], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, DESCRIPTION8); set_reg_field_value(value, audio_info->display_name[9], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, DESCRIPTION9); set_reg_field_value(value, audio_info->display_name[10], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, DESCRIPTION10); set_reg_field_value(value, audio_info->display_name[11], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, DESCRIPTION11); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, value); value = 0; 
set_reg_field_value(value, audio_info->display_name[12], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, DESCRIPTION12); set_reg_field_value(value, audio_info->display_name[13], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, DESCRIPTION13); set_reg_field_value(value, audio_info->display_name[14], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, DESCRIPTION14); set_reg_field_value(value, audio_info->display_name[15], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, DESCRIPTION15); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, value); value = 0; set_reg_field_value(value, audio_info->display_name[16], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, DESCRIPTION16); set_reg_field_value(value, audio_info->display_name[17], AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, DESCRIPTION17); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value); value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); } /* * todo: wall clk related functionality probably belong to clock_src. */ /* search pixel clock value for Azalia HDMI Audio */ static void get_azalia_clock_info_hdmi( uint32_t crtc_pixel_clock_100hz, uint32_t actual_pixel_clock_100Hz, struct azalia_clock_info *azalia_clock_info) { /* audio_dto_phase= 24 * 10,000; * 24MHz in [100Hz] units */ azalia_clock_info->audio_dto_phase = 24 * 10000; /* audio_dto_module = PCLKFrequency * 10,000; * [khz] -> [100Hz] */ azalia_clock_info->audio_dto_module = actual_pixel_clock_100Hz; } static void get_azalia_clock_info_dp( uint32_t requested_pixel_clock_100Hz, const struct audio_pll_info *pll_info, struct azalia_clock_info *azalia_clock_info) { /* Reported dpDtoSourceClockInkhz value for * DCE8 already adjusted for SS, do not need any * adjustment here anymore */ /*audio_dto_phase = 24 * 10,000; * 24MHz in [100Hz] units */ azalia_clock_info->audio_dto_phase = 24 * 10000; /*audio_dto_module = dpDtoSourceClockInkhz * 10,000; * [khz] ->[100Hz] */ azalia_clock_info->audio_dto_module = pll_info->dp_dto_source_clock_in_khz * 10; } void dce_aud_wall_dto_setup( struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, const struct audio_pll_info *pll_info) { struct dce_audio *aud = DCE_AUD(audio); struct azalia_clock_info clock_info = { 0 }; if (dc_is_hdmi_tmds_signal(signal)) { uint32_t src_sel; /*DTO0 Programming goal: -generate 24MHz, 128*Fs from 24MHz -use DTO0 when an active HDMI port is connected (optionally a DP is connected) */ /* calculate DTO settings */ get_azalia_clock_info_hdmi( crtc_info->requested_pixel_clock_100Hz, crtc_info->calculated_pixel_clock_100Hz, &clock_info); DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock_100Hz = %d"\ "calculated_pixel_clock_100Hz =%d\n"\ "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\ crtc_info->requested_pixel_clock_100Hz,\ crtc_info->calculated_pixel_clock_100Hz,\ clock_info.audio_dto_module,\ clock_info.audio_dto_phase); /* On TN/SI, Program DTO source select and DTO select before programming DTO modulo and DTO phase. These bits must be programmed first, otherwise there will be no HDMI audio at boot up. This is a HW sequence change (different from old ASICs). Caution when changing this programming sequence. 
HDMI enabled, using DTO0 program master CRTC for DTO0 */ src_sel = pll_info->dto_source - DTO_SOURCE_ID0; REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel, DCCG_AUDIO_DTO_SEL, 0); /* module */ REG_UPDATE(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module); /* phase */ REG_UPDATE(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase); } else { /*DTO1 Programming goal: -generate 24MHz, 512*Fs, 128*Fs from 24MHz -default is to used DTO1, and switch to DTO0 when an audio master HDMI port is connected -use as default for DP calculate DTO settings */ get_azalia_clock_info_dp( crtc_info->requested_pixel_clock_100Hz, pll_info, &clock_info); /* Program DTO select before programming DTO modulo and DTO phase. default to use DTO1 */ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 1); /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1) * Select 512fs for DP TODO: web register definition * does not match register header file * DCE11 version it's commented out while DCE8 it's set to 1 */ /* module */ REG_UPDATE(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module); /* phase */ REG_UPDATE(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase); REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1); } } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_aud_wall_dto_setup( struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, const struct audio_pll_info *pll_info) { struct dce_audio *aud = DCE_AUD(audio); struct azalia_clock_info clock_info = { 0 }; if (dc_is_hdmi_signal(signal)) { uint32_t src_sel; /*DTO0 Programming goal: -generate 24MHz, 128*Fs from 24MHz -use DTO0 when an active HDMI port is connected (optionally a DP is connected) */ /* calculate DTO settings */ get_azalia_clock_info_hdmi( crtc_info->requested_pixel_clock_100Hz, crtc_info->calculated_pixel_clock_100Hz, &clock_info); DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock_100Hz = %d"\ "calculated_pixel_clock_100Hz =%d\n"\ "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\ crtc_info->requested_pixel_clock_100Hz,\ crtc_info->calculated_pixel_clock_100Hz,\ clock_info.audio_dto_module,\ clock_info.audio_dto_phase); /* On TN/SI, Program DTO source select and DTO select before programming DTO modulo and DTO phase. These bits must be programmed first, otherwise there will be no HDMI audio at boot up. This is a HW sequence change (different from old ASICs). Caution when changing this programming sequence. HDMI enabled, using DTO0 program master CRTC for DTO0 */ src_sel = pll_info->dto_source - DTO_SOURCE_ID0; REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel, DCCG_AUDIO_DTO_SEL, 0); /* module */ REG_UPDATE(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module); /* phase */ REG_UPDATE(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase); } else { /*DTO1 Programming goal: -generate 24MHz, 128*Fs from 24MHz (DCE6 does not support 512*Fs) -default is to used DTO1, and switch to DTO0 when an audio master HDMI port is connected -use as default for DP calculate DTO settings */ get_azalia_clock_info_dp( crtc_info->requested_pixel_clock_100Hz, pll_info, &clock_info); /* Program DTO select before programming DTO modulo and DTO phase. 
default to use DTO1 */ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 1); /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1) * Cannot select 512fs for DP * * DCE6 has no DCCG_AUDIO_DTO2_USE_512FBR_DTO mask */ /* module */ REG_UPDATE(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module); /* phase */ REG_UPDATE(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase); /* DCE6 has no DCCG_AUDIO_DTO2_USE_512FBR_DTO mask in DCCG_AUDIO_DTO_SOURCE reg */ } } #endif static bool dce_aud_endpoint_valid(struct audio *audio) { uint32_t value; uint32_t port_connectivity; value = AZ_REG_READ( AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); port_connectivity = get_reg_field_value(value, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT, PORT_CONNECTIVITY); return !(port_connectivity == 1); } /* initialize HW state */ void dce_aud_hw_init( struct audio *audio) { uint32_t value; struct dce_audio *aud = DCE_AUD(audio); /* we only need to program the following registers once, so we only do it for the inst 0*/ if (audio->inst != 0) return; /* Suport R5 - 32khz * Suport R6 - 44.1khz * Suport R7 - 48khz */ /*disable clock gating before write to endpoint register*/ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); REG_UPDATE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, 0x70); /*Keep alive bit to verify HW block in BU. */ REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, 1, EPSS, 1); set_reg_field_value(value, 0, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, CLOCK_GATING_DISABLE); AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); } static const struct audio_funcs funcs = { .endpoint_valid = dce_aud_endpoint_valid, .hw_init = dce_aud_hw_init, .wall_dto_setup = dce_aud_wall_dto_setup, .az_enable = dce_aud_az_enable, .az_disable = dce_aud_az_disable, .az_configure = dce_aud_az_configure, .destroy = dce_aud_destroy, }; #if defined(CONFIG_DRM_AMD_DC_SI) static const struct audio_funcs dce60_funcs = { .endpoint_valid = dce_aud_endpoint_valid, .hw_init = dce_aud_hw_init, .wall_dto_setup = dce60_aud_wall_dto_setup, .az_enable = dce_aud_az_enable, .az_disable = dce_aud_az_disable, .az_configure = dce_aud_az_configure, .destroy = dce_aud_destroy, }; #endif void dce_aud_destroy(struct audio **audio) { struct dce_audio *aud = DCE_AUD(*audio); kfree(aud); *audio = NULL; } struct audio *dce_audio_create( struct dc_context *ctx, unsigned int inst, const struct dce_audio_registers *reg, const struct dce_audio_shift *shifts, const struct dce_audio_mask *masks ) { struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL); if (audio == NULL) { ASSERT_CRITICAL(audio); return NULL; } audio->base.ctx = ctx; audio->base.inst = inst; audio->base.funcs = &funcs; audio->regs = reg; audio->shifts = shifts; audio->masks = masks; return &audio->base; } #if defined(CONFIG_DRM_AMD_DC_SI) struct audio *dce60_audio_create( struct dc_context *ctx, unsigned int inst, const struct dce_audio_registers *reg, const struct dce_audio_shift *shifts, const struct dce_audio_mask *masks ) { struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL); if (audio == NULL) { ASSERT_CRITICAL(audio); return NULL; } audio->base.ctx = ctx; audio->base.inst = inst; audio->base.funcs = &dce60_funcs; audio->regs = reg; audio->shifts = shifts; 
audio->masks = masks; return &audio->base; } #endif
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
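The wall-clock DTO programming above (dce_aud_wall_dto_setup / dce60_aud_wall_dto_setup) writes a phase and a module value per DTO. Below is a minimal user-space sketch of the underlying phase/modulo relationship, assuming the simplified model f_out = f_src * phase / modulo; the helper name and the 128*Fs target are illustrative assumptions, not the output of the driver's get_azalia_clock_info_hdmi().

/*
 * Sketch only: derive a DTO phase/modulo pair for a 128*Fs audio wall
 * clock, assuming f_out = f_src * phase / modulo. Names and constants
 * are illustrative, not taken from the driver.
 */
#include <stdint.h>
#include <stdio.h>

struct dto_setting {
	uint32_t phase;
	uint32_t modulo;
};

static struct dto_setting audio_dto_128fs(uint32_t pixel_clk_hz,
					  uint32_t sample_rate_hz)
{
	struct dto_setting s;

	/* phase = target frequency, modulo = source frequency:
	 * f_out = pixel_clk * (128 * Fs) / pixel_clk = 128 * Fs
	 */
	s.phase = 128u * sample_rate_hz;
	s.modulo = pixel_clk_hz;
	return s;
}

int main(void)
{
	/* 1080p pixel clock, 48 kHz audio: expect a 6.144 MHz wall clock. */
	struct dto_setting s = audio_dto_128fs(148500000u, 48000u);

	printf("phase=%u modulo=%u f_out=%.3f MHz\n", s.phase, s.modulo,
	       (double)148500000u * s.phase / s.modulo / 1e6);
	return 0;
}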
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "dc_types.h" #include "core_types.h" #include "include/grph_object_id.h" #include "include/logger_interface.h" #include "dce_clock_source.h" #include "clk_mgr.h" #include "reg_helper.h" #define REG(reg)\ (clk_src->regs->reg) #define CTX \ clk_src->base.ctx #define DC_LOGGER_INIT() #undef FN #define FN(reg_name, field_name) \ clk_src->cs_shift->field_name, clk_src->cs_mask->field_name #define FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM 6 #define CALC_PLL_CLK_SRC_ERR_TOLERANCE 1 #define MAX_PLL_CALC_ERROR 0xFFFFFFFF #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) static const struct spread_spectrum_data *get_ss_data_entry( struct dce110_clk_src *clk_src, enum signal_type signal, uint32_t pix_clk_khz) { uint32_t entrys_num; uint32_t i; struct spread_spectrum_data *ss_parm = NULL; struct spread_spectrum_data *ret = NULL; switch (signal) { case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: ss_parm = clk_src->dvi_ss_params; entrys_num = clk_src->dvi_ss_params_cnt; break; case SIGNAL_TYPE_HDMI_TYPE_A: ss_parm = clk_src->hdmi_ss_params; entrys_num = clk_src->hdmi_ss_params_cnt; break; case SIGNAL_TYPE_LVDS: ss_parm = clk_src->lvds_ss_params; entrys_num = clk_src->lvds_ss_params_cnt; break; case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_VIRTUAL: ss_parm = clk_src->dp_ss_params; entrys_num = clk_src->dp_ss_params_cnt; break; default: ss_parm = NULL; entrys_num = 0; break; } if (ss_parm == NULL) return ret; for (i = 0; i < entrys_num; ++i, ++ss_parm) { if (ss_parm->freq_range_khz >= pix_clk_khz) { ret = ss_parm; break; } } return ret; } /** * calculate_fb_and_fractional_fb_divider - Calculates feedback and fractional * feedback dividers values * * @calc_pll_cs: Pointer to clock source information * @target_pix_clk_100hz: Desired frequency in 100 Hz * @ref_divider: Reference divider (already known) * @post_divider: Post Divider (already known) * @feedback_divider_param: Pointer where to store * calculated feedback divider value * @fract_feedback_divider_param: Pointer where to store * calculated fract feedback divider value * * return: * It fills the locations pointed by feedback_divider_param * and fract_feedback_divider_param * It returns - true if feedback divider not 0 * - false should never happen) */ static bool calculate_fb_and_fractional_fb_divider( struct calc_pll_clock_source 
*calc_pll_cs, uint32_t target_pix_clk_100hz, uint32_t ref_divider, uint32_t post_divider, uint32_t *feedback_divider_param, uint32_t *fract_feedback_divider_param) { uint64_t feedback_divider; feedback_divider = (uint64_t)target_pix_clk_100hz * ref_divider * post_divider; feedback_divider *= 10; /* additional factor, since we divide by 10 afterwards */ feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor); feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz * 10ull); /*Round to the number of precision * The following code replace the old code (ullfeedbackDivider + 5)/10 * for example if the difference between the number * of fractional feedback decimal point and the fractional FB Divider precision * is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/ feedback_divider += 5ULL * calc_pll_cs->fract_fb_divider_precision_factor; feedback_divider = div_u64(feedback_divider, calc_pll_cs->fract_fb_divider_precision_factor * 10); feedback_divider *= (uint64_t) (calc_pll_cs->fract_fb_divider_precision_factor); *feedback_divider_param = div_u64_rem( feedback_divider, calc_pll_cs->fract_fb_divider_factor, fract_feedback_divider_param); if (*feedback_divider_param != 0) return true; return false; } /** * calc_fb_divider_checking_tolerance - Calculates Feedback and * Fractional Feedback divider values * for passed Reference and Post divider, * checking for tolerance. * @calc_pll_cs: Pointer to clock source information * @pll_settings: Pointer to PLL settings * @ref_divider: Reference divider (already known) * @post_divider: Post Divider (already known) * @tolerance: Tolerance for Calculated Pixel Clock to be within * * return: * It fills the PLLSettings structure with PLL Dividers values * if calculated values are within required tolerance * It returns - true if error is within tolerance * - false if error is not within tolerance */ static bool calc_fb_divider_checking_tolerance( struct calc_pll_clock_source *calc_pll_cs, struct pll_settings *pll_settings, uint32_t ref_divider, uint32_t post_divider, uint32_t tolerance) { uint32_t feedback_divider; uint32_t fract_feedback_divider; uint32_t actual_calculated_clock_100hz; uint32_t abs_err; uint64_t actual_calc_clk_100hz; calculate_fb_and_fractional_fb_divider( calc_pll_cs, pll_settings->adjusted_pix_clk_100hz, ref_divider, post_divider, &feedback_divider, &fract_feedback_divider); /*Actual calculated value*/ actual_calc_clk_100hz = (uint64_t)feedback_divider * calc_pll_cs->fract_fb_divider_factor + fract_feedback_divider; actual_calc_clk_100hz *= calc_pll_cs->ref_freq_khz * 10; actual_calc_clk_100hz = div_u64(actual_calc_clk_100hz, ref_divider * post_divider * calc_pll_cs->fract_fb_divider_factor); actual_calculated_clock_100hz = (uint32_t)(actual_calc_clk_100hz); abs_err = (actual_calculated_clock_100hz > pll_settings->adjusted_pix_clk_100hz) ? 
actual_calculated_clock_100hz - pll_settings->adjusted_pix_clk_100hz : pll_settings->adjusted_pix_clk_100hz - actual_calculated_clock_100hz; if (abs_err <= tolerance) { /*found good values*/ pll_settings->reference_freq = calc_pll_cs->ref_freq_khz; pll_settings->reference_divider = ref_divider; pll_settings->feedback_divider = feedback_divider; pll_settings->fract_feedback_divider = fract_feedback_divider; pll_settings->pix_clk_post_divider = post_divider; pll_settings->calculated_pix_clk_100hz = actual_calculated_clock_100hz; pll_settings->vco_freq = div_u64((u64)actual_calculated_clock_100hz * post_divider, 10); return true; } return false; } static bool calc_pll_dividers_in_range( struct calc_pll_clock_source *calc_pll_cs, struct pll_settings *pll_settings, uint32_t min_ref_divider, uint32_t max_ref_divider, uint32_t min_post_divider, uint32_t max_post_divider, uint32_t err_tolerance) { uint32_t ref_divider; uint32_t post_divider; uint32_t tolerance; /* This is err_tolerance / 10000 = 0.0025 - acceptable error of 0.25% * This is errorTolerance / 10000 = 0.0001 - acceptable error of 0.01%*/ tolerance = (pll_settings->adjusted_pix_clk_100hz * err_tolerance) / 100000; if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE) tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE; for ( post_divider = max_post_divider; post_divider >= min_post_divider; --post_divider) { for ( ref_divider = min_ref_divider; ref_divider <= max_ref_divider; ++ref_divider) { if (calc_fb_divider_checking_tolerance( calc_pll_cs, pll_settings, ref_divider, post_divider, tolerance)) { return true; } } } return false; } static uint32_t calculate_pixel_clock_pll_dividers( struct calc_pll_clock_source *calc_pll_cs, struct pll_settings *pll_settings) { uint32_t err_tolerance; uint32_t min_post_divider; uint32_t max_post_divider; uint32_t min_ref_divider; uint32_t max_ref_divider; if (pll_settings->adjusted_pix_clk_100hz == 0) { DC_LOG_ERROR( "%s Bad requested pixel clock", __func__); return MAX_PLL_CALC_ERROR; } /* 1) Find Post divider ranges */ if (pll_settings->pix_clk_post_divider) { min_post_divider = pll_settings->pix_clk_post_divider; max_post_divider = pll_settings->pix_clk_post_divider; } else { min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider; if (min_post_divider * pll_settings->adjusted_pix_clk_100hz < calc_pll_cs->min_vco_khz * 10) { min_post_divider = calc_pll_cs->min_vco_khz * 10 / pll_settings->adjusted_pix_clk_100hz; if ((min_post_divider * pll_settings->adjusted_pix_clk_100hz) < calc_pll_cs->min_vco_khz * 10) min_post_divider++; } max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider; if (max_post_divider * pll_settings->adjusted_pix_clk_100hz > calc_pll_cs->max_vco_khz * 10) max_post_divider = calc_pll_cs->max_vco_khz * 10 / pll_settings->adjusted_pix_clk_100hz; } /* 2) Find Reference divider ranges * When SS is enabled, or for Display Port even without SS, * pll_settings->referenceDivider is not zero. * So calculate PPLL FB and fractional FB divider * using the passed reference divider*/ if (pll_settings->reference_divider) { min_ref_divider = pll_settings->reference_divider; max_ref_divider = pll_settings->reference_divider; } else { min_ref_divider = ((calc_pll_cs->ref_freq_khz / calc_pll_cs->max_pll_input_freq_khz) > calc_pll_cs->min_pll_ref_divider) ? calc_pll_cs->ref_freq_khz / calc_pll_cs->max_pll_input_freq_khz : calc_pll_cs->min_pll_ref_divider; max_ref_divider = ((calc_pll_cs->ref_freq_khz / calc_pll_cs->min_pll_input_freq_khz) < calc_pll_cs->max_pll_ref_divider) ? 
calc_pll_cs->ref_freq_khz / calc_pll_cs->min_pll_input_freq_khz : calc_pll_cs->max_pll_ref_divider; } /* If some parameters are invalid we could have scenario when "min">"max" * which produced endless loop later. * We should investigate why we get the wrong parameters. * But to follow the similar logic when "adjustedPixelClock" is set to be 0 * it is better to return here than cause system hang/watchdog timeout later. * ## SVS Wed 15 Jul 2009 */ if (min_post_divider > max_post_divider) { DC_LOG_ERROR( "%s Post divider range is invalid", __func__); return MAX_PLL_CALC_ERROR; } if (min_ref_divider > max_ref_divider) { DC_LOG_ERROR( "%s Reference divider range is invalid", __func__); return MAX_PLL_CALC_ERROR; } /* 3) Try to find PLL dividers given ranges * starting with minimal error tolerance. * Increase error tolerance until PLL dividers found*/ err_tolerance = MAX_PLL_CALC_ERROR; while (!calc_pll_dividers_in_range( calc_pll_cs, pll_settings, min_ref_divider, max_ref_divider, min_post_divider, max_post_divider, err_tolerance)) err_tolerance += (err_tolerance > 10) ? (err_tolerance / 10) : 1; return err_tolerance; } static bool pll_adjust_pix_clk( struct dce110_clk_src *clk_src, struct pixel_clk_params *pix_clk_params, struct pll_settings *pll_settings) { uint32_t actual_pix_clk_100hz = 0; uint32_t requested_clk_100hz = 0; struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = { 0 }; enum bp_result bp_result; switch (pix_clk_params->signal_type) { case SIGNAL_TYPE_HDMI_TYPE_A: { requested_clk_100hz = pix_clk_params->requested_pix_clk_100hz; if (pix_clk_params->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (pix_clk_params->color_depth) { case COLOR_DEPTH_101010: requested_clk_100hz = (requested_clk_100hz * 5) >> 2; break; /* x1.25*/ case COLOR_DEPTH_121212: requested_clk_100hz = (requested_clk_100hz * 6) >> 2; break; /* x1.5*/ case COLOR_DEPTH_161616: requested_clk_100hz = requested_clk_100hz * 2; break; /* x2.0*/ default: break; } } actual_pix_clk_100hz = requested_clk_100hz; } break; case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_EDP: requested_clk_100hz = pix_clk_params->requested_sym_clk * 10; actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; break; default: requested_clk_100hz = pix_clk_params->requested_pix_clk_100hz; actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; break; } bp_adjust_pixel_clock_params.pixel_clock = requested_clk_100hz / 10; bp_adjust_pixel_clock_params. encoder_object_id = pix_clk_params->encoder_object_id; bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type; bp_adjust_pixel_clock_params. ss_enable = pix_clk_params->flags.ENABLE_SS; bp_result = clk_src->bios->funcs->adjust_pixel_clock( clk_src->bios, &bp_adjust_pixel_clock_params); if (bp_result == BP_RESULT_OK) { pll_settings->actual_pix_clk_100hz = actual_pix_clk_100hz; pll_settings->adjusted_pix_clk_100hz = bp_adjust_pixel_clock_params.adjusted_pixel_clock * 10; pll_settings->reference_divider = bp_adjust_pixel_clock_params.reference_divider; pll_settings->pix_clk_post_divider = bp_adjust_pixel_clock_params.pixel_clock_post_divider; return true; } return false; } /* * Calculate PLL Dividers for given Clock Value. * First will call VBIOS Adjust Exec table to check if requested Pixel clock * will be Adjusted based on usage. * Then it will calculate PLL Dividers for this Adjusted clock using preferred * method (Maximum VCO frequency). 
* * \return * Calculation error in units of 0.01% */ static uint32_t dce110_get_pix_clk_dividers_helper ( struct dce110_clk_src *clk_src, struct pll_settings *pll_settings, struct pixel_clk_params *pix_clk_params) { uint32_t field = 0; uint32_t pll_calc_error = MAX_PLL_CALC_ERROR; DC_LOGGER_INIT(); /* Check if reference clock is external (not pcie/xtalin) * HW Dce80 spec: * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB * 04 - HSYNCA, 05 - GENLK_CLK, 06 - PCIE_REFCLK, 07 - DVOCLK0 */ REG_GET(PLL_CNTL, PLL_REF_DIV_SRC, &field); pll_settings->use_external_clk = (field > 1); /* VBIOS by default enables DP SS (spread on IDCLK) for DCE 8.0 always * (we do not care any more from SI for some older DP Sink which * does not report SS support, no known issues) */ if ((pix_clk_params->flags.ENABLE_SS) || (dc_is_dp_signal(pix_clk_params->signal_type))) { const struct spread_spectrum_data *ss_data = get_ss_data_entry( clk_src, pix_clk_params->signal_type, pll_settings->adjusted_pix_clk_100hz / 10); if (NULL != ss_data) pll_settings->ss_percentage = ss_data->percentage; } /* Check VBIOS AdjustPixelClock Exec table */ if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) { /* Should never happen, ASSERT and fill up values to be able * to continue. */ DC_LOG_ERROR( "%s: Failed to adjust pixel clock!!", __func__); pll_settings->actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; pll_settings->adjusted_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; if (dc_is_dp_signal(pix_clk_params->signal_type)) pll_settings->adjusted_pix_clk_100hz = 1000000; } /* Calculate Dividers */ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) /*Calculate Dividers by HDMI object, no SS case or SS case */ pll_calc_error = calculate_pixel_clock_pll_dividers( &clk_src->calc_pll_hdmi, pll_settings); else /*Calculate Dividers by default object, no SS case or SS case */ pll_calc_error = calculate_pixel_clock_pll_dividers( &clk_src->calc_pll, pll_settings); return pll_calc_error; } static void dce112_get_pix_clk_dividers_helper ( struct dce110_clk_src *clk_src, struct pll_settings *pll_settings, struct pixel_clk_params *pix_clk_params) { uint32_t actual_pixel_clock_100hz; actual_pixel_clock_100hz = pix_clk_params->requested_pix_clk_100hz; /* Calculate Dividers */ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { switch (pix_clk_params->color_depth) { case COLOR_DEPTH_101010: actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2; actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10; break; case COLOR_DEPTH_121212: actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2; actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10; break; case COLOR_DEPTH_161616: actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2; break; default: break; } } pll_settings->actual_pix_clk_100hz = actual_pixel_clock_100hz; pll_settings->adjusted_pix_clk_100hz = actual_pixel_clock_100hz; pll_settings->calculated_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; } static uint32_t dce110_get_pix_clk_dividers( struct clock_source *cs, struct pixel_clk_params *pix_clk_params, struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); uint32_t pll_calc_error = MAX_PLL_CALC_ERROR; DC_LOGGER_INIT(); if (pix_clk_params == NULL || pll_settings == NULL || pix_clk_params->requested_pix_clk_100hz == 0) { DC_LOG_ERROR( "%s: Invalid parameters!!\n", __func__); return pll_calc_error; } memset(pll_settings, 0, sizeof(*pll_settings)); if (cs->id == 
CLOCK_SOURCE_ID_DP_DTO || cs->id == CLOCK_SOURCE_ID_EXTERNAL) { pll_settings->adjusted_pix_clk_100hz = clk_src->ext_clk_khz * 10; pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10; pll_settings->actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; return 0; } pll_calc_error = dce110_get_pix_clk_dividers_helper(clk_src, pll_settings, pix_clk_params); return pll_calc_error; } static uint32_t dce112_get_pix_clk_dividers( struct clock_source *cs, struct pixel_clk_params *pix_clk_params, struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); DC_LOGGER_INIT(); if (pix_clk_params == NULL || pll_settings == NULL || pix_clk_params->requested_pix_clk_100hz == 0) { DC_LOG_ERROR( "%s: Invalid parameters!!\n", __func__); return -1; } memset(pll_settings, 0, sizeof(*pll_settings)); if (cs->id == CLOCK_SOURCE_ID_DP_DTO || cs->id == CLOCK_SOURCE_ID_EXTERNAL) { pll_settings->adjusted_pix_clk_100hz = clk_src->ext_clk_khz * 10; pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10; pll_settings->actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; return -1; } dce112_get_pix_clk_dividers_helper(clk_src, pll_settings, pix_clk_params); return 0; } static bool disable_spread_spectrum(struct dce110_clk_src *clk_src) { enum bp_result result; struct bp_spread_spectrum_parameters bp_ss_params = {0}; bp_ss_params.pll_id = clk_src->base.id; /*Call ASICControl to process ATOMBIOS Exec table*/ result = clk_src->bios->funcs->enable_spread_spectrum_on_ppll( clk_src->bios, &bp_ss_params, false); return result == BP_RESULT_OK; } static bool calculate_ss( const struct pll_settings *pll_settings, const struct spread_spectrum_data *ss_data, struct delta_sigma_data *ds_data) { struct fixed31_32 fb_div; struct fixed31_32 ss_amount; struct fixed31_32 ss_nslip_amount; struct fixed31_32 ss_ds_frac_amount; struct fixed31_32 ss_step_size; struct fixed31_32 modulation_time; if (ds_data == NULL) return false; if (ss_data == NULL) return false; if (ss_data->percentage == 0) return false; if (pll_settings == NULL) return false; memset(ds_data, 0, sizeof(struct delta_sigma_data)); /* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/ /* 6 decimal point support in fractional feedback divider */ fb_div = dc_fixpt_from_fraction( pll_settings->fract_feedback_divider, 1000000); fb_div = dc_fixpt_add_int(fb_div, pll_settings->feedback_divider); ds_data->ds_frac_amount = 0; /*spreadSpectrumPercentage is in the unit of .01%, * so have to divided by 100 * 100*/ ss_amount = dc_fixpt_mul( fb_div, dc_fixpt_from_fraction(ss_data->percentage, 100 * ss_data->percentage_divider)); ds_data->feedback_amount = dc_fixpt_floor(ss_amount); ss_nslip_amount = dc_fixpt_sub(ss_amount, dc_fixpt_from_int(ds_data->feedback_amount)); ss_nslip_amount = dc_fixpt_mul_int(ss_nslip_amount, 10); ds_data->nfrac_amount = dc_fixpt_floor(ss_nslip_amount); ss_ds_frac_amount = dc_fixpt_sub(ss_nslip_amount, dc_fixpt_from_int(ds_data->nfrac_amount)); ss_ds_frac_amount = dc_fixpt_mul_int(ss_ds_frac_amount, 65536); ds_data->ds_frac_amount = dc_fixpt_floor(ss_ds_frac_amount); /* compute SS_STEP_SIZE_DSFRAC */ modulation_time = dc_fixpt_from_fraction( pll_settings->reference_freq * 1000, pll_settings->reference_divider * ss_data->modulation_freq_hz); if (ss_data->flags.CENTER_SPREAD) modulation_time = dc_fixpt_div_int(modulation_time, 4); else modulation_time = dc_fixpt_div_int(modulation_time, 2); ss_step_size = dc_fixpt_div(ss_amount, modulation_time); /* 
SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/ ss_step_size = dc_fixpt_mul_int(ss_step_size, 65536 * 10); ds_data->ds_frac_size = dc_fixpt_floor(ss_step_size); return true; } static bool enable_spread_spectrum( struct dce110_clk_src *clk_src, enum signal_type signal, struct pll_settings *pll_settings) { struct bp_spread_spectrum_parameters bp_params = {0}; struct delta_sigma_data d_s_data; const struct spread_spectrum_data *ss_data = NULL; ss_data = get_ss_data_entry( clk_src, signal, pll_settings->calculated_pix_clk_100hz / 10); /* Pixel clock PLL has been programmed to generate desired pixel clock, * now enable SS on pixel clock */ /* TODO is it OK to return true not doing anything ??*/ if (ss_data != NULL && pll_settings->ss_percentage != 0) { if (calculate_ss(pll_settings, ss_data, &d_s_data)) { bp_params.ds.feedback_amount = d_s_data.feedback_amount; bp_params.ds.nfrac_amount = d_s_data.nfrac_amount; bp_params.ds.ds_frac_size = d_s_data.ds_frac_size; bp_params.ds_frac_amount = d_s_data.ds_frac_amount; bp_params.flags.DS_TYPE = 1; bp_params.pll_id = clk_src->base.id; bp_params.percentage = ss_data->percentage; if (ss_data->flags.CENTER_SPREAD) bp_params.flags.CENTER_SPREAD = 1; if (ss_data->flags.EXTERNAL_SS) bp_params.flags.EXTERNAL_SS = 1; if (BP_RESULT_OK != clk_src->bios->funcs-> enable_spread_spectrum_on_ppll( clk_src->bios, &bp_params, true)) return false; } else return false; } return true; } static void dce110_program_pixel_clk_resync( struct dce110_clk_src *clk_src, enum signal_type signal_type, enum dc_color_depth colordepth) { REG_UPDATE(RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, 0); /* 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1) 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4) 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2) 48 bit mode: TMDS clock = 2 x pixel clock (2:1) */ if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A) return; switch (colordepth) { case COLOR_DEPTH_888: REG_UPDATE(RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, 0); break; case COLOR_DEPTH_101010: REG_UPDATE(RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, 1); break; case COLOR_DEPTH_121212: REG_UPDATE(RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, 2); break; case COLOR_DEPTH_161616: REG_UPDATE(RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, 3); break; default: break; } } static void dce112_program_pixel_clk_resync( struct dce110_clk_src *clk_src, enum signal_type signal_type, enum dc_color_depth colordepth, bool enable_ycbcr420) { uint32_t deep_color_cntl = 0; uint32_t double_rate_enable = 0; /* 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1) 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4) 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2) 48 bit mode: TMDS clock = 2 x pixel clock (2:1) */ if (signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { double_rate_enable = enable_ycbcr420 ? 
1 : 0; switch (colordepth) { case COLOR_DEPTH_888: deep_color_cntl = 0; break; case COLOR_DEPTH_101010: deep_color_cntl = 1; break; case COLOR_DEPTH_121212: deep_color_cntl = 2; break; case COLOR_DEPTH_161616: deep_color_cntl = 3; break; default: break; } } if (clk_src->cs_mask->PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE) REG_UPDATE_2(PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, double_rate_enable); else REG_UPDATE(PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl); } static bool dce110_program_pix_clk( struct clock_source *clock_source, struct pixel_clk_params *pix_clk_params, enum dp_link_encoding encoding, struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; /* First disable SS * ATOMBIOS will enable by default SS on PLL for DP, * do not disable it here */ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL && !dc_is_dp_signal(pix_clk_params->signal_type) && clock_source->ctx->dce_version <= DCE_VERSION_11_0) disable_spread_spectrum(clk_src); /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ bp_pc_params.controller_id = pix_clk_params->controller_id; bp_pc_params.pll_id = clock_source->id; bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz; bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; bp_pc_params.signal_type = pix_clk_params->signal_type; bp_pc_params.reference_divider = pll_settings->reference_divider; bp_pc_params.feedback_divider = pll_settings->feedback_divider; bp_pc_params.fractional_feedback_divider = pll_settings->fract_feedback_divider; bp_pc_params.pixel_clock_post_divider = pll_settings->pix_clk_post_divider; bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC = pll_settings->use_external_clk; switch (pix_clk_params->color_depth) { case COLOR_DEPTH_101010: bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30; break; case COLOR_DEPTH_121212: bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36; break; case COLOR_DEPTH_161616: bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48; break; default: break; } if (clk_src->bios->funcs->set_pixel_clock( clk_src->bios, &bp_pc_params) != BP_RESULT_OK) return false; /* Enable SS * ATOMBIOS will enable by default SS for DP on PLL ( DP ID clock), * based on HW display PLL team, SS control settings should be programmed * during PLL Reset, but they do not have effect * until SS_EN is asserted.*/ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL && !dc_is_dp_signal(pix_clk_params->signal_type)) { if (pix_clk_params->flags.ENABLE_SS) if (!enable_spread_spectrum(clk_src, pix_clk_params->signal_type, pll_settings)) return false; /* Resync deep color DTO */ dce110_program_pixel_clk_resync(clk_src, pix_clk_params->signal_type, pix_clk_params->color_depth); } return true; } static bool dce112_program_pix_clk( struct clock_source *clock_source, struct pixel_clk_params *pix_clk_params, enum dp_link_encoding encoding, struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; /* First disable SS * ATOMBIOS will enable by default SS on PLL for DP, * do not disable it here */ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL && !dc_is_dp_signal(pix_clk_params->signal_type) && clock_source->ctx->dce_version <= DCE_VERSION_11_0) disable_spread_spectrum(clk_src); /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ 
bp_pc_params.controller_id = pix_clk_params->controller_id; bp_pc_params.pll_id = clock_source->id; bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz; bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; bp_pc_params.signal_type = pix_clk_params->signal_type; if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC = pll_settings->use_external_clk; bp_pc_params.flags.SET_XTALIN_REF_SRC = !pll_settings->use_external_clk; if (pix_clk_params->flags.SUPPORT_YCBCR420) { bp_pc_params.flags.SUPPORT_YUV_420 = 1; } } if (clk_src->bios->funcs->set_pixel_clock( clk_src->bios, &bp_pc_params) != BP_RESULT_OK) return false; /* Resync deep color DTO */ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) dce112_program_pixel_clk_resync(clk_src, pix_clk_params->signal_type, pix_clk_params->color_depth, pix_clk_params->flags.SUPPORT_YCBCR420); return true; } static bool dcn31_program_pix_clk( struct clock_source *clock_source, struct pixel_clk_params *pix_clk_params, enum dp_link_encoding encoding, struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz; const struct pixel_rate_range_table_entry *e = look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10); struct bp_pixel_clock_parameters bp_pc_params = {0}; enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; // For these signal types Driver to program DP_DTO without calling VBIOS Command table if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) { if (e) { /* Set DTO values: phase = target clock, modulo = reference clock*/ REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor); REG_WRITE(MODULO[inst], dp_dto_ref_khz * e->div_factor); } else { /* Set DTO values: phase = target clock, modulo = reference clock*/ REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100); REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000); } /* Enable DTO */ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) if (encoding == DP_128b_132b_ENCODING) REG_UPDATE_2(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1, PIPE0_DTO_SRC_SEL, 2); else REG_UPDATE_2(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1, PIPE0_DTO_SRC_SEL, 1); else REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); } else { if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) REG_UPDATE(PIXEL_RATE_CNTL[inst], PIPE0_DTO_SRC_SEL, 0); /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ bp_pc_params.controller_id = pix_clk_params->controller_id; bp_pc_params.pll_id = clock_source->id; bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz; bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; bp_pc_params.signal_type = pix_clk_params->signal_type; // Make sure we send the correct color depth to DMUB for HDMI if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { switch (pix_clk_params->color_depth) { case COLOR_DEPTH_888: bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; break; case COLOR_DEPTH_101010: bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_30; break; case COLOR_DEPTH_121212: bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_36; break; case COLOR_DEPTH_161616: bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_48; break; default: bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; break; } bp_pc_params.color_depth = bp_pc_colour_depth; } if 
(clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC = pll_settings->use_external_clk; bp_pc_params.flags.SET_XTALIN_REF_SRC = !pll_settings->use_external_clk; if (pix_clk_params->flags.SUPPORT_YCBCR420) { bp_pc_params.flags.SUPPORT_YUV_420 = 1; } } if (clk_src->bios->funcs->set_pixel_clock( clk_src->bios, &bp_pc_params) != BP_RESULT_OK) return false; /* Resync deep color DTO */ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) dce112_program_pixel_clk_resync(clk_src, pix_clk_params->signal_type, pix_clk_params->color_depth, pix_clk_params->flags.SUPPORT_YCBCR420); } return true; } static bool dce110_clock_source_power_down( struct clock_source *clk_src) { struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src); enum bp_result bp_result; struct bp_pixel_clock_parameters bp_pixel_clock_params = {0}; if (clk_src->dp_clk_src) return true; /* If Pixel Clock is 0 it means Power Down Pll*/ bp_pixel_clock_params.controller_id = CONTROLLER_ID_UNDEFINED; bp_pixel_clock_params.pll_id = clk_src->id; bp_pixel_clock_params.flags.FORCE_PROGRAMMING_OF_PLL = 1; /*Call ASICControl to process ATOMBIOS Exec table*/ bp_result = dce110_clk_src->bios->funcs->set_pixel_clock( dce110_clk_src->bios, &bp_pixel_clock_params); return bp_result == BP_RESULT_OK; } static bool get_pixel_clk_frequency_100hz( const struct clock_source *clock_source, unsigned int inst, unsigned int *pixel_clk_khz) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); unsigned int clock_hz = 0; unsigned int modulo_hz = 0; if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) { clock_hz = REG_READ(PHASE[inst]); if (clock_source->ctx->dc->hwss.enable_vblanks_synchronization && clock_source->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0) { /* NOTE: In case VBLANK syncronization is enabled, MODULO may * not be programmed equal to DPREFCLK */ modulo_hz = REG_READ(MODULO[inst]); if (modulo_hz) *pixel_clk_khz = div_u64((uint64_t)clock_hz* clock_source->ctx->dc->clk_mgr->dprefclk_khz*10, modulo_hz); else *pixel_clk_khz = 0; } else { /* NOTE: There is agreement with VBIOS here that MODULO is * programmed equal to DPREFCLK, in which case PHASE will be * equivalent to pixel clock. 
*/ *pixel_clk_khz = clock_hz / 100; } return true; } return false; } /* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */ const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = { // /1.001 rates {25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17 {59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340 {74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758 {89910, 90000, 90000, 1000, 1001}, //90Mhz -> 89.91 {125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87 {148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516 {167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83 {222520, 222530, 222750, 1000, 1001}, //222.75Mhz -> 222.527 {257140, 257150, 257400, 1000, 1001}, //257.4Mhz -> 257.1429 {296700, 296710, 297000, 1000, 1001}, //297Mhz -> 296.7033 {342850, 342860, 343200, 1000, 1001}, //343.2Mhz -> 342.857 {395600, 395610, 396000, 1000, 1001}, //396Mhz -> 395.6 {409090, 409100, 409500, 1000, 1001}, //409.5Mhz -> 409.091 {445050, 445060, 445500, 1000, 1001}, //445.5Mhz -> 445.055 {467530, 467540, 468000, 1000, 1001}, //468Mhz -> 467.5325 {519230, 519240, 519750, 1000, 1001}, //519.75Mhz -> 519.231 {525970, 525980, 526500, 1000, 1001}, //526.5Mhz -> 525.974 {545450, 545460, 546000, 1000, 1001}, //546Mhz -> 545.455 {593400, 593410, 594000, 1000, 1001}, //594Mhz -> 593.4066 {623370, 623380, 624000, 1000, 1001}, //624Mhz -> 623.377 {692300, 692310, 693000, 1000, 1001}, //693Mhz -> 692.308 {701290, 701300, 702000, 1000, 1001}, //702Mhz -> 701.2987 {791200, 791210, 792000, 1000, 1001}, //792Mhz -> 791.209 {890100, 890110, 891000, 1000, 1001}, //891Mhz -> 890.1099 {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz -> 1186.8131 // *1.001 rates {27020, 27030, 27000, 1001, 1000}, //27Mhz {54050, 54060, 54000, 1001, 1000}, //54Mhz {108100, 108110, 108000, 1001, 1000},//108Mhz }; const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb( unsigned int pixel_rate_khz) { int i; for (i = 0; i < NUM_ELEMENTS(video_optimized_pixel_rates); i++) { const struct pixel_rate_range_table_entry *e = &video_optimized_pixel_rates[i]; if (e->range_min_khz <= pixel_rate_khz && pixel_rate_khz <= e->range_max_khz) { return e; } } return NULL; } static bool dcn20_program_pix_clk( struct clock_source *clock_source, struct pixel_clk_params *pix_clk_params, enum dp_link_encoding encoding, struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; dce112_program_pix_clk(clock_source, pix_clk_params, encoding, pll_settings); if (clock_source->ctx->dc->hwss.enable_vblanks_synchronization && clock_source->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0) { /* NOTE: In case VBLANK syncronization is enabled, * we need to set modulo to default DPREFCLK first * dce112_program_pix_clk does not set default DPREFCLK */ REG_WRITE(MODULO[inst], clock_source->ctx->dc->clk_mgr->dprefclk_khz*1000); } return true; } static bool dcn20_override_dp_pix_clk( struct clock_source *clock_source, unsigned int inst, unsigned int pixel_clk, unsigned int ref_clk) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 0); REG_WRITE(PHASE[inst], pixel_clk); REG_WRITE(MODULO[inst], ref_clk); REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); return true; } static const struct clock_source_funcs dcn20_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = 
dcn20_program_pix_clk, .get_pix_clk_dividers = dce112_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz, .override_dp_pix_clk = dcn20_override_dp_pix_clk }; static bool dcn3_program_pix_clk( struct clock_source *clock_source, struct pixel_clk_params *pix_clk_params, enum dp_link_encoding encoding, struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz; const struct pixel_rate_range_table_entry *e = look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10); // For these signal types Driver to program DP_DTO without calling VBIOS Command table if (dc_is_dp_signal(pix_clk_params->signal_type)) { if (e) { /* Set DTO values: phase = target clock, modulo = reference clock*/ REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor); REG_WRITE(MODULO[inst], dp_dto_ref_khz * e->div_factor); } else { /* Set DTO values: phase = target clock, modulo = reference clock*/ REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100); REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000); } /* Enable DTO */ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) REG_UPDATE_2(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1, PIPE0_DTO_SRC_SEL, 1); else REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); } else // For other signal types(HDMI_TYPE_A, DVI) Driver still to call VBIOS Command table dce112_program_pix_clk(clock_source, pix_clk_params, encoding, pll_settings); return true; } static uint32_t dcn3_get_pix_clk_dividers( struct clock_source *cs, struct pixel_clk_params *pix_clk_params, struct pll_settings *pll_settings) { unsigned long long actual_pix_clk_100Hz = pix_clk_params ? 
pix_clk_params->requested_pix_clk_100hz : 0; DC_LOGGER_INIT(); if (pix_clk_params == NULL || pll_settings == NULL || pix_clk_params->requested_pix_clk_100hz == 0) { DC_LOG_ERROR( "%s: Invalid parameters!!\n", __func__); return -1; } memset(pll_settings, 0, sizeof(*pll_settings)); /* Adjust for HDMI Type A deep color */ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { switch (pix_clk_params->color_depth) { case COLOR_DEPTH_101010: actual_pix_clk_100Hz = (actual_pix_clk_100Hz * 5) >> 2; break; case COLOR_DEPTH_121212: actual_pix_clk_100Hz = (actual_pix_clk_100Hz * 6) >> 2; break; case COLOR_DEPTH_161616: actual_pix_clk_100Hz = actual_pix_clk_100Hz * 2; break; default: break; } } pll_settings->actual_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz; pll_settings->adjusted_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz; pll_settings->calculated_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz; return 0; } static const struct clock_source_funcs dcn3_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dcn3_program_pix_clk, .get_pix_clk_dividers = dcn3_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; static const struct clock_source_funcs dcn31_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dcn31_program_pix_clk, .get_pix_clk_dividers = dcn3_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; /*****************************************/ /* Constructor */ /*****************************************/ static const struct clock_source_funcs dce112_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dce112_program_pix_clk, .get_pix_clk_dividers = dce112_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; static const struct clock_source_funcs dce110_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dce110_program_pix_clk, .get_pix_clk_dividers = dce110_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; static void get_ss_info_from_atombios( struct dce110_clk_src *clk_src, enum as_signal_type as_signal, struct spread_spectrum_data *spread_spectrum_data[], uint32_t *ss_entries_num) { enum bp_result bp_result = BP_RESULT_FAILURE; struct spread_spectrum_info *ss_info; struct spread_spectrum_data *ss_data; struct spread_spectrum_info *ss_info_cur; struct spread_spectrum_data *ss_data_cur; uint32_t i; DC_LOGGER_INIT(); if (ss_entries_num == NULL) { DC_LOG_SYNC( "Invalid entry !!!\n"); return; } if (spread_spectrum_data == NULL) { DC_LOG_SYNC( "Invalid array pointer!!!\n"); return; } spread_spectrum_data[0] = NULL; *ss_entries_num = 0; *ss_entries_num = clk_src->bios->funcs->get_ss_entry_number( clk_src->bios, as_signal); if (*ss_entries_num == 0) return; ss_info = kcalloc(*ss_entries_num, sizeof(struct spread_spectrum_info), GFP_KERNEL); ss_info_cur = ss_info; if (ss_info == NULL) return; ss_data = kcalloc(*ss_entries_num, sizeof(struct spread_spectrum_data), GFP_KERNEL); if (ss_data == NULL) goto out_free_info; for (i = 0, ss_info_cur = ss_info; i < (*ss_entries_num); ++i, ++ss_info_cur) { bp_result = clk_src->bios->funcs->get_spread_spectrum_info( clk_src->bios, as_signal, i, ss_info_cur); if (bp_result != BP_RESULT_OK) goto out_free_data; } for (i = 0, ss_info_cur = ss_info, ss_data_cur = ss_data; i < (*ss_entries_num); ++i, ++ss_info_cur, ++ss_data_cur) { if (ss_info_cur->type.STEP_AND_DELAY_INFO 
!= false) { DC_LOG_SYNC( "Invalid ATOMBIOS SS Table!!!\n"); goto out_free_data; } /* for HDMI check SS percentage, * if it is > 6 (0.06%), the ATOMBIOS table info is invalid*/ if (as_signal == AS_SIGNAL_TYPE_HDMI && ss_info_cur->spread_spectrum_percentage > 6){ /* invalid input, do nothing */ DC_LOG_SYNC( "Invalid SS percentage "); DC_LOG_SYNC( "for HDMI in ATOMBIOS info Table!!!\n"); continue; } if (ss_info_cur->spread_percentage_divider == 1000) { /* Keep previous precision from ATOMBIOS for these * in case new precision set by ATOMBIOS for these * (otherwise all code in DCE specific classes * for all previous ASICs would need * to be updated for SS calculations, * Audio SS compensation and DP DTO SS compensation * which assumes fixed SS percentage Divider = 100)*/ ss_info_cur->spread_spectrum_percentage /= 10; ss_info_cur->spread_percentage_divider = 100; } ss_data_cur->freq_range_khz = ss_info_cur->target_clock_range; ss_data_cur->percentage = ss_info_cur->spread_spectrum_percentage; ss_data_cur->percentage_divider = ss_info_cur->spread_percentage_divider; ss_data_cur->modulation_freq_hz = ss_info_cur->spread_spectrum_range; if (ss_info_cur->type.CENTER_MODE) ss_data_cur->flags.CENTER_SPREAD = 1; if (ss_info_cur->type.EXTERNAL) ss_data_cur->flags.EXTERNAL_SS = 1; } *spread_spectrum_data = ss_data; kfree(ss_info); return; out_free_data: kfree(ss_data); *ss_entries_num = 0; out_free_info: kfree(ss_info); } static void ss_info_from_atombios_create( struct dce110_clk_src *clk_src) { get_ss_info_from_atombios( clk_src, AS_SIGNAL_TYPE_DISPLAY_PORT, &clk_src->dp_ss_params, &clk_src->dp_ss_params_cnt); get_ss_info_from_atombios( clk_src, AS_SIGNAL_TYPE_HDMI, &clk_src->hdmi_ss_params, &clk_src->hdmi_ss_params_cnt); get_ss_info_from_atombios( clk_src, AS_SIGNAL_TYPE_DVI, &clk_src->dvi_ss_params, &clk_src->dvi_ss_params_cnt); get_ss_info_from_atombios( clk_src, AS_SIGNAL_TYPE_LVDS, &clk_src->lvds_ss_params, &clk_src->lvds_ss_params_cnt); } static bool calc_pll_max_vco_construct( struct calc_pll_clock_source *calc_pll_cs, struct calc_pll_clock_source_init_data *init_data) { uint32_t i; struct dc_firmware_info *fw_info; if (calc_pll_cs == NULL || init_data == NULL || init_data->bp == NULL) return false; if (!init_data->bp->fw_info_valid) return false; fw_info = &init_data->bp->fw_info; calc_pll_cs->ctx = init_data->ctx; calc_pll_cs->ref_freq_khz = fw_info->pll_info.crystal_frequency; calc_pll_cs->min_vco_khz = fw_info->pll_info.min_output_pxl_clk_pll_frequency; calc_pll_cs->max_vco_khz = fw_info->pll_info.max_output_pxl_clk_pll_frequency; if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0) calc_pll_cs->max_pll_input_freq_khz = init_data->max_override_input_pxl_clk_pll_freq_khz; else calc_pll_cs->max_pll_input_freq_khz = fw_info->pll_info.max_input_pxl_clk_pll_frequency; if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0) calc_pll_cs->min_pll_input_freq_khz = init_data->min_override_input_pxl_clk_pll_freq_khz; else calc_pll_cs->min_pll_input_freq_khz = fw_info->pll_info.min_input_pxl_clk_pll_frequency; calc_pll_cs->min_pix_clock_pll_post_divider = init_data->min_pix_clk_pll_post_divider; calc_pll_cs->max_pix_clock_pll_post_divider = init_data->max_pix_clk_pll_post_divider; calc_pll_cs->min_pll_ref_divider = init_data->min_pll_ref_divider; calc_pll_cs->max_pll_ref_divider = init_data->max_pll_ref_divider; if (init_data->num_fract_fb_divider_decimal_point == 0 || init_data->num_fract_fb_divider_decimal_point_precision > init_data->num_fract_fb_divider_decimal_point) { DC_LOG_ERROR( 
"The dec point num or precision is incorrect!"); return false; } if (init_data->num_fract_fb_divider_decimal_point_precision == 0) { DC_LOG_ERROR( "Incorrect fract feedback divider precision num!"); return false; } calc_pll_cs->fract_fb_divider_decimal_points_num = init_data->num_fract_fb_divider_decimal_point; calc_pll_cs->fract_fb_divider_precision = init_data->num_fract_fb_divider_decimal_point_precision; calc_pll_cs->fract_fb_divider_factor = 1; for (i = 0; i < calc_pll_cs->fract_fb_divider_decimal_points_num; ++i) calc_pll_cs->fract_fb_divider_factor *= 10; calc_pll_cs->fract_fb_divider_precision_factor = 1; for ( i = 0; i < (calc_pll_cs->fract_fb_divider_decimal_points_num - calc_pll_cs->fract_fb_divider_precision); ++i) calc_pll_cs->fract_fb_divider_precision_factor *= 10; return true; } bool dce110_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask) { struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi; struct calc_pll_clock_source_init_data calc_pll_cs_init_data; clk_src->base.ctx = ctx; clk_src->bios = bios; clk_src->base.id = id; clk_src->base.funcs = &dce110_clk_src_funcs; clk_src->regs = regs; clk_src->cs_shift = cs_shift; clk_src->cs_mask = cs_mask; if (!clk_src->bios->fw_info_valid) { ASSERT_CRITICAL(false); goto unexpected_failure; } clk_src->ext_clk_khz = clk_src->bios->fw_info.external_clock_source_frequency_for_dp; /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */ calc_pll_cs_init_data.bp = bios; calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1; calc_pll_cs_init_data.max_pix_clk_pll_post_divider = clk_src->cs_mask->PLL_POST_DIV_PIXCLK; calc_pll_cs_init_data.min_pll_ref_divider = 1; calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV; /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0; /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0; /*numberOfFractFBDividerDecimalPoints*/ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point = FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; /*number of decimal point to round off for fractional feedback divider value*/ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision = FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; calc_pll_cs_init_data.ctx = ctx; /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */ calc_pll_cs_init_data_hdmi.bp = bios; calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1; calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider = clk_src->cs_mask->PLL_POST_DIV_PIXCLK; calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1; calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV; /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500; /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000; /*numberOfFractFBDividerDecimalPoints*/ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point = FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; /*number of decimal point to round off for fractional feedback divider value*/ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision = 
FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; calc_pll_cs_init_data_hdmi.ctx = ctx; clk_src->ref_freq_khz = clk_src->bios->fw_info.pll_info.crystal_frequency; if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL) return true; /* PLL only from here on */ ss_info_from_atombios_create(clk_src); if (!calc_pll_max_vco_construct( &clk_src->calc_pll, &calc_pll_cs_init_data)) { ASSERT_CRITICAL(false); goto unexpected_failure; } calc_pll_cs_init_data_hdmi. min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2; calc_pll_cs_init_data_hdmi. max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz; if (!calc_pll_max_vco_construct( &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) { ASSERT_CRITICAL(false); goto unexpected_failure; } return true; unexpected_failure: return false; } bool dce112_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask) { clk_src->base.ctx = ctx; clk_src->bios = bios; clk_src->base.id = id; clk_src->base.funcs = &dce112_clk_src_funcs; clk_src->regs = regs; clk_src->cs_shift = cs_shift; clk_src->cs_mask = cs_mask; if (!clk_src->bios->fw_info_valid) { ASSERT_CRITICAL(false); return false; } clk_src->ext_clk_khz = clk_src->bios->fw_info.external_clock_source_frequency_for_dp; return true; } bool dcn20_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask) { bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask); clk_src->base.funcs = &dcn20_clk_src_funcs; return ret; } bool dcn3_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask) { bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask); clk_src->base.funcs = &dcn3_clk_src_funcs; return ret; } bool dcn31_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask) { bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask); clk_src->base.funcs = &dcn31_clk_src_funcs; return ret; } bool dcn301_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask) { bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask); clk_src->base.funcs = &dcn3_clk_src_funcs; return ret; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
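The feedback-divider math in calculate_fb_and_fractional_fb_divider() above reduces to f_pix = f_ref * (fb + fract_fb / factor) / (ref_div * post_div). The following is a minimal user-space sketch of that relationship with illustrative constants; the driver additionally rounds to a precision factor, checks the result against an error tolerance, and clamps the dividers to VCO and PLL input-frequency ranges.

/*
 * Sketch only: split the total feedback divider into integer and
 * fractional parts, mirroring the relationship
 *   f_pix = f_ref * (fb + fract_fb / FRACT_FACTOR) / (ref_div * post_div)
 * Units follow the driver: pixel clock in 100 Hz, reference in kHz.
 */
#include <stdint.h>
#include <stdio.h>

#define FRACT_FACTOR 1000000ull		/* 6 fractional decimal points */

static void calc_fb_dividers(uint64_t target_pix_clk_100hz,
			     uint64_t ref_freq_khz,
			     uint32_t ref_div, uint32_t post_div,
			     uint32_t *fb, uint32_t *fract_fb)
{
	/* total = f_pix * ref_div * post_div / f_ref, kept scaled by
	 * FRACT_FACTOR so the fractional part survives integer math;
	 * the extra factor of 10 converts between 100 Hz and kHz units.
	 */
	uint64_t scaled = target_pix_clk_100hz * ref_div * post_div * FRACT_FACTOR;

	scaled /= ref_freq_khz * 10ull;
	*fb = (uint32_t)(scaled / FRACT_FACTOR);
	*fract_fb = (uint32_t)(scaled % FRACT_FACTOR);
}

int main(void)
{
	uint32_t fb, fract_fb;

	/* 148.5 MHz pixel clock, 27 MHz crystal, ref_div=1, post_div=2 */
	calc_fb_dividers(1485000, 27000, 1, 2, &fb, &fract_fb);
	printf("fb=%u fract_fb=%u\n", fb, fract_fb);	/* fb=11 fract_fb=0 */

	/* 65 MHz pixel clock, same reference, post_div=4 */
	calc_fb_dividers(650000, 27000, 1, 4, &fb, &fract_fb);
	printf("fb=%u fract_fb=%u\n", fb, fract_fb);	/* fb=9 fract_fb=629629 */

	return 0;
}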
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "core_types.h" #include "link_encoder.h" #include "dce_link_encoder.h" #include "stream_encoder.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" #include "dce/dce_11_0_enum.h" #ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT #define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT 0xa #endif #ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK #define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK 0x00000400L #endif #ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK #define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L #endif #ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT #define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c #endif #define CTX \ enc110->base.ctx #define DC_LOGGER \ enc110->base.ctx->logger #define REG(reg)\ (enc110->link_regs->reg) #define AUX_REG(reg)\ (enc110->aux_regs->reg) #define HPD_REG(reg)\ (enc110->hpd_regs->reg) #define DEFAULT_AUX_MAX_DATA_SIZE 16 #define AUX_MAX_DEFER_WRITE_RETRY 20 /* * @brief * Trigger Source Select * ASIC-dependent, actual values for register programming */ #define DCE110_DIG_FE_SOURCE_SELECT_INVALID 0x0 #define DCE110_DIG_FE_SOURCE_SELECT_DIGA 0x1 #define DCE110_DIG_FE_SOURCE_SELECT_DIGB 0x2 #define DCE110_DIG_FE_SOURCE_SELECT_DIGC 0x4 #define DCE110_DIG_FE_SOURCE_SELECT_DIGD 0x08 #define DCE110_DIG_FE_SOURCE_SELECT_DIGE 0x10 #define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20 #define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40 enum { DP_MST_UPDATE_MAX_RETRY = 50 }; #define DIG_REG(reg)\ (reg + enc110->offsets.dig) #define DP_REG(reg)\ (reg + enc110->offsets.dp) static const struct link_encoder_funcs dce110_lnk_enc_funcs = { .validate_output_with_stream = dce110_link_encoder_validate_output_with_stream, .hw_init = dce110_link_encoder_hw_init, .setup = dce110_link_encoder_setup, .enable_tmds_output = dce110_link_encoder_enable_tmds_output, .enable_dp_output = dce110_link_encoder_enable_dp_output, .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output, .enable_lvds_output = dce110_link_encoder_enable_lvds_output, .disable_output = dce110_link_encoder_disable_output, .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings, .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern, .update_mst_stream_allocation_table = dce110_link_encoder_update_mst_stream_allocation_table, .psr_program_dp_dphy_fast_training = 
dce110_psr_program_dp_dphy_fast_training, .psr_program_secondary_packet = dce110_psr_program_secondary_packet, .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe, .enable_hpd = dce110_link_encoder_enable_hpd, .disable_hpd = dce110_link_encoder_disable_hpd, .is_dig_enabled = dce110_is_dig_enabled, .destroy = dce110_link_encoder_destroy, .get_max_link_cap = dce110_link_encoder_get_max_link_cap, .get_dig_frontend = dce110_get_dig_frontend, }; static enum bp_result link_transmitter_control( struct dce110_link_encoder *enc110, struct bp_transmitter_control *cntl) { enum bp_result result; struct dc_bios *bp = enc110->base.ctx->dc_bios; result = bp->funcs->transmitter_control(bp, cntl); return result; } static void enable_phy_bypass_mode( struct dce110_link_encoder *enc110, bool enable) { /* This register resides in DP back end block; * transmitter is used for the offset */ REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable); } static void disable_prbs_symbols( struct dce110_link_encoder *enc110, bool disable) { /* This register resides in DP back end block; * transmitter is used for the offset */ REG_UPDATE_4(DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, disable, DPHY_ATEST_SEL_LANE1, disable, DPHY_ATEST_SEL_LANE2, disable, DPHY_ATEST_SEL_LANE3, disable); } static void disable_prbs_mode( struct dce110_link_encoder *enc110) { REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0); } static void program_pattern_symbols( struct dce110_link_encoder *enc110, uint16_t pattern_symbols[8]) { /* This register resides in DP back end block; * transmitter is used for the offset */ REG_SET_3(DP_DPHY_SYM0, 0, DPHY_SYM1, pattern_symbols[0], DPHY_SYM2, pattern_symbols[1], DPHY_SYM3, pattern_symbols[2]); /* This register resides in DP back end block; * transmitter is used for the offset */ REG_SET_3(DP_DPHY_SYM1, 0, DPHY_SYM4, pattern_symbols[3], DPHY_SYM5, pattern_symbols[4], DPHY_SYM6, pattern_symbols[5]); /* This register resides in DP back end block; * transmitter is used for the offset */ REG_SET_2(DP_DPHY_SYM2, 0, DPHY_SYM7, pattern_symbols[6], DPHY_SYM8, pattern_symbols[7]); } static void set_dp_phy_pattern_d102( struct dce110_link_encoder *enc110) { /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* For 10-bit PRBS or debug symbols * please use the following sequence: */ /* Enable debug symbols on the lanes */ disable_prbs_symbols(enc110, true); /* Disable PRBS mode */ disable_prbs_mode(enc110); /* Program debug symbols to be output */ { uint16_t pattern_symbols[8] = { 0x2AA, 0x2AA, 0x2AA, 0x2AA, 0x2AA, 0x2AA, 0x2AA, 0x2AA }; program_pattern_symbols(enc110, pattern_symbols); } /* Enable phy bypass mode to enable the test pattern */ enable_phy_bypass_mode(enc110, true); } static void set_link_training_complete( struct dce110_link_encoder *enc110, bool complete) { /* This register resides in DP back end block; * transmitter is used for the offset */ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete); } unsigned int dce110_get_dig_frontend(struct link_encoder *enc) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); u32 value; enum engine_id result; REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value); switch (value) { case DCE110_DIG_FE_SOURCE_SELECT_DIGA: result = ENGINE_ID_DIGA; break; case DCE110_DIG_FE_SOURCE_SELECT_DIGB: result = ENGINE_ID_DIGB; break; case DCE110_DIG_FE_SOURCE_SELECT_DIGC: result = ENGINE_ID_DIGC; break; case DCE110_DIG_FE_SOURCE_SELECT_DIGD: result = ENGINE_ID_DIGD; break; case DCE110_DIG_FE_SOURCE_SELECT_DIGE: result = 
ENGINE_ID_DIGE; break; case DCE110_DIG_FE_SOURCE_SELECT_DIGF: result = ENGINE_ID_DIGF; break; case DCE110_DIG_FE_SOURCE_SELECT_DIGG: result = ENGINE_ID_DIGG; break; default: // invalid source select DIG result = ENGINE_ID_UNKNOWN; } return result; } void dce110_link_encoder_set_dp_phy_pattern_training_pattern( struct link_encoder *enc, uint32_t index) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); /* Write Training Pattern */ REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index); /* Set HW Register Training Complete to false */ set_link_training_complete(enc110, false); /* Disable PHY Bypass mode to output Training Pattern */ enable_phy_bypass_mode(enc110, false); /* Disable PRBS mode */ disable_prbs_mode(enc110); } static void setup_panel_mode( struct dce110_link_encoder *enc110, enum dp_panel_mode panel_mode) { uint32_t value; struct dc_context *ctx = enc110->base.ctx; /* if psp set panel mode, dal should be program it */ if (ctx->dc->caps.psp_setup_panel_mode) return; ASSERT(REG(DP_DPHY_INTERNAL_CTRL)); value = REG_READ(DP_DPHY_INTERNAL_CTRL); switch (panel_mode) { case DP_PANEL_MODE_EDP: value = 0x1; break; case DP_PANEL_MODE_SPECIAL: value = 0x11; break; default: value = 0x0; break; } REG_WRITE(DP_DPHY_INTERNAL_CTRL, value); } static void set_dp_phy_pattern_symbol_error( struct dce110_link_encoder *enc110) { /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* program correct panel mode*/ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT); /* A PRBS23 pattern is used for most DP electrical measurements. */ /* Enable PRBS symbols on the lanes */ disable_prbs_symbols(enc110, false); /* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */ REG_UPDATE_2(DP_DPHY_PRBS_CNTL, DPHY_PRBS_SEL, 1, DPHY_PRBS_EN, 1); /* Enable phy bypass mode to enable the test pattern */ enable_phy_bypass_mode(enc110, true); } static void set_dp_phy_pattern_prbs7( struct dce110_link_encoder *enc110) { /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* A PRBS7 pattern is used for most DP electrical measurements. */ /* Enable PRBS symbols on the lanes */ disable_prbs_symbols(enc110, false); /* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */ REG_UPDATE_2(DP_DPHY_PRBS_CNTL, DPHY_PRBS_SEL, 0, DPHY_PRBS_EN, 1); /* Enable phy bypass mode to enable the test pattern */ enable_phy_bypass_mode(enc110, true); } static void set_dp_phy_pattern_80bit_custom( struct dce110_link_encoder *enc110, const uint8_t *pattern) { /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* Enable debug symbols on the lanes */ disable_prbs_symbols(enc110, true); /* Enable PHY bypass mode to enable the test pattern */ /* TODO is it really needed ? 
*/ enable_phy_bypass_mode(enc110, true); /* Program 80 bit custom pattern */ { uint16_t pattern_symbols[8]; pattern_symbols[0] = ((pattern[1] & 0x03) << 8) | pattern[0]; pattern_symbols[1] = ((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f); pattern_symbols[2] = ((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f); pattern_symbols[3] = (pattern[4] << 2) | ((pattern[3] >> 6) & 0x03); pattern_symbols[4] = ((pattern[6] & 0x03) << 8) | pattern[5]; pattern_symbols[5] = ((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f); pattern_symbols[6] = ((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f); pattern_symbols[7] = (pattern[9] << 2) | ((pattern[8] >> 6) & 0x03); program_pattern_symbols(enc110, pattern_symbols); } /* Enable phy bypass mode to enable the test pattern */ enable_phy_bypass_mode(enc110, true); } static void set_dp_phy_pattern_hbr2_compliance_cp2520_2( struct dce110_link_encoder *enc110, unsigned int cp2520_pattern) { /* previously there is a register DP_HBR2_EYE_PATTERN * that is enabled to get the pattern. * But it does not work with the latest spec change, * so we are programming the following registers manually. * * The following settings have been confirmed * by Nick Chorney and Sandra Liu */ /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* Setup DIG encoder in DP SST mode */ enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT); /* ensure normal panel mode. */ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT); /* no vbid after BS (SR) * DP_LINK_FRAMING_CNTL changed history Sandra Liu * 11000260 / 11000104 / 110000FC */ REG_UPDATE_3(DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, 0xFC, DP_VBID_DISABLE, 1, DP_VID_ENHANCED_FRAME_MODE, 1); /* swap every BS with SR */ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0); /* select cp2520 patterns */ if (REG(DP_DPHY_HBR2_PATTERN_CONTROL)) REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL, DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern); else /* pre-DCE11 can only generate CP2520 pattern 2 */ ASSERT(cp2520_pattern == 2); /* set link training complete */ set_link_training_complete(enc110, true); /* disable video stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2( struct dce110_link_encoder *enc110, unsigned int cp2520_pattern) { /* previously there is a register DP_HBR2_EYE_PATTERN * that is enabled to get the pattern. * But it does not work with the latest spec change, * so we are programming the following registers manually. * * The following settings have been confirmed * by Nick Chorney and Sandra Liu */ /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* Setup DIG encoder in DP SST mode */ enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT); /* ensure normal panel mode. 
*/ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT); /* no vbid after BS (SR) * DP_LINK_FRAMING_CNTL changed history Sandra Liu * 11000260 / 11000104 / 110000FC */ REG_UPDATE_3(DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, 0xFC, DP_VBID_DISABLE, 1, DP_VID_ENHANCED_FRAME_MODE, 1); /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip swap BS with SR */ /* select cp2520 patterns */ if (REG(DP_DPHY_HBR2_PATTERN_CONTROL)) REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL, DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern); else /* pre-DCE11 can only generate CP2520 pattern 2 */ ASSERT(cp2520_pattern == 2); /* set link training complete */ set_link_training_complete(enc110, true); /* disable video stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); } #endif static void set_dp_phy_pattern_passthrough_mode( struct dce110_link_encoder *enc110, enum dp_panel_mode panel_mode) { /* program correct panel mode */ setup_panel_mode(enc110, panel_mode); /* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT * in case we were doing HBR2 compliance pattern before */ REG_UPDATE_3(DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, 0x2000, DP_VBID_DISABLE, 0, DP_VID_ENHANCED_FRAME_MODE, 1); REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF); /* set link training complete */ set_link_training_complete(enc110, true); /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* Disable PRBS mode */ disable_prbs_mode(enc110); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_set_dp_phy_pattern_passthrough_mode( struct dce110_link_encoder *enc110, enum dp_panel_mode panel_mode) { /* program correct panel mode */ setup_panel_mode(enc110, panel_mode); /* restore LINK_FRAMING_CNTL * in case we were doing HBR2 compliance pattern before */ REG_UPDATE_3(DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, 0x2000, DP_VBID_DISABLE, 0, DP_VID_ENHANCED_FRAME_MODE, 1); /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip DPHY_SCRAMBLER_BS_COUNT restore */ /* set link training complete */ set_link_training_complete(enc110, true); /* Disable PHY Bypass mode to setup the test pattern */ enable_phy_bypass_mode(enc110, false); /* Disable PRBS mode */ disable_prbs_mode(enc110); } #endif /* return value is bit-vector */ static uint8_t get_frontend_source( enum engine_id engine) { switch (engine) { case ENGINE_ID_DIGA: return DCE110_DIG_FE_SOURCE_SELECT_DIGA; case ENGINE_ID_DIGB: return DCE110_DIG_FE_SOURCE_SELECT_DIGB; case ENGINE_ID_DIGC: return DCE110_DIG_FE_SOURCE_SELECT_DIGC; case ENGINE_ID_DIGD: return DCE110_DIG_FE_SOURCE_SELECT_DIGD; case ENGINE_ID_DIGE: return DCE110_DIG_FE_SOURCE_SELECT_DIGE; case ENGINE_ID_DIGF: return DCE110_DIG_FE_SOURCE_SELECT_DIGF; case ENGINE_ID_DIGG: return DCE110_DIG_FE_SOURCE_SELECT_DIGG; default: ASSERT_CRITICAL(false); return DCE110_DIG_FE_SOURCE_SELECT_INVALID; } } static void configure_encoder( struct dce110_link_encoder *enc110, const struct dc_link_settings *link_settings) { /* set number of lanes */ REG_SET(DP_CONFIG, 0, DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE); /* setup scrambler */ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_configure_encoder( struct dce110_link_encoder *enc110, const struct dc_link_settings *link_settings) { /* set number of lanes */ REG_SET(DP_CONFIG, 0, DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE); /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip setup 
scrambler */ } #endif static void aux_initialize( struct dce110_link_encoder *enc110) { struct dc_context *ctx = enc110->base.ctx; enum hpd_source_id hpd_source = enc110->base.hpd_source; uint32_t addr = AUX_REG(AUX_CONTROL); uint32_t value = dm_read_reg(ctx, addr); set_reg_field_value(value, hpd_source, AUX_CONTROL, AUX_HPD_SEL); set_reg_field_value(value, 0, AUX_CONTROL, AUX_LS_READ_EN); dm_write_reg(ctx, addr, value); addr = AUX_REG(AUX_DPHY_RX_CONTROL0); value = dm_read_reg(ctx, addr); /* 1/4 window (the maximum allowed) */ set_reg_field_value(value, 1, AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW); dm_write_reg(ctx, addr, value); } void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc, bool exit_link_training_required) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); if (exit_link_training_required) REG_UPDATE(DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, 1); else { REG_UPDATE(DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, 0); /*In DCE 11, we are able to pre-program a Force SR register * to be able to trigger SR symbol after 5 idle patterns * transmitted. Upon PSR Exit, DMCU can trigger * DPHY_LOAD_BS_COUNT_START = 1. Upon writing 1 to * DPHY_LOAD_BS_COUNT_START and the internal counter * reaches DPHY_LOAD_BS_COUNT, the next BS symbol will be * replaced by SR symbol once. */ REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5); } } void dce110_psr_program_secondary_packet(struct link_encoder *enc, unsigned int sdp_transmit_line_num_deadline) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); REG_UPDATE_2(DP_SEC_CNTL1, DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline, DP_SEC_GSP0_PRIORITY, 1); } bool dce110_is_dig_enabled(struct link_encoder *enc) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); uint32_t value; REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value); return value; } static void link_encoder_disable(struct dce110_link_encoder *enc110) { /* reset training pattern */ REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0, DPHY_TRAINING_PATTERN_SEL, 0); /* reset training complete */ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0); /* reset panel mode */ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT); } static void hpd_initialize( struct dce110_link_encoder *enc110) { /* Associate HPD with DIG_BE */ enum hpd_source_id hpd_source = enc110->base.hpd_source; REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source); } bool dce110_link_encoder_validate_dvi_output( const struct dce110_link_encoder *enc110, enum signal_type connector_signal, enum signal_type signal, const struct dc_crtc_timing *crtc_timing) { uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK; if (signal == SIGNAL_TYPE_DVI_DUAL_LINK) max_pixel_clock *= 2; /* This handles the case of HDMI downgrade to DVI we don't want to * we don't want to cap the pixel clock if the DDI is not DVI. 
*/ if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK && connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) max_pixel_clock = enc110->base.features.max_hdmi_pixel_clock; /* DVI only support RGB pixel encoding */ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB) return false; /*connect DVI via adpater's HDMI connector*/ if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK || connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) && signal != SIGNAL_TYPE_HDMI_TYPE_A && crtc_timing->pix_clk_100hz > (TMDS_MAX_PIXEL_CLOCK * 10)) return false; if (crtc_timing->pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10)) return false; if (crtc_timing->pix_clk_100hz > (max_pixel_clock * 10)) return false; /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */ switch (crtc_timing->display_color_depth) { case COLOR_DEPTH_666: case COLOR_DEPTH_888: break; case COLOR_DEPTH_101010: case COLOR_DEPTH_161616: if (signal != SIGNAL_TYPE_DVI_DUAL_LINK) return false; break; default: return false; } return true; } static bool dce110_link_encoder_validate_hdmi_output( const struct dce110_link_encoder *enc110, const struct dc_crtc_timing *crtc_timing, int adjusted_pix_clk_khz) { enum dc_color_depth max_deep_color = enc110->base.features.max_hdmi_deep_color; if (max_deep_color < crtc_timing->display_color_depth) return false; if (crtc_timing->display_color_depth < COLOR_DEPTH_888) return false; if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK) return false; if ((adjusted_pix_clk_khz == 0) || (adjusted_pix_clk_khz > enc110->base.features.max_hdmi_pixel_clock)) return false; /* DCE11 HW does not support 420 */ if (!enc110->base.features.hdmi_ycbcr420_supported && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; if ((!enc110->base.features.flags.bits.HDMI_6GB_EN || enc110->base.ctx->dc->debug.hdmi20_disable) && adjusted_pix_clk_khz >= 300000) return false; if (enc110->base.ctx->dc->debug.hdmi20_disable && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; return true; } bool dce110_link_encoder_validate_dp_output( const struct dce110_link_encoder *enc110, const struct dc_crtc_timing *crtc_timing) { if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; return true; } void dce110_link_encoder_construct( struct dce110_link_encoder *enc110, const struct encoder_init_data *init_data, const struct encoder_feature_support *enc_features, const struct dce110_link_enc_registers *link_regs, const struct dce110_link_enc_aux_registers *aux_regs, const struct dce110_link_enc_hpd_registers *hpd_regs) { struct bp_encoder_cap_info bp_cap_info = {0}; const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; enum bp_result result = BP_RESULT_OK; enc110->base.funcs = &dce110_lnk_enc_funcs; enc110->base.ctx = init_data->ctx; enc110->base.id = init_data->encoder; enc110->base.hpd_source = init_data->hpd_source; enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; enc110->base.features = *enc_features; enc110->base.transmitter = init_data->transmitter; /* set the flag to indicate whether driver poll the I2C data pin * while doing the DP sink detect */ /* if (dal_adapter_service_is_feature_supported(as, FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) enc110->base.features.flags.bits. 
DP_SINK_DETECT_POLL_DATA_PIN = true;*/ enc110->base.output_signals = SIGNAL_TYPE_DVI_SINGLE_LINK | SIGNAL_TYPE_DVI_DUAL_LINK | SIGNAL_TYPE_LVDS | SIGNAL_TYPE_DISPLAY_PORT | SIGNAL_TYPE_DISPLAY_PORT_MST | SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. * Prefer DIG assignment is decided by board design. * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design * and VBIOS will filter out 7 UNIPHY for DCE 8.0. * By this, adding DIGG should not hurt DCE 8.0. * This will let DCE 8.1 share DCE 8.0 as much as possible */ enc110->link_regs = link_regs; enc110->aux_regs = aux_regs; enc110->hpd_regs = hpd_regs; switch (enc110->base.transmitter) { case TRANSMITTER_UNIPHY_A: enc110->base.preferred_engine = ENGINE_ID_DIGA; break; case TRANSMITTER_UNIPHY_B: enc110->base.preferred_engine = ENGINE_ID_DIGB; break; case TRANSMITTER_UNIPHY_C: enc110->base.preferred_engine = ENGINE_ID_DIGC; break; case TRANSMITTER_UNIPHY_D: enc110->base.preferred_engine = ENGINE_ID_DIGD; break; case TRANSMITTER_UNIPHY_E: enc110->base.preferred_engine = ENGINE_ID_DIGE; break; case TRANSMITTER_UNIPHY_F: enc110->base.preferred_engine = ENGINE_ID_DIGF; break; case TRANSMITTER_UNIPHY_G: enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } /* default to one to mirror Windows behavior */ enc110->base.features.flags.bits.HDMI_6GB_EN = 1; result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios, enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ if (BP_RESULT_OK == result) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; } else { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); } if (enc110->base.ctx->dc->debug.hdmi20_disable) { enc110->base.features.flags.bits.HDMI_6GB_EN = 0; } } bool dce110_link_encoder_validate_output_with_stream( struct link_encoder *enc, const struct dc_stream_state *stream) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); bool is_valid; switch (stream->signal) { case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: is_valid = dce110_link_encoder_validate_dvi_output( enc110, stream->link->connector_signal, stream->signal, &stream->timing); break; case SIGNAL_TYPE_HDMI_TYPE_A: is_valid = dce110_link_encoder_validate_hdmi_output( enc110, &stream->timing, stream->phy_pix_clk); break; case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: is_valid = dce110_link_encoder_validate_dp_output( enc110, &stream->timing); break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_LVDS: is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB; break; case SIGNAL_TYPE_VIRTUAL: is_valid = true; break; default: is_valid = false; break; } return is_valid; } void dce110_link_encoder_hw_init( struct link_encoder *enc) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; cntl.action = TRANSMITTER_CONTROL_INIT; cntl.engine_id = ENGINE_ID_UNKNOWN; cntl.transmitter = enc110->base.transmitter; 
cntl.connector_obj_id = enc110->base.connector; cntl.lanes_number = LANE_COUNT_FOUR; cntl.coherent = false; cntl.hpd_sel = enc110->base.hpd_source; if (enc110->base.connector.id == CONNECTOR_ID_EDP) cntl.signal = SIGNAL_TYPE_EDP; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); return; } if (enc110->base.connector.id == CONNECTOR_ID_LVDS) { cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS; result = link_transmitter_control(enc110, &cntl); ASSERT(result == BP_RESULT_OK); } aux_initialize(enc110); /* reinitialize HPD. * hpd_initialize() will pass DIG_FE id to HW context. * All other routine within HW context will use fe_engine_offset * as DIG_FE id even caller pass DIG_FE id. * So this routine must be called first. */ hpd_initialize(enc110); } void dce110_link_encoder_destroy(struct link_encoder **enc) { kfree(TO_DCE110_LINK_ENC(*enc)); *enc = NULL; } void dce110_link_encoder_setup( struct link_encoder *enc, enum signal_type signal) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); switch (signal) { case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: /* DP SST */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0); break; case SIGNAL_TYPE_LVDS: /* LVDS */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1); break; case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: /* TMDS-DVI */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2); break; case SIGNAL_TYPE_HDMI_TYPE_A: /* TMDS-HDMI */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3); break; case SIGNAL_TYPE_DISPLAY_PORT_MST: /* DP MST */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5); break; default: ASSERT_CRITICAL(false); /* invalid mode ! */ break; } } /* TODO: still need depth or just pass in adjusted pixel clock? */ void dce110_link_encoder_enable_tmds_output( struct link_encoder *enc, enum clock_source_id clock_source, enum dc_color_depth color_depth, enum signal_type signal, uint32_t pixel_clock) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* Enable the PHY */ cntl.connector_obj_id = enc110->base.connector; cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = enc->preferred_engine; cntl.transmitter = enc110->base.transmitter; cntl.pll_id = clock_source; cntl.signal = signal; if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK) cntl.lanes_number = 8; else cntl.lanes_number = 4; cntl.hpd_sel = enc110->base.hpd_source; cntl.pixel_clock = pixel_clock; cntl.color_depth = color_depth; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); } } /* TODO: still need depth or just pass in adjusted pixel clock? 
*/ void dce110_link_encoder_enable_lvds_output( struct link_encoder *enc, enum clock_source_id clock_source, uint32_t pixel_clock) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* Enable the PHY */ cntl.connector_obj_id = enc110->base.connector; cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = enc->preferred_engine; cntl.transmitter = enc110->base.transmitter; cntl.pll_id = clock_source; cntl.signal = SIGNAL_TYPE_LVDS; cntl.lanes_number = 4; cntl.hpd_sel = enc110->base.hpd_source; cntl.pixel_clock = pixel_clock; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); } } /* enables DP PHY output */ void dce110_link_encoder_enable_dp_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* Enable the PHY */ /* number_of_lanes is used for pixel clock adjust, * but it's not passed to asic_control. * We need to set number of lanes manually. */ configure_encoder(enc110, link_settings); cntl.connector_obj_id = enc110->base.connector; cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = enc->preferred_engine; cntl.transmitter = enc110->base.transmitter; cntl.pll_id = clock_source; cntl.signal = SIGNAL_TYPE_DISPLAY_PORT; cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc110->base.hpd_source; cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* TODO: check if undefined works */ cntl.color_depth = COLOR_DEPTH_UNDEFINED; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); } } /* enables DP PHY output in MST mode */ void dce110_link_encoder_enable_dp_mst_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* Enable the PHY */ /* number_of_lanes is used for pixel clock adjust, * but it's not passed to asic_control. * We need to set number of lanes manually. */ configure_encoder(enc110, link_settings); cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = ENGINE_ID_UNKNOWN; cntl.transmitter = enc110->base.transmitter; cntl.pll_id = clock_source; cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc110->base.hpd_source; cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* TODO: check if undefined works */ cntl.color_depth = COLOR_DEPTH_UNDEFINED; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); } } #if defined(CONFIG_DRM_AMD_DC_SI) /* enables DP PHY output */ static void dce60_link_encoder_enable_dp_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* Enable the PHY */ /* number_of_lanes is used for pixel clock adjust, * but it's not passed to asic_control. 
* We need to set number of lanes manually. */ dce60_configure_encoder(enc110, link_settings); cntl.connector_obj_id = enc110->base.connector; cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = enc->preferred_engine; cntl.transmitter = enc110->base.transmitter; cntl.pll_id = clock_source; cntl.signal = SIGNAL_TYPE_DISPLAY_PORT; cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc110->base.hpd_source; cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* TODO: check if undefined works */ cntl.color_depth = COLOR_DEPTH_UNDEFINED; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); } } /* enables DP PHY output in MST mode */ static void dce60_link_encoder_enable_dp_mst_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; /* Enable the PHY */ /* number_of_lanes is used for pixel clock adjust, * but it's not passed to asic_control. * We need to set number of lanes manually. */ dce60_configure_encoder(enc110, link_settings); cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = ENGINE_ID_UNKNOWN; cntl.transmitter = enc110->base.transmitter; cntl.pll_id = clock_source; cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc110->base.hpd_source; cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* TODO: check if undefined works */ cntl.color_depth = COLOR_DEPTH_UNDEFINED; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); } } #endif /* * @brief * Disable transmitter and its encoder */ void dce110_link_encoder_disable_output( struct link_encoder *enc, enum signal_type signal) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; enum bp_result result; if (!dce110_is_dig_enabled(enc)) { /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ return; } /* Power-down RX and disable GPU PHY should be paired. * Disabling PHY without powering down RX may cause * symbol lock loss, on which we will get DP Sink interrupt. */ /* There is a case for the DP active dongles * where we want to disable the PHY but keep RX powered, * for those we need to ignore DP Sink interrupt * by checking lane count that has been set * on the last do_enable_output(). 
*/ /* disable transmitter */ cntl.action = TRANSMITTER_CONTROL_DISABLE; cntl.transmitter = enc110->base.transmitter; cntl.hpd_sel = enc110->base.hpd_source; cntl.signal = signal; cntl.connector_obj_id = enc110->base.connector; result = link_transmitter_control(enc110, &cntl); if (result != BP_RESULT_OK) { DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__); BREAK_TO_DEBUGGER(); return; } /* disable encoder */ if (dc_is_dp_signal(signal)) link_encoder_disable(enc110); } void dce110_link_encoder_dp_set_lane_settings( struct link_encoder *enc, const struct dc_link_settings *link_settings, const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); union dpcd_training_lane_set training_lane_set = { { 0 } }; int32_t lane = 0; struct bp_transmitter_control cntl = { 0 }; if (!link_settings) { BREAK_TO_DEBUGGER(); return; } cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; cntl.transmitter = enc110->base.transmitter; cntl.connector_obj_id = enc110->base.connector; cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc110->base.hpd_source; cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; for (lane = 0; lane < link_settings->lane_count; lane++) { /* translate lane settings */ training_lane_set.bits.VOLTAGE_SWING_SET = lane_settings[lane].VOLTAGE_SWING; training_lane_set.bits.PRE_EMPHASIS_SET = lane_settings[lane].PRE_EMPHASIS; /* post cursor 2 setting only applies to HBR2 link rate */ if (link_settings->link_rate == LINK_RATE_HIGH2) { /* this is passed to VBIOS * to program post cursor 2 level */ training_lane_set.bits.POST_CURSOR2_SET = lane_settings[lane].POST_CURSOR2; } cntl.lane_select = lane; cntl.lane_settings = training_lane_set.raw; /* call VBIOS table to set voltage swing and pre-emphasis */ link_transmitter_control(enc110, &cntl); } } /* set DP PHY test and training patterns */ void dce110_link_encoder_dp_set_phy_pattern( struct link_encoder *enc, const struct encoder_set_dp_phy_pattern_param *param) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); switch (param->dp_phy_pattern) { case DP_TEST_PATTERN_TRAINING_PATTERN1: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0); break; case DP_TEST_PATTERN_TRAINING_PATTERN2: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1); break; case DP_TEST_PATTERN_TRAINING_PATTERN3: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2); break; case DP_TEST_PATTERN_TRAINING_PATTERN4: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3); break; case DP_TEST_PATTERN_D102: set_dp_phy_pattern_d102(enc110); break; case DP_TEST_PATTERN_SYMBOL_ERROR: set_dp_phy_pattern_symbol_error(enc110); break; case DP_TEST_PATTERN_PRBS7: set_dp_phy_pattern_prbs7(enc110); break; case DP_TEST_PATTERN_80BIT_CUSTOM: set_dp_phy_pattern_80bit_custom( enc110, param->custom_pattern); break; case DP_TEST_PATTERN_CP2520_1: set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 1); break; case DP_TEST_PATTERN_CP2520_2: set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 2); break; case DP_TEST_PATTERN_CP2520_3: set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 3); break; case DP_TEST_PATTERN_VIDEO_MODE: { set_dp_phy_pattern_passthrough_mode( enc110, param->dp_panel_mode); break; } default: /* invalid phy pattern */ ASSERT_CRITICAL(false); break; } } #if defined(CONFIG_DRM_AMD_DC_SI) /* set DP PHY test and training patterns */ static void dce60_link_encoder_dp_set_phy_pattern( struct link_encoder 
*enc, const struct encoder_set_dp_phy_pattern_param *param) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); switch (param->dp_phy_pattern) { case DP_TEST_PATTERN_TRAINING_PATTERN1: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0); break; case DP_TEST_PATTERN_TRAINING_PATTERN2: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1); break; case DP_TEST_PATTERN_TRAINING_PATTERN3: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2); break; case DP_TEST_PATTERN_TRAINING_PATTERN4: dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3); break; case DP_TEST_PATTERN_D102: set_dp_phy_pattern_d102(enc110); break; case DP_TEST_PATTERN_SYMBOL_ERROR: set_dp_phy_pattern_symbol_error(enc110); break; case DP_TEST_PATTERN_PRBS7: set_dp_phy_pattern_prbs7(enc110); break; case DP_TEST_PATTERN_80BIT_CUSTOM: set_dp_phy_pattern_80bit_custom( enc110, param->custom_pattern); break; case DP_TEST_PATTERN_CP2520_1: dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 1); break; case DP_TEST_PATTERN_CP2520_2: dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 2); break; case DP_TEST_PATTERN_CP2520_3: dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 3); break; case DP_TEST_PATTERN_VIDEO_MODE: { dce60_set_dp_phy_pattern_passthrough_mode( enc110, param->dp_panel_mode); break; } default: /* invalid phy pattern */ ASSERT_CRITICAL(false); break; } } #endif static void fill_stream_allocation_row_info( const struct link_mst_stream_allocation *stream_allocation, uint32_t *src, uint32_t *slots) { const struct stream_encoder *stream_enc = stream_allocation->stream_enc; if (stream_enc) { *src = stream_enc->id; *slots = stream_allocation->slot_count; } else { *src = 0; *slots = 0; } } /* programs DP MST VC payload allocation */ void dce110_link_encoder_update_mst_stream_allocation_table( struct link_encoder *enc, const struct link_mst_stream_allocation_table *table) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); uint32_t value1 = 0; uint32_t value2 = 0; uint32_t slots = 0; uint32_t src = 0; uint32_t retries = 0; /* For CZ, there are only 3 pipes. So Virtual channel is up 3.*/ /* --- Set MSE Stream Attribute - * Setup VC Payload Table on Tx Side, * Issue allocation change trigger * to commit payload on both tx and rx side */ /* we should clean-up table each time */ if (table->stream_count >= 1) { fill_stream_allocation_row_info( &table->stream_allocations[0], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_MSE_SAT0, DP_MSE_SAT_SRC0, src, DP_MSE_SAT_SLOT_COUNT0, slots); if (table->stream_count >= 2) { fill_stream_allocation_row_info( &table->stream_allocations[1], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_MSE_SAT0, DP_MSE_SAT_SRC1, src, DP_MSE_SAT_SLOT_COUNT1, slots); if (table->stream_count >= 3) { fill_stream_allocation_row_info( &table->stream_allocations[2], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_MSE_SAT1, DP_MSE_SAT_SRC2, src, DP_MSE_SAT_SLOT_COUNT2, slots); if (table->stream_count >= 4) { fill_stream_allocation_row_info( &table->stream_allocations[3], &src, &slots); } else { src = 0; slots = 0; } REG_UPDATE_2(DP_MSE_SAT1, DP_MSE_SAT_SRC3, src, DP_MSE_SAT_SLOT_COUNT3, slots); /* --- wait for transaction finish */ /* send allocation change trigger (ACT) ? 
* this step first sends the ACT, * then double buffers the SAT into the hardware * making the new allocation active on the DP MST mode link */ /* DP_MSE_SAT_UPDATE: * 0 - No Action * 1 - Update SAT with trigger * 2 - Update SAT without trigger */ REG_UPDATE(DP_MSE_SAT_UPDATE, DP_MSE_SAT_UPDATE, 1); /* wait for update to complete * (i.e. DP_MSE_SAT_UPDATE field is reset to 0) * then wait for the transmission * of at least 16 MTP headers on immediate local link. * i.e. DP_MSE_16_MTP_KEEPOUT field (read only) is reset to 0 * a value of 1 indicates that DP MST mode * is in the 16 MTP keepout region after a VC has been added. * MST stream bandwidth (VC rate) can be configured * after this bit is cleared */ do { udelay(10); REG_READ(DP_MSE_SAT_UPDATE); REG_GET(DP_MSE_SAT_UPDATE, DP_MSE_SAT_UPDATE, &value1); REG_GET(DP_MSE_SAT_UPDATE, DP_MSE_16_MTP_KEEPOUT, &value2); /* bit field DP_MSE_SAT_UPDATE is set to 1 already */ if (!value1 && !value2) break; ++retries; } while (retries < DP_MST_UPDATE_MAX_RETRY); } void dce110_link_encoder_connect_dig_be_to_fe( struct link_encoder *enc, enum engine_id engine, bool connect) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); uint32_t field; if (engine != ENGINE_ID_UNKNOWN) { REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field); if (connect) field |= get_frontend_source(engine); else field &= ~get_frontend_source(engine); REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field); } } void dce110_link_encoder_enable_hpd(struct link_encoder *enc) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct dc_context *ctx = enc110->base.ctx; uint32_t addr = HPD_REG(DC_HPD_CONTROL); uint32_t hpd_enable = 0; uint32_t value = dm_read_reg(ctx, addr); hpd_enable = get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN); if (hpd_enable == 0) set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN); } void dce110_link_encoder_disable_hpd(struct link_encoder *enc) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); struct dc_context *ctx = enc110->base.ctx; uint32_t addr = HPD_REG(DC_HPD_CONTROL); uint32_t value = dm_read_reg(ctx, addr); set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN); } void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { /* Set Default link settings */ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH, LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0}; /* Higher link settings based on feature supported */ if (enc->features.flags.bits.IS_HBR2_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH2; if (enc->features.flags.bits.IS_HBR3_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH3; *link_settings = max_link_cap; } #if defined(CONFIG_DRM_AMD_DC_SI) static const struct link_encoder_funcs dce60_lnk_enc_funcs = { .validate_output_with_stream = dce110_link_encoder_validate_output_with_stream, .hw_init = dce110_link_encoder_hw_init, .setup = dce110_link_encoder_setup, .enable_tmds_output = dce110_link_encoder_enable_tmds_output, .enable_dp_output = dce60_link_encoder_enable_dp_output, .enable_dp_mst_output = dce60_link_encoder_enable_dp_mst_output, .enable_lvds_output = dce110_link_encoder_enable_lvds_output, .disable_output = dce110_link_encoder_disable_output, .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings, .dp_set_phy_pattern = dce60_link_encoder_dp_set_phy_pattern, .update_mst_stream_allocation_table = dce110_link_encoder_update_mst_stream_allocation_table, .psr_program_dp_dphy_fast_training = 
dce110_psr_program_dp_dphy_fast_training, .psr_program_secondary_packet = dce110_psr_program_secondary_packet, .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe, .enable_hpd = dce110_link_encoder_enable_hpd, .disable_hpd = dce110_link_encoder_disable_hpd, .is_dig_enabled = dce110_is_dig_enabled, .destroy = dce110_link_encoder_destroy, .get_max_link_cap = dce110_link_encoder_get_max_link_cap, .get_dig_frontend = dce110_get_dig_frontend }; void dce60_link_encoder_construct( struct dce110_link_encoder *enc110, const struct encoder_init_data *init_data, const struct encoder_feature_support *enc_features, const struct dce110_link_enc_registers *link_regs, const struct dce110_link_enc_aux_registers *aux_regs, const struct dce110_link_enc_hpd_registers *hpd_regs) { struct bp_encoder_cap_info bp_cap_info = {0}; const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; enum bp_result result = BP_RESULT_OK; enc110->base.funcs = &dce60_lnk_enc_funcs; enc110->base.ctx = init_data->ctx; enc110->base.id = init_data->encoder; enc110->base.hpd_source = init_data->hpd_source; enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; enc110->base.features = *enc_features; enc110->base.transmitter = init_data->transmitter; /* set the flag to indicate whether driver poll the I2C data pin * while doing the DP sink detect */ /* if (dal_adapter_service_is_feature_supported(as, FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) enc110->base.features.flags.bits. DP_SINK_DETECT_POLL_DATA_PIN = true;*/ enc110->base.output_signals = SIGNAL_TYPE_DVI_SINGLE_LINK | SIGNAL_TYPE_DVI_DUAL_LINK | SIGNAL_TYPE_LVDS | SIGNAL_TYPE_DISPLAY_PORT | SIGNAL_TYPE_DISPLAY_PORT_MST | SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. * Prefer DIG assignment is decided by board design. * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design * and VBIOS will filter out 7 UNIPHY for DCE 8.0. * By this, adding DIGG should not hurt DCE 8.0. 
* This will let DCE 8.1 share DCE 8.0 as much as possible */ enc110->link_regs = link_regs; enc110->aux_regs = aux_regs; enc110->hpd_regs = hpd_regs; switch (enc110->base.transmitter) { case TRANSMITTER_UNIPHY_A: enc110->base.preferred_engine = ENGINE_ID_DIGA; break; case TRANSMITTER_UNIPHY_B: enc110->base.preferred_engine = ENGINE_ID_DIGB; break; case TRANSMITTER_UNIPHY_C: enc110->base.preferred_engine = ENGINE_ID_DIGC; break; case TRANSMITTER_UNIPHY_D: enc110->base.preferred_engine = ENGINE_ID_DIGD; break; case TRANSMITTER_UNIPHY_E: enc110->base.preferred_engine = ENGINE_ID_DIGE; break; case TRANSMITTER_UNIPHY_F: enc110->base.preferred_engine = ENGINE_ID_DIGF; break; case TRANSMITTER_UNIPHY_G: enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } /* default to one to mirror Windows behavior */ enc110->base.features.flags.bits.HDMI_6GB_EN = 1; result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios, enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ if (BP_RESULT_OK == result) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; } else { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); } if (enc110->base.ctx->dc->debug.hdmi20_disable) { enc110->base.features.flags.bits.HDMI_6GB_EN = 0; } } #endif
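A note on set_dp_phy_pattern_80bit_custom() in the file above: the ten pattern bytes (80 bits) are repacked into eight 10-bit DPHY symbols, five bytes per group of four symbols. The standalone sketch below reproduces that exact packing on the host side so it can be checked outside the driver; the sample pattern bytes are arbitrary illustration values, not a DP-spec test vector.

#include <stdint.h>
#include <stdio.h>

/* Repack 10 pattern bytes (80 bits) into 8 x 10-bit symbols, using the
 * same shifts as set_dp_phy_pattern_80bit_custom() does before it calls
 * program_pattern_symbols(). */
static void pack_80bit_custom(const uint8_t p[10], uint16_t sym[8])
{
	sym[0] = ((p[1] & 0x03) << 8) | p[0];
	sym[1] = ((p[2] & 0x0f) << 6) | ((p[1] >> 2) & 0x3f);
	sym[2] = ((p[3] & 0x3f) << 4) | ((p[2] >> 4) & 0x0f);
	sym[3] = (p[4] << 2)          | ((p[3] >> 6) & 0x03);
	sym[4] = ((p[6] & 0x03) << 8) | p[5];
	sym[5] = ((p[7] & 0x0f) << 6) | ((p[6] >> 2) & 0x3f);
	sym[6] = ((p[8] & 0x3f) << 4) | ((p[7] >> 4) & 0x0f);
	sym[7] = (p[9] << 2)          | ((p[8] >> 6) & 0x03);
}

int main(void)
{
	/* Arbitrary example bytes, purely for illustration. */
	const uint8_t pattern[10] = {
		0x1f, 0x7c, 0xf0, 0xc1, 0x07, 0x1f, 0x7c, 0xf0, 0xc1, 0x07
	};
	uint16_t sym[8];
	int i;

	pack_80bit_custom(pattern, sym);
	for (i = 0; i < 8; i++)
		printf("sym[%d] = 0x%03x\n", i, (unsigned)sym[i]);
	return 0;
}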
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
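One detail worth calling out from dce_link_encoder.c: DIG_FE_SOURCE_SELECT is treated as a bit-vector (DIGA..DIGG map to 0x01..0x40), so dce110_link_encoder_connect_dig_be_to_fe() ORs a bit in to connect a frontend and masks it out to disconnect. The user-space sketch below models just that read-modify-write on a plain integer; the one-hot values mirror the DCE110_DIG_FE_SOURCE_SELECT_* defines, while the local variable standing in for the register field is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same one-hot encoding as the DCE110_DIG_FE_SOURCE_SELECT_* defines. */
enum fe_engine { FE_DIGA, FE_DIGB, FE_DIGC, FE_DIGD, FE_DIGE, FE_DIGF, FE_DIGG };

static uint32_t fe_source_bit(enum fe_engine e)
{
	return 1u << e;	/* DIGA = 0x01 ... DIGG = 0x40 */
}

/* Mirror of the connect/disconnect read-modify-write on DIG_FE_SOURCE_SELECT;
 * 'field' stands in for the value read back from DIG_BE_CNTL. */
static uint32_t connect_dig_be_to_fe(uint32_t field, enum fe_engine e, bool connect)
{
	if (connect)
		field |= fe_source_bit(e);
	else
		field &= ~fe_source_bit(e);
	return field;
}

int main(void)
{
	uint32_t field = 0;

	field = connect_dig_be_to_fe(field, FE_DIGB, true);
	field = connect_dig_be_to_fe(field, FE_DIGE, true);
	field = connect_dig_be_to_fe(field, FE_DIGB, false);
	printf("DIG_FE_SOURCE_SELECT = 0x%02x\n", (unsigned)field); /* expect 0x10 (DIGE) */
	return 0;
}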
/* * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce_ipp.h" #include "reg_helper.h" #include "dm_services.h" #define REG(reg) \ (ipp_dce->regs->reg) #undef FN #define FN(reg_name, field_name) \ ipp_dce->ipp_shift->field_name, ipp_dce->ipp_mask->field_name #define CTX \ ipp_dce->base.ctx static void dce_ipp_cursor_set_position( struct input_pixel_processor *ipp, const struct dc_cursor_position *position, const struct dc_cursor_mi_param *param) { struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); /* lock cursor registers */ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true); /* Flag passed in structure differentiates cursor enable/disable. */ /* Update if it differs from cached state. */ REG_UPDATE(CUR_CONTROL, CURSOR_EN, position->enable); REG_SET_2(CUR_POSITION, 0, CURSOR_X_POSITION, position->x, CURSOR_Y_POSITION, position->y); REG_SET_2(CUR_HOT_SPOT, 0, CURSOR_HOT_SPOT_X, position->x_hotspot, CURSOR_HOT_SPOT_Y, position->y_hotspot); /* unlock cursor registers */ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false); } static void dce_ipp_cursor_set_attributes( struct input_pixel_processor *ipp, const struct dc_cursor_attributes *attributes) { struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); int mode; /* Lock cursor registers */ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true); /* Program cursor control */ switch (attributes->color_format) { case CURSOR_MODE_MONO: mode = 0; break; case CURSOR_MODE_COLOR_1BIT_AND: mode = 1; break; case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: mode = 2; break; case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: mode = 3; break; default: BREAK_TO_DEBUGGER(); /* unsupported */ mode = 0; } REG_UPDATE_3(CUR_CONTROL, CURSOR_MODE, mode, CURSOR_2X_MAGNIFY, attributes->attribute_flags.bits.ENABLE_MAGNIFICATION, CUR_INV_TRANS_CLAMP, attributes->attribute_flags.bits.INVERSE_TRANSPARENT_CLAMPING); if (attributes->color_format == CURSOR_MODE_MONO) { REG_SET_3(CUR_COLOR1, 0, CUR_COLOR1_BLUE, 0, CUR_COLOR1_GREEN, 0, CUR_COLOR1_RED, 0); REG_SET_3(CUR_COLOR2, 0, CUR_COLOR2_BLUE, 0xff, CUR_COLOR2_GREEN, 0xff, CUR_COLOR2_RED, 0xff); } /* * Program cursor size -- NOTE: HW spec specifies that HW register * stores size as (height - 1, width - 1) */ REG_SET_2(CUR_SIZE, 0, CURSOR_WIDTH, attributes->width-1, CURSOR_HEIGHT, attributes->height-1); /* Program cursor surface address */ /* SURFACE_ADDRESS_HIGH: Higher order bits (39:32) of hardware cursor * surface base address in byte. It is 4K byte aligned. 
* The correct way to program cursor surface address is to first write * to CUR_SURFACE_ADDRESS_HIGH, and then write to CUR_SURFACE_ADDRESS */ REG_SET(CUR_SURFACE_ADDRESS_HIGH, 0, CURSOR_SURFACE_ADDRESS_HIGH, attributes->address.high_part); REG_SET(CUR_SURFACE_ADDRESS, 0, CURSOR_SURFACE_ADDRESS, attributes->address.low_part); /* Unlock Cursor registers. */ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false); } static void dce_ipp_program_prescale(struct input_pixel_processor *ipp, struct ipp_prescale_params *params) { struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); /* set to bypass mode first before change */ REG_UPDATE(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); REG_SET_2(PRESCALE_VALUES_GRPH_R, 0, GRPH_PRESCALE_SCALE_R, params->scale, GRPH_PRESCALE_BIAS_R, params->bias); REG_SET_2(PRESCALE_VALUES_GRPH_G, 0, GRPH_PRESCALE_SCALE_G, params->scale, GRPH_PRESCALE_BIAS_G, params->bias); REG_SET_2(PRESCALE_VALUES_GRPH_B, 0, GRPH_PRESCALE_SCALE_B, params->scale, GRPH_PRESCALE_BIAS_B, params->bias); if (params->mode != IPP_PRESCALE_MODE_BYPASS) { REG_UPDATE(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 0); /* If prescale is in use, then legacy lut should be bypassed */ REG_UPDATE(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 1); } } static void dce_ipp_program_input_lut( struct input_pixel_processor *ipp, const struct dc_gamma *gamma) { int i; struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); /* power on LUT memory */ if (REG(DCFE_MEM_PWR_CTRL)) REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 1); /* enable all */ REG_SET(DC_LUT_WRITE_EN_MASK, 0, DC_LUT_WRITE_EN_MASK, 0x7); /* 256 entry mode */ REG_UPDATE(DC_LUT_RW_MODE, DC_LUT_RW_MODE, 0); /* LUT-256, unsigned, integer, new u0.12 format */ REG_SET_3(DC_LUT_CONTROL, 0, DC_LUT_DATA_R_FORMAT, 3, DC_LUT_DATA_G_FORMAT, 3, DC_LUT_DATA_B_FORMAT, 3); /* start from index 0 */ REG_SET(DC_LUT_RW_INDEX, 0, DC_LUT_RW_INDEX, 0); for (i = 0; i < gamma->num_entries; i++) { REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR, dc_fixpt_round( gamma->entries.red[i])); REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR, dc_fixpt_round( gamma->entries.green[i])); REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR, dc_fixpt_round( gamma->entries.blue[i])); } /* power off LUT memory */ if (REG(DCFE_MEM_PWR_CTRL)) REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 0); /* bypass prescale, enable legacy LUT */ REG_UPDATE(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); REG_UPDATE(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); } static void dce_ipp_set_degamma( struct input_pixel_processor *ipp, enum ipp_degamma_mode mode) { struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 1 : 0; ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS || mode == IPP_DEGAMMA_MODE_HW_sRGB); REG_SET_3(DEGAMMA_CONTROL, 0, GRPH_DEGAMMA_MODE, degamma_type, CURSOR_DEGAMMA_MODE, degamma_type, CURSOR2_DEGAMMA_MODE, degamma_type); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_ipp_set_degamma( struct input_pixel_processor *ipp, enum ipp_degamma_mode mode) { struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 
1 : 0; ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS || mode == IPP_DEGAMMA_MODE_HW_sRGB); /* DCE6 does not have CURSOR2_DEGAMMA_MODE bit in DEGAMMA_CONTROL reg */ REG_SET_2(DEGAMMA_CONTROL, 0, GRPH_DEGAMMA_MODE, degamma_type, CURSOR_DEGAMMA_MODE, degamma_type); } #endif static const struct ipp_funcs dce_ipp_funcs = { .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes, .ipp_cursor_set_position = dce_ipp_cursor_set_position, .ipp_program_prescale = dce_ipp_program_prescale, .ipp_program_input_lut = dce_ipp_program_input_lut, .ipp_set_degamma = dce_ipp_set_degamma }; #if defined(CONFIG_DRM_AMD_DC_SI) static const struct ipp_funcs dce60_ipp_funcs = { .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes, .ipp_cursor_set_position = dce_ipp_cursor_set_position, .ipp_program_prescale = dce_ipp_program_prescale, .ipp_program_input_lut = dce_ipp_program_input_lut, .ipp_set_degamma = dce60_ipp_set_degamma }; #endif /*****************************************/ /* Constructor, Destructor */ /*****************************************/ void dce_ipp_construct( struct dce_ipp *ipp_dce, struct dc_context *ctx, int inst, const struct dce_ipp_registers *regs, const struct dce_ipp_shift *ipp_shift, const struct dce_ipp_mask *ipp_mask) { ipp_dce->base.ctx = ctx; ipp_dce->base.inst = inst; ipp_dce->base.funcs = &dce_ipp_funcs; ipp_dce->regs = regs; ipp_dce->ipp_shift = ipp_shift; ipp_dce->ipp_mask = ipp_mask; } #if defined(CONFIG_DRM_AMD_DC_SI) void dce60_ipp_construct( struct dce_ipp *ipp_dce, struct dc_context *ctx, int inst, const struct dce_ipp_registers *regs, const struct dce_ipp_shift *ipp_shift, const struct dce_ipp_mask *ipp_mask) { ipp_dce->base.ctx = ctx; ipp_dce->base.inst = inst; ipp_dce->base.funcs = &dce60_ipp_funcs; ipp_dce->regs = regs; ipp_dce->ipp_shift = ipp_shift; ipp_dce->ipp_mask = ipp_mask; } #endif void dce_ipp_destroy(struct input_pixel_processor **ipp) { kfree(TO_DCE_IPP(*ipp)); *ipp = NULL; }
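Two easy-to-miss details in the cursor path above: CUR_SIZE stores (width - 1, height - 1) rather than the raw dimensions, and the 4K-aligned surface address must be written high part first, then low part, as the comment in dce_ipp_cursor_set_attributes() notes. A minimal host-side sketch of that encoding follows; the register write is mocked with printf so the example stays self-contained, and the sample address is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Mock register write - stands in for REG_SET()/dm_write_reg() here. */
static void mock_reg_write(const char *name, uint32_t value)
{
	printf("%-28s <- 0x%08x\n", name, value);
}

/* Program a cursor the way dce_ipp_cursor_set_attributes() does:
 * size fields hold (dimension - 1), address goes HIGH then LOW. */
static void program_cursor(uint32_t width, uint32_t height, uint64_t addr)
{
	mock_reg_write("CUR_SIZE.CURSOR_WIDTH",    width - 1);
	mock_reg_write("CUR_SIZE.CURSOR_HEIGHT",   height - 1);
	mock_reg_write("CUR_SURFACE_ADDRESS_HIGH", (uint32_t)(addr >> 32));
	mock_reg_write("CUR_SURFACE_ADDRESS",      (uint32_t)(addr & 0xffffffffu));
}

int main(void)
{
	/* 64x64 cursor at an arbitrary 4K-aligned address (illustrative). */
	program_cursor(64, 64, 0x0000001234567000ull);
	return 0;
}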
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
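dce_ipp_program_input_lut() above fills the 256-entry legacy LUT by writing red, green and blue for each index to the same DC_LUT_SEQ_COLOR data register, relying on the hardware read/write index to advance. The sketch below imitates that ordering against a plain array so the resulting layout can be inspected; the identity ramp and the 12-bit scaling are illustrative stand-ins for the dc_fixpt_round() conversion done in the driver, not the driver's actual gamma data.

#include <stdint.h>
#include <stdio.h>

#define LUT_ENTRIES 256

/* One sequential data port, as with DC_LUT_SEQ_COLOR: every write lands in
 * the next slot, three writes (R, G, B) per LUT index. */
static uint16_t lut_stream[LUT_ENTRIES * 3];
static unsigned int lut_wr_pos;

static void lut_seq_write(uint16_t value)
{
	lut_stream[lut_wr_pos++] = value;
}

int main(void)
{
	unsigned int i;

	/* Illustrative identity ramp scaled to 12 bits (0..4095). */
	for (i = 0; i < LUT_ENTRIES; i++) {
		uint16_t v = (uint16_t)((i * 4095u) / (LUT_ENTRIES - 1));

		lut_seq_write(v);	/* red   */
		lut_seq_write(v);	/* green */
		lut_seq_write(v);	/* blue  */
	}

	printf("entry 0:   R=%u G=%u B=%u\n", (unsigned)lut_stream[0],
	       (unsigned)lut_stream[1], (unsigned)lut_stream[2]);
	printf("entry 255: R=%u G=%u B=%u\n", (unsigned)lut_stream[765],
	       (unsigned)lut_stream[766], (unsigned)lut_stream[767]);
	return 0;
}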
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dmub_hw_lock_mgr.h" #include "dc_dmub_srv.h" #include "dc_types.h" #include "core_types.h" void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, bool lock, union dmub_hw_lock_flags *hw_locks, struct dmub_hw_lock_inst_flags *inst_flags) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK; cmd.lock_hw.header.sub_type = 0; cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data); cmd.lock_hw.lock_hw_data.client = HW_LOCK_CLIENT_DRIVER; cmd.lock_hw.lock_hw_data.lock = lock; cmd.lock_hw.lock_hw_data.hw_locks.u8All = hw_locks->u8All; memcpy(&cmd.lock_hw.lock_hw_data.inst_flags, inst_flags, sizeof(struct dmub_hw_lock_inst_flags)); if (!lock) cmd.lock_hw.lock_hw_data.should_release = 1; dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd) { union dmub_inbox0_data_register data = { 0 }; data.inbox0_cmd_lock_hw = hw_lock_cmd; dc_dmub_srv_clear_inbox0_ack(dmub_srv); dc_dmub_srv_send_inbox0_cmd(dmub_srv, data); dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } bool should_use_dmub_lock(struct dc_link *link) { if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true; return false; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
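The pattern in dmub_hw_lock_mgr_cmd() above - zero the whole command, fill in the header type and payload size, copy the lock flags, and set a release marker only on the unlock path - can be modelled outside the driver. The sketch below uses simplified stand-in structs and an arbitrary opcode (not the real dmub_rb_cmd layout or DMUB_CMD__HW_LOCK value) purely to show that flow; all field names here are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the dmub command layout - illustrative only. */
struct fake_cmd_header { uint8_t type; uint8_t sub_type; uint8_t payload_bytes; };
struct fake_lock_data  { uint8_t client; uint8_t lock; uint8_t hw_locks; uint8_t should_release; };
struct fake_lock_cmd   { struct fake_cmd_header header; struct fake_lock_data data; };

#define FAKE_CMD_HW_LOCK	0x45	/* arbitrary opcode for the sketch */
#define FAKE_CLIENT_DRIVER	0x01

static void build_hw_lock_cmd(struct fake_lock_cmd *cmd, bool lock, uint8_t hw_locks)
{
	memset(cmd, 0, sizeof(*cmd));		/* start from a clean command */
	cmd->header.type = FAKE_CMD_HW_LOCK;
	cmd->header.payload_bytes = sizeof(cmd->data);
	cmd->data.client = FAKE_CLIENT_DRIVER;
	cmd->data.lock = lock;
	cmd->data.hw_locks = hw_locks;
	if (!lock)				/* release marker only on unlock */
		cmd->data.should_release = 1;
}

int main(void)
{
	struct fake_lock_cmd cmd;

	build_hw_lock_cmd(&cmd, false, 0x03);
	printf("type=0x%02x payload=%u lock=%u release=%u\n",
	       (unsigned)cmd.header.type, (unsigned)cmd.header.payload_bytes,
	       (unsigned)cmd.data.lock, (unsigned)cmd.data.should_release);
	return 0;
}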
/* * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "core_types.h" #include "link_encoder.h" #include "dce_dmcu.h" #include "dm_services.h" #include "reg_helper.h" #include "fixed31_32.h" #include "dc.h" #define TO_DCE_DMCU(dmcu)\ container_of(dmcu, struct dce_dmcu, base) #define REG(reg) \ (dmcu_dce->regs->reg) #undef FN #define FN(reg_name, field_name) \ dmcu_dce->dmcu_shift->field_name, dmcu_dce->dmcu_mask->field_name #define CTX \ dmcu_dce->base.ctx /* PSR related commands */ #define PSR_ENABLE 0x20 #define PSR_EXIT 0x21 #define PSR_SET 0x23 #define PSR_SET_WAITLOOP 0x31 #define MCP_INIT_DMCU 0x88 #define MCP_INIT_IRAM 0x89 #define MCP_SYNC_PHY_LOCK 0x90 #define MCP_SYNC_PHY_UNLOCK 0x91 #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ #define CRC_WIN_NOTIFY 0x92 #define CRC_STOP_UPDATE 0x93 #define MCP_SEND_EDID_CEA 0xA0 #define EDID_CEA_CMD_ACK 1 #define EDID_CEA_CMD_NACK 2 #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L // PSP FW version #define mmMP0_SMN_C2PMSG_58 0x1607A //Register access policy version #define mmMP0_SMN_C2PMSG_91 0x1609B static const uint32_t abm_gain_stepsize = 0x0060; static bool dce_dmcu_init(struct dmcu *dmcu) { // Do nothing return true; } static bool dce_dmcu_load_iram(struct dmcu *dmcu, unsigned int start_offset, const char *src, unsigned int bytes) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int count = 0; /* Enable write access to IRAM */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1, IRAM_WR_ADDR_AUTO_INC, 1); REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10); REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset); for (count = 0; count < bytes; count++) REG_WRITE(DMCU_IRAM_WR_DATA, src[count]); /* Disable write access to IRAM to allow dynamic sleep state */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0, IRAM_WR_ADDR_AUTO_INC, 0); return true; } static void dce_get_dmcu_psr_state(struct dmcu *dmcu, enum dc_psr_state *state) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); uint32_t psr_state_offset = 0xf0; /* Enable write access to IRAM */ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1); REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10); /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */ REG_WRITE(DMCU_IRAM_RD_CTRL, psr_state_offset); /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/ *state = (enum dc_psr_state)REG_READ(DMCU_IRAM_RD_DATA); /* Disable write access to 
IRAM after finished using IRAM * in order to allow dynamic sleep state */ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0); } static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int dmcu_max_retry_on_wait_reg_ready = 801; unsigned int dmcu_wait_reg_ready_interval = 100; unsigned int retryCount; enum dc_psr_state state = PSR_STATE0; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, dmcu_max_retry_on_wait_reg_ready); /* setDMCUParam_Cmd */ if (enable) REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_ENABLE); else REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_EXIT); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); if (wait == true) { for (retryCount = 0; retryCount <= 100; retryCount++) { dce_get_dmcu_psr_state(dmcu, &state); if (enable) { if (state != PSR_STATE0) break; } else { if (state == PSR_STATE0) break; } udelay(10); } } } static bool dce_dmcu_setup_psr(struct dmcu *dmcu, struct dc_link *link, struct psr_context *psr_context) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int dmcu_max_retry_on_wait_reg_ready = 801; unsigned int dmcu_wait_reg_ready_interval = 100; union dce_dmcu_psr_config_data_reg1 masterCmdData1; union dce_dmcu_psr_config_data_reg2 masterCmdData2; union dce_dmcu_psr_config_data_reg3 masterCmdData3; link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc, psr_context->psrExitLinkTrainingRequired); /* Enable static screen interrupts for PSR supported display */ /* Disable the interrupt coming from other displays. */ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN1_INT_TO_UC_EN, 0, STATIC_SCREEN2_INT_TO_UC_EN, 0, STATIC_SCREEN3_INT_TO_UC_EN, 0, STATIC_SCREEN4_INT_TO_UC_EN, 0); switch (psr_context->controllerId) { /* Driver uses case 1 for unconfigured */ case 1: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN1_INT_TO_UC_EN, 1); break; case 2: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN2_INT_TO_UC_EN, 1); break; case 3: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN3_INT_TO_UC_EN, 1); break; case 4: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN4_INT_TO_UC_EN, 1); break; case 5: /* CZ/NL only has 4 CRTC!! * really valid. * There is no interrupt enable mask for these instances. */ break; case 6: /* CZ/NL only has 4 CRTC!! * These are here because they are defined in HW regspec, * but not really valid. There is no interrupt enable mask * for these instances. 
*/ break; default: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN1_INT_TO_UC_EN, 1); break; } link->link_enc->funcs->psr_program_secondary_packet(link->link_enc, psr_context->sdpTransmitLineNumDeadline); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, dmcu_max_retry_on_wait_reg_ready); /* setDMCUParam_PSRHostConfigData */ masterCmdData1.u32All = 0; masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames; masterCmdData1.bits.hyst_lines = psr_context->hyst_lines; masterCmdData1.bits.rfb_update_auto_en = psr_context->rfb_update_auto_en; masterCmdData1.bits.dp_port_num = psr_context->transmitterId; masterCmdData1.bits.dcp_sel = psr_context->controllerId; masterCmdData1.bits.phy_type = psr_context->phyType; masterCmdData1.bits.frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; masterCmdData1.bits.aux_chan = psr_context->channel; masterCmdData1.bits.aux_repeat = psr_context->aux_repeats; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32All); masterCmdData2.u32All = 0; masterCmdData2.bits.dig_fe = psr_context->engineId; masterCmdData2.bits.dig_be = psr_context->transmitterId; masterCmdData2.bits.skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock; masterCmdData2.bits.frame_delay = psr_context->frame_delay; masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId; masterCmdData2.bits.num_of_controllers = psr_context->numberOfControllers; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2), masterCmdData2.u32All); masterCmdData3.u32All = 0; masterCmdData3.bits.psr_level = psr_context->psr_level.u32all; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3), masterCmdData3.u32All); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); return true; } static bool dce_is_dmcu_initialized(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int dmcu_uc_reset; /* microcontroller is not running */ REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset); /* DMCU is not running */ if (dmcu_uc_reset) return false; return true; } static void dce_psr_wait_loop( struct dmcu *dmcu, unsigned int wait_loop_number) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1; if (dmcu->cached_wait_loop_number == wait_loop_number) return; /* DMCU is not running */ if (!dce_is_dmcu_initialized(dmcu)) return; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); masterCmdData1.u32 = 0; masterCmdData1.bits.wait_loop = wait_loop_number; dmcu->cached_wait_loop_number = wait_loop_number; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); } static void dce_get_psr_wait_loop( struct dmcu *dmcu, unsigned int *psr_wait_loop_number) { *psr_wait_loop_number = dmcu->cached_wait_loop_number; return; } static void dcn10_get_dmcu_version(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); uint32_t dmcu_version_offset = 0xf1; /* Enable write access to IRAM */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1, IRAM_RD_ADDR_AUTO_INC, 1); REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10); /* Write address to IRAM_RD_ADDR and read from DATA register */ 
REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_version_offset); dmcu->dmcu_version.interface_version = REG_READ(DMCU_IRAM_RD_DATA); dmcu->dmcu_version.abm_version = REG_READ(DMCU_IRAM_RD_DATA); dmcu->dmcu_version.psr_version = REG_READ(DMCU_IRAM_RD_DATA); dmcu->dmcu_version.build_version = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) | REG_READ(DMCU_IRAM_RD_DATA)); /* Disable write access to IRAM to allow dynamic sleep state */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0, IRAM_RD_ADDR_AUTO_INC, 0); } static void dcn10_dmcu_enable_fractional_pwm(struct dmcu *dmcu, uint32_t fractional_pwm) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); /* Wait until microcontroller is ready to process interrupt */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); /* Set PWM fractional enable/disable */ REG_WRITE(MASTER_COMM_DATA_REG1, fractional_pwm); /* Set command to enable or disable fractional PWM microcontroller */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET_PWM_FRAC); /* Notify microcontroller of new command */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* Ensure command has been executed before continuing */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); } static bool dcn10_dmcu_init(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); const struct dc_config *config = &dmcu->ctx->dc->config; bool status = false; struct dc_context *ctx = dmcu->ctx; unsigned int i; // 5 4 3 2 1 0 // F E D C B A - bit 0 is A, bit 5 is F unsigned int tx_interrupt_mask = 0; PERF_TRACE(); /* Definition of DC_DMCU_SCRATCH * 0 : firmare not loaded * 1 : PSP load DMCU FW but not initialized * 2 : Firmware already initialized */ dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH); for (i = 0; i < ctx->dc->link_count; i++) { if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) { if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A && ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) { tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter; } } } switch (dmcu->dmcu_state) { case DMCU_UNLOADED: status = false; break; case DMCU_LOADED_UNINITIALIZED: /* Wait until microcontroller is ready to process interrupt */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); /* Set initialized ramping boundary value */ REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF); /* Set backlight ramping stepsize */ REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize); REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask); /* Set command to initialize microcontroller */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_INIT_DMCU); /* Notify microcontroller of new command */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* Ensure command has been executed before continuing */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); // Check state is initialized dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH); // If microcontroller is not in running state, fail if (dmcu->dmcu_state == DMCU_RUNNING) { /* Retrieve and cache the DMCU firmware version. */ dcn10_get_dmcu_version(dmcu); /* Initialize DMCU to use fractional PWM or not */ dcn10_dmcu_enable_fractional_pwm(dmcu, (config->disable_fractional_pwm == false) ? 
1 : 0); status = true; } else { status = false; } break; case DMCU_RUNNING: status = true; break; default: status = false; break; } PERF_TRACE(); return status; } static bool dcn21_dmcu_init(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15); if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) { return false; } return dcn10_dmcu_init(dmcu); } static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, unsigned int start_offset, const char *src, unsigned int bytes) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int count = 0; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return false; /* Enable write access to IRAM */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1, IRAM_WR_ADDR_AUTO_INC, 1); REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10); REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset); for (count = 0; count < bytes; count++) REG_WRITE(DMCU_IRAM_WR_DATA, src[count]); /* Disable write access to IRAM to allow dynamic sleep state */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0, IRAM_WR_ADDR_AUTO_INC, 0); /* Wait until microcontroller is ready to process interrupt */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); /* Set command to signal IRAM is loaded and to initialize IRAM */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_INIT_IRAM); /* Notify microcontroller of new command */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* Ensure command has been executed before continuing */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); return true; } static void dcn10_get_dmcu_psr_state(struct dmcu *dmcu, enum dc_psr_state *state) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); uint32_t psr_state_offset = 0xf0; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return; /* Enable write access to IRAM */ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1); REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10); /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */ REG_WRITE(DMCU_IRAM_RD_CTRL, psr_state_offset); /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/ *state = (enum dc_psr_state)REG_READ(DMCU_IRAM_RD_DATA); /* Disable write access to IRAM after finished using IRAM * in order to allow dynamic sleep state */ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0); } static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int dmcu_max_retry_on_wait_reg_ready = 801; unsigned int dmcu_wait_reg_ready_interval = 100; unsigned int retryCount; enum dc_psr_state state = PSR_STATE0; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, dmcu_max_retry_on_wait_reg_ready); /* setDMCUParam_Cmd */ if (enable) REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_ENABLE); else REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_EXIT); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* Below loops 1000 x 500us = 500 ms. * Exit PSR may need to wait 1-2 frames to power up. Timeout after at * least a few frames. Should never hit the max retry assert below. 
*/ if (wait == true) { for (retryCount = 0; retryCount <= 1000; retryCount++) { dcn10_get_dmcu_psr_state(dmcu, &state); if (enable) { if (state != PSR_STATE0) break; } else { if (state == PSR_STATE0) break; } fsleep(500); } /* assert if max retry hit */ if (retryCount >= 1000) ASSERT(0); } } static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu, struct dc_link *link, struct psr_context *psr_context) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int dmcu_max_retry_on_wait_reg_ready = 801; unsigned int dmcu_wait_reg_ready_interval = 100; union dce_dmcu_psr_config_data_reg1 masterCmdData1; union dce_dmcu_psr_config_data_reg2 masterCmdData2; union dce_dmcu_psr_config_data_reg3 masterCmdData3; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return false; link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc, psr_context->psrExitLinkTrainingRequired); /* Enable static screen interrupts for PSR supported display */ /* Disable the interrupt coming from other displays. */ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN1_INT_TO_UC_EN, 0, STATIC_SCREEN2_INT_TO_UC_EN, 0, STATIC_SCREEN3_INT_TO_UC_EN, 0, STATIC_SCREEN4_INT_TO_UC_EN, 0); switch (psr_context->controllerId) { /* Driver uses case 1 for unconfigured */ case 1: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN1_INT_TO_UC_EN, 1); break; case 2: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN2_INT_TO_UC_EN, 1); break; case 3: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN3_INT_TO_UC_EN, 1); break; case 4: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN4_INT_TO_UC_EN, 1); break; case 5: /* CZ/NL only has 4 CRTC!! * really valid. * There is no interrupt enable mask for these instances. */ break; case 6: /* CZ/NL only has 4 CRTC!! * These are here because they are defined in HW regspec, * but not really valid. There is no interrupt enable mask * for these instances. 
*/ break; default: REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK, STATIC_SCREEN1_INT_TO_UC_EN, 1); break; } link->link_enc->funcs->psr_program_secondary_packet(link->link_enc, psr_context->sdpTransmitLineNumDeadline); if (psr_context->allow_smu_optimizations) REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, dmcu_max_retry_on_wait_reg_ready); /* setDMCUParam_PSRHostConfigData */ masterCmdData1.u32All = 0; masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames; masterCmdData1.bits.hyst_lines = psr_context->hyst_lines; masterCmdData1.bits.rfb_update_auto_en = psr_context->rfb_update_auto_en; masterCmdData1.bits.dp_port_num = psr_context->transmitterId; masterCmdData1.bits.dcp_sel = psr_context->controllerId; masterCmdData1.bits.phy_type = psr_context->phyType; masterCmdData1.bits.frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; masterCmdData1.bits.aux_chan = psr_context->channel; masterCmdData1.bits.aux_repeat = psr_context->aux_repeats; masterCmdData1.bits.allow_smu_optimizations = psr_context->allow_smu_optimizations; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32All); masterCmdData2.u32All = 0; masterCmdData2.bits.dig_fe = psr_context->engineId; masterCmdData2.bits.dig_be = psr_context->transmitterId; masterCmdData2.bits.skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock; masterCmdData2.bits.frame_delay = psr_context->frame_delay; masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId; masterCmdData2.bits.num_of_controllers = psr_context->numberOfControllers; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2), masterCmdData2.u32All); masterCmdData3.u32All = 0; masterCmdData3.bits.psr_level = psr_context->psr_level.u32all; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3), masterCmdData3.u32All); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); return true; } static void dcn10_psr_wait_loop( struct dmcu *dmcu, unsigned int wait_loop_number) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return; if (wait_loop_number != 0) { /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); masterCmdData1.u32 = 0; masterCmdData1.bits.wait_loop = wait_loop_number; dmcu->cached_wait_loop_number = wait_loop_number; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); } } static void dcn10_get_psr_wait_loop( struct dmcu *dmcu, unsigned int *psr_wait_loop_number) { *psr_wait_loop_number = dmcu->cached_wait_loop_number; return; } static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu) { /* microcontroller is not running */ if (dmcu->dmcu_state != DMCU_RUNNING) return false; return true; } static bool dcn20_lock_phy(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return false; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, 
MASTER_COMM_INTERRUPT, 0, 1, 10000); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SYNC_PHY_LOCK); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); return true; } static bool dcn20_unlock_phy(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return false; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SYNC_PHY_UNLOCK); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); return true; } static bool dcn10_send_edid_cea(struct dmcu *dmcu, int offset, int total_length, uint8_t *data, int length) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); uint32_t header, data1, data2; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return false; if (length > 8 || length <= 0) return false; header = ((uint32_t)offset & 0xFFFF) << 16 | (total_length & 0xFFFF); data1 = (((uint32_t)data[0]) << 24) | (((uint32_t)data[1]) << 16) | (((uint32_t)data[2]) << 8) | ((uint32_t)data[3]); data2 = (((uint32_t)data[4]) << 24) | (((uint32_t)data[5]) << 16) | (((uint32_t)data[6]) << 8) | ((uint32_t)data[7]); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SEND_EDID_CEA); REG_WRITE(MASTER_COMM_DATA_REG1, header); REG_WRITE(MASTER_COMM_DATA_REG2, data1); REG_WRITE(MASTER_COMM_DATA_REG3, data2); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000); return true; } static bool dcn10_get_scp_results(struct dmcu *dmcu, uint32_t *cmd, uint32_t *data1, uint32_t *data2, uint32_t *data3) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return false; *cmd = REG_READ(SLAVE_COMM_CMD_REG); *data1 = REG_READ(SLAVE_COMM_DATA_REG1); *data2 = REG_READ(SLAVE_COMM_DATA_REG2); *data3 = REG_READ(SLAVE_COMM_DATA_REG3); /* clear SCP interrupt */ REG_UPDATE(SLAVE_COMM_CNTL_REG, SLAVE_COMM_INTERRUPT, 0); return true; } static bool dcn10_recv_amd_vsdb(struct dmcu *dmcu, int *version, int *min_frame_rate, int *max_frame_rate) { uint32_t data[4]; int cmd, ack, len; if (!dcn10_get_scp_results(dmcu, &data[0], &data[1], &data[2], &data[3])) return false; cmd = data[0] & 0x3FF; len = (data[0] >> 10) & 0x3F; ack = data[1]; if (cmd != MCP_SEND_EDID_CEA || ack != EDID_CEA_CMD_ACK || len != 12) return false; if ((data[2] & 0xFF)) { *version = (data[2] >> 8) & 0xFF; *min_frame_rate = (data[3] >> 16) & 0xFFFF; *max_frame_rate = data[3] & 0xFFFF; return true; } return false; } static bool dcn10_recv_edid_cea_ack(struct dmcu *dmcu, int *offset) { uint32_t data[4]; int cmd, ack; if (!dcn10_get_scp_results(dmcu, &data[0], &data[1], &data[2], &data[3])) return false; cmd = data[0] & 0x3FF; ack = data[1]; if (cmd != MCP_SEND_EDID_CEA) return false; if (ack == EDID_CEA_CMD_ACK) return true; *offset = data[2]; /* nack */ return false; } #if 
defined(CONFIG_DRM_AMD_SECURE_DISPLAY) static void dcn10_forward_crc_window(struct dmcu *dmcu, struct rect *rect, struct otg_phy_mux *mux_mapping) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int dmcu_max_retry_on_wait_reg_ready = 801; unsigned int dmcu_wait_reg_ready_interval = 100; unsigned int crc_start = 0, crc_end = 0, otg_phy_mux = 0; int x_start, y_start, x_end, y_end; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return; if (!rect) return; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, dmcu_max_retry_on_wait_reg_ready); x_start = rect->x; y_start = rect->y; x_end = x_start + rect->width; y_end = y_start + rect->height; /* build up nitification data */ crc_start = (((unsigned int) x_start) << 16) | y_start; crc_end = (((unsigned int) x_end) << 16) | y_end; otg_phy_mux = (((unsigned int) mux_mapping->otg_output_num) << 16) | mux_mapping->phy_output_num; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), crc_start); dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2), crc_end); dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3), otg_phy_mux); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, CRC_WIN_NOTIFY); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); } static void dcn10_stop_crc_win_update(struct dmcu *dmcu, struct otg_phy_mux *mux_mapping) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int dmcu_max_retry_on_wait_reg_ready = 801; unsigned int dmcu_wait_reg_ready_interval = 100; unsigned int otg_phy_mux = 0; /* If microcontroller is not running, do nothing */ if (dmcu->dmcu_state != DMCU_RUNNING) return; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, dmcu_max_retry_on_wait_reg_ready); /* build up nitification data */ otg_phy_mux = (((unsigned int) mux_mapping->otg_output_num) << 16) | mux_mapping->phy_output_num; dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), otg_phy_mux); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, CRC_STOP_UPDATE); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); } #endif static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, .load_iram = dce_dmcu_load_iram, .set_psr_enable = dce_dmcu_set_psr_enable, .setup_psr = dce_dmcu_setup_psr, .get_psr_state = dce_get_dmcu_psr_state, .set_psr_wait_loop = dce_psr_wait_loop, .get_psr_wait_loop = dce_get_psr_wait_loop, .is_dmcu_initialized = dce_is_dmcu_initialized }; static const struct dmcu_funcs dcn10_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, .set_psr_enable = dcn10_dmcu_set_psr_enable, .setup_psr = dcn10_dmcu_setup_psr, .get_psr_state = dcn10_get_dmcu_psr_state, .set_psr_wait_loop = dcn10_psr_wait_loop, .get_psr_wait_loop = dcn10_get_psr_wait_loop, .send_edid_cea = dcn10_send_edid_cea, .recv_amd_vsdb = dcn10_recv_amd_vsdb, .recv_edid_cea_ack = dcn10_recv_edid_cea_ack, #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) .forward_crc_window = dcn10_forward_crc_window, .stop_crc_win_update = dcn10_stop_crc_win_update, #endif .is_dmcu_initialized = dcn10_is_dmcu_initialized }; static const struct dmcu_funcs dcn20_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, .set_psr_enable = dcn10_dmcu_set_psr_enable, .setup_psr = dcn10_dmcu_setup_psr, .get_psr_state = dcn10_get_dmcu_psr_state, .set_psr_wait_loop = dcn10_psr_wait_loop, 
.get_psr_wait_loop = dcn10_get_psr_wait_loop, .is_dmcu_initialized = dcn10_is_dmcu_initialized, .lock_phy = dcn20_lock_phy, .unlock_phy = dcn20_unlock_phy }; static const struct dmcu_funcs dcn21_funcs = { .dmcu_init = dcn21_dmcu_init, .load_iram = dcn10_dmcu_load_iram, .set_psr_enable = dcn10_dmcu_set_psr_enable, .setup_psr = dcn10_dmcu_setup_psr, .get_psr_state = dcn10_get_dmcu_psr_state, .set_psr_wait_loop = dcn10_psr_wait_loop, .get_psr_wait_loop = dcn10_get_psr_wait_loop, .is_dmcu_initialized = dcn10_is_dmcu_initialized, .lock_phy = dcn20_lock_phy, .unlock_phy = dcn20_unlock_phy }; static void dce_dmcu_construct( struct dce_dmcu *dmcu_dce, struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { struct dmcu *base = &dmcu_dce->base; base->ctx = ctx; base->funcs = &dce_funcs; base->cached_wait_loop_number = 0; dmcu_dce->regs = regs; dmcu_dce->dmcu_shift = dmcu_shift; dmcu_dce->dmcu_mask = dmcu_mask; } static void dcn21_dmcu_construct( struct dce_dmcu *dmcu_dce, struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { uint32_t psp_version = 0; dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); dmcu_dce->base.psp_version = psp_version; } struct dmcu *dce_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dce_dmcu_construct( dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); dmcu_dce->base.funcs = &dce_funcs; return &dmcu_dce->base; } struct dmcu *dcn10_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dce_dmcu_construct( dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); dmcu_dce->base.funcs = &dcn10_funcs; return &dmcu_dce->base; } struct dmcu *dcn20_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dce_dmcu_construct( dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); dmcu_dce->base.funcs = &dcn20_funcs; return &dmcu_dce->base; } struct dmcu *dcn21_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dcn21_dmcu_construct( dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); dmcu_dce->base.funcs = &dcn21_funcs; return &dmcu_dce->base; } void dce_dmcu_destroy(struct dmcu **dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu); kfree(dmcu_dce); *dmcu = NULL; }
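/*
 * Illustrative sketch only -- not part of dce_dmcu.c above. It models the
 * recurring DMCU mailbox handshake used throughout that file
 * (waitDMCUReadyForCmd -> load data registers -> setDMCUParam_Cmd ->
 * notifyDMCUMsg -> wait for completion) against a fake in-memory register
 * file, so the control flow can be followed without driver types or hardware.
 * The register names, indices, and helpers here are stand-ins, not the real
 * dc register map or REG_* macros.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum fake_reg { COMM_CNTL, COMM_CMD, COMM_DATA1, COMM_DATA2, COMM_DATA3, FAKE_REG_COUNT };

static uint32_t fake_regs[FAKE_REG_COUNT];

static uint32_t reg_read(enum fake_reg r)              { return fake_regs[r]; }
static void     reg_write(enum fake_reg r, uint32_t v) { fake_regs[r] = v; }

#define COMM_INTERRUPT_MASK 0x00000001u

/* Poll until the interrupt bit clears, i.e. the DMCU consumed the last command. */
static bool dmcu_wait_ready(unsigned int retries)
{
	while (retries--) {
		if (!(reg_read(COMM_CNTL) & COMM_INTERRUPT_MASK))
			return true;
		/* A real driver delays here (REG_WAIT interval); the fake "DMCU"
		 * below just clears the bit so the next iteration succeeds. */
		fake_regs[COMM_CNTL] &= ~COMM_INTERRUPT_MASK;
	}
	return false;
}

/* Send one MCP command with up to three 32-bit payload words. */
static bool dmcu_send_cmd(uint8_t cmd, uint32_t d1, uint32_t d2, uint32_t d3)
{
	if (!dmcu_wait_ready(800))		/* waitDMCUReadyForCmd */
		return false;
	reg_write(COMM_DATA1, d1);
	reg_write(COMM_DATA2, d2);
	reg_write(COMM_DATA3, d3);
	reg_write(COMM_CMD, cmd);		/* setDMCUParam_Cmd */
	reg_write(COMM_CNTL, reg_read(COMM_CNTL) | COMM_INTERRUPT_MASK); /* notifyDMCUMsg */
	return dmcu_wait_ready(800);		/* ensure command was executed */
}

int main(void)
{
	/* 0x65 mirrors MCP_ABM_LEVEL_SET from the file above, payload = level 2. */
	printf("sent: %s\n", dmcu_send_cmd(0x65, 2, 0, 0) ? "ok" : "timeout");
	return 0;
}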
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc.h" #include "dc_dmub_srv.h" #include "dmub/dmub_srv.h" #include "core_types.h" #include "dmub_replay.h" #define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ #define MAX_PIPES 6 /* * Get Replay state from firmware. */ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst) { struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; /* uint32_t raw_state = 0; */ uint32_t retry_count = 0; enum dmub_status status; do { // Send gpint command and wait for ack status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_REPLAY_STATE, panel_inst, 30); if (status == DMUB_STATUS_OK) { // GPINT was executed, get response dmub_srv_get_gpint_response(srv, (uint32_t *)state); } else // Return invalid state when GPINT times out *state = REPLAY_STATE_INVALID; } while (++retry_count <= 1000 && *state == REPLAY_STATE_INVALID); // Assert if max retry hit if (retry_count >= 1000 && *state == REPLAY_STATE_INVALID) { ASSERT(0); /* To-do: Add retry fail log */ } } /* * Enable/Disable Replay. */ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; uint32_t retry_count; enum replay_state state = REPLAY_STATE_0; memset(&cmd, 0, sizeof(cmd)); cmd.replay_enable.header.type = DMUB_CMD__REPLAY; cmd.replay_enable.data.panel_inst = panel_inst; cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; if (enable) cmd.replay_enable.data.enable = REPLAY_ENABLE; else cmd.replay_enable.data.enable = REPLAY_DISABLE; cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Below loops 1000 x 500us = 500 ms. * Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at * least a few frames. Should never hit the max retry assert below. */ if (wait) { for (retry_count = 0; retry_count <= 1000; retry_count++) { dmub_replay_get_state(dmub, &state, panel_inst); if (enable) { if (state != REPLAY_STATE_0) break; } else { if (state == REPLAY_STATE_0) break; } fsleep(500); } /* assert if max retry hit */ if (retry_count >= 1000) ASSERT(0); } } /* * Set REPLAY power optimization flags. 
*/ static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; memset(&cmd, 0, sizeof(cmd)); cmd.replay_set_power_opt.header.type = DMUB_CMD__REPLAY; cmd.replay_set_power_opt.header.sub_type = DMUB_CMD__SET_REPLAY_POWER_OPT; cmd.replay_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_power_opt_data); cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt; cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* * Setup Replay by programming phy registers and sending replay hw context values to firmware. */ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, struct dc_link *link, struct replay_context *replay_context, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; struct dmub_cmd_replay_copy_settings_data *copy_settings_data = &cmd.replay_copy_settings.replay_copy_settings_data; struct pipe_ctx *pipe_ctx = NULL; struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; int i = 0; for (i = 0; i < MAX_PIPES; i++) { if (res_ctx && res_ctx->pipe_ctx[i].stream && res_ctx->pipe_ctx[i].stream->link && res_ctx->pipe_ctx[i].stream->link == link && res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { pipe_ctx = &res_ctx->pipe_ctx[i]; //TODO: refactor for multi edp support break; } } if (!pipe_ctx) return false; memset(&cmd, 0, sizeof(cmd)); cmd.replay_copy_settings.header.type = DMUB_CMD__REPLAY; cmd.replay_copy_settings.header.sub_type = DMUB_CMD__REPLAY_COPY_SETTINGS; cmd.replay_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_replay_copy_settings_data); // HW insts copy_settings_data->aux_inst = replay_context->aux_inst; copy_settings_data->digbe_inst = replay_context->digbe_inst; copy_settings_data->digfe_inst = replay_context->digfe_inst; if (pipe_ctx->plane_res.dpp) copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; else copy_settings_data->dpp_inst = 0; if (pipe_ctx->stream_res.tg) copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst; else copy_settings_data->otg_inst = 0; copy_settings_data->dpphy_inst = link->link_enc->transmitter; // Misc copy_settings_data->line_time_in_ns = replay_context->line_time_in_ns; copy_settings_data->panel_inst = panel_inst; copy_settings_data->debug.u32All = link->replay_settings.config.debug_flags; copy_settings_data->pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable; copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported; copy_settings_data->flags.u32All = 0; copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == dc_link_fec_enabled); copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); // WA for PSRSU+DSC on specific TCON, if DSC is enabled, force PSRSU as ffu mode(full frame update) if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && !link->dc->debug.disable_fec) && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && !link->panel_config.dsc.disable_dsc_edp && link->dc->caps.edp_dsc_support)) && link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 /*&& (!memcmp(link->dpcd_caps.sink_dev_id_str, 
DP_SINK_DEVICE_STR_ID_1, sizeof(DP_SINK_DEVICE_STR_ID_1)) || !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, sizeof(DP_SINK_DEVICE_STR_ID_2)))*/) copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 1; else copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } /* * Set coasting vtotal. */ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, uint16_t coasting_vtotal, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; memset(&cmd, 0, sizeof(cmd)); cmd.replay_set_coasting_vtotal.header.type = DMUB_CMD__REPLAY; cmd.replay_set_coasting_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* * Get Replay residency from firmware. */ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm) { struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; uint16_t param = (uint16_t)(panel_inst << 8); if (is_alpm) param |= REPLAY_RESIDENCY_MODE_ALPM; if (is_start) param |= REPLAY_RESIDENCY_ENABLE; // Send gpint command and wait for ack dmub_srv_send_gpint_command(srv, DMUB_GPINT__REPLAY_RESIDENCY, param, 30); if (!is_start) dmub_srv_get_gpint_response(srv, residency); else *residency = 0; } static const struct dmub_replay_funcs replay_funcs = { .replay_copy_settings = dmub_replay_copy_settings, .replay_enable = dmub_replay_enable, .replay_get_state = dmub_replay_get_state, .replay_set_power_opt = dmub_replay_set_power_opt, .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, .replay_residency = dmub_replay_residency, }; /* * Construct Replay object. */ static void dmub_replay_construct(struct dmub_replay *replay, struct dc_context *ctx) { replay->ctx = ctx; replay->funcs = &replay_funcs; } /* * Allocate and initialize Replay object. */ struct dmub_replay *dmub_replay_create(struct dc_context *ctx) { struct dmub_replay *replay = kzalloc(sizeof(struct dmub_replay), GFP_KERNEL); if (replay == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dmub_replay_construct(replay, ctx); return replay; } /* * Deallocate Replay object. */ void dmub_replay_destroy(struct dmub_replay **dmub) { kfree(*dmub); *dmub = NULL; }
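/*
 * Illustrative sketch only -- not part of dmub_replay.c above. It shows how
 * dmub_replay_residency() packs the GPINT parameter: panel instance in the
 * upper byte, with mode/enable flags OR'ed into the low bits before the
 * command is sent to DMUB firmware. The *_SKETCH flag values are placeholders,
 * not the real REPLAY_RESIDENCY_* defines.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REPLAY_RESIDENCY_MODE_ALPM_SKETCH 0x1u	/* placeholder bit */
#define REPLAY_RESIDENCY_ENABLE_SKETCH    0x2u	/* placeholder bit */

static uint16_t pack_residency_param(uint8_t panel_inst, bool is_start, bool is_alpm)
{
	uint16_t param = (uint16_t)(panel_inst << 8);	/* panel instance in bits 15:8 */

	if (is_alpm)
		param |= REPLAY_RESIDENCY_MODE_ALPM_SKETCH;
	if (is_start)
		param |= REPLAY_RESIDENCY_ENABLE_SKETCH;	/* start counting vs. read back */
	return param;
}

int main(void)
{
	/* Panel 1, start measurement, ALPM mode -> 0x0103 with these placeholder bits. */
	printf("param = 0x%04x\n", pack_residency_param(1, true, true));
	return 0;
}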
linux-master
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
/* * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dce_abm.h" #include "dm_services.h" #include "reg_helper.h" #include "fixed31_32.h" #include "dc.h" #include "atom.h" #define TO_DCE_ABM(abm)\ container_of(abm, struct dce_abm, base) #define REG(reg) \ (abm_dce->regs->reg) #undef FN #define FN(reg_name, field_name) \ abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name #define DC_LOGGER \ abm->ctx->logger #define CTX \ abm_dce->base.ctx #define MCP_ABM_LEVEL_SET 0x65 #define MCP_ABM_PIPE_SET 0x66 #define MCP_BL_SET 0x67 #define MCP_DISABLE_ABM_IMMEDIATELY 255 static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); uint32_t rampingBoundary = 0xFFFF; if (abm->dmcu_is_running == false) return true; REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 80000); /* set ramping boundary */ REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); /* setDMCUParam_Pipe */ REG_UPDATE_2(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET, MASTER_COMM_CMD_REG_BYTE1, controller_id); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 80000); return true; } static void dmcu_set_backlight_level( struct dce_abm *abm_dce, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp, uint32_t controller_id, uint32_t panel_id) { unsigned int backlight_8_bit = 0; uint32_t s2; if (backlight_pwm_u16_16 & 0x10000) // Check for max backlight condition backlight_8_bit = 0xFF; else // Take MSB of fractional part since backlight is not max backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF; dce_abm_set_pipe(&abm_dce->base, controller_id, panel_id); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 80000); /* setDMCUParam_BL */ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16); /* write ramp */ if (controller_id == 0) frame_ramp = 0; REG_WRITE(MASTER_COMM_DATA_REG1, frame_ramp); /* setDMCUParam_Cmd */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); /* UpdateRequestedBacklightLevel */ s2 = REG_READ(BIOS_SCRATCH_2); s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> ATOM_S2_CURRENT_BL_LEVEL_SHIFT); s2 |= (backlight_8_bit << 
ATOM_S2_CURRENT_BL_LEVEL_SHIFT); REG_WRITE(BIOS_SCRATCH_2, s2); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 80000); } static void dce_abm_init(struct abm *abm, uint32_t backlight) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103); REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101); REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101); REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, ABM1_HG_NUM_OF_BINS_SEL, 0, ABM1_HG_VMAX_SEL, 1, ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0); REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0, ABM1_IPCSC_COEFF_SEL_R, 2, ABM1_IPCSC_COEFF_SEL_G, 4, ABM1_IPCSC_COEFF_SEL_B, 2); REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL, BL1_PWM_CURRENT_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL, BL1_PWM_TARGET_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight); REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000); REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0, ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1, ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1, ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); } static unsigned int dce_abm_get_current_backlight(struct abm *abm) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); /* return backlight in hardware format which is unsigned 17 bits, with * 1 bit integer and 16 bit fractional */ return backlight; } static unsigned int dce_abm_get_target_backlight(struct abm *abm) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); /* return backlight in hardware format which is unsigned 17 bits, with * 1 bit integer and 16 bit fractional */ return backlight; } static bool dce_abm_set_level(struct abm *abm, uint32_t level) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); if (abm->dmcu_is_running == false) return true; REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 80000); /* setDMCUParam_ABMLevel */ REG_UPDATE_2(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET, MASTER_COMM_CMD_REG_BYTE2, level); /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); return true; } static bool dce_abm_immediate_disable(struct abm *abm, uint32_t panel_inst) { if (abm->dmcu_is_running == false) return true; dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY, panel_inst); return true; } static bool dce_abm_set_backlight_level_pwm( struct abm *abm, unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, unsigned int controller_id, unsigned int panel_inst) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", backlight_pwm_u16_16, backlight_pwm_u16_16); dmcu_set_backlight_level(abm_dce, backlight_pwm_u16_16, frame_ramp, controller_id, panel_inst); return true; } static const struct abm_funcs dce_funcs = { .abm_init = dce_abm_init, .set_abm_level = dce_abm_set_level, .set_pipe = dce_abm_set_pipe, .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm, .get_current_backlight = dce_abm_get_current_backlight, .get_target_backlight = dce_abm_get_target_backlight, .init_abm_config = NULL, .set_abm_immediate_disable = dce_abm_immediate_disable, }; static void dce_abm_construct( struct dce_abm *abm_dce, struct dc_context *ctx, const struct dce_abm_registers *regs, const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { struct abm *base = 
&abm_dce->base; base->ctx = ctx; base->funcs = &dce_funcs; base->dmcu_is_running = false; abm_dce->regs = regs; abm_dce->abm_shift = abm_shift; abm_dce->abm_mask = abm_mask; } struct abm *dce_abm_create( struct dc_context *ctx, const struct dce_abm_registers *regs, const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC); if (abm_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dce_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask); abm_dce->base.funcs = &dce_funcs; return &abm_dce->base; } void dce_abm_destroy(struct abm **abm) { struct dce_abm *abm_dce = TO_DCE_ABM(*abm); kfree(abm_dce); *abm = NULL; }
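/*
 * Illustrative sketch only -- not part of dce_abm.c above. It isolates the
 * fixed-point conversion performed by dmcu_set_backlight_level(): the
 * backlight is carried as an unsigned 17-bit value (1 integer bit, 16
 * fractional bits), and the 8-bit level written to BIOS_SCRATCH_2 is either
 * 0xFF when the integer bit is set (maximum) or the top byte of the fraction.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t backlight_u16_16_to_8bit(uint32_t backlight_pwm_u16_16)
{
	/* Bit 16 set means "maximum backlight" in this format. */
	if (backlight_pwm_u16_16 & 0x10000)
		return 0xFF;
	/* Otherwise take the most significant byte of the fractional part. */
	return (backlight_pwm_u16_16 >> 8) & 0xFF;
}

int main(void)
{
	printf("full -> 0x%02x\n", backlight_u16_16_to_8bit(0x10000)); /* 0xff */
	printf("50%%  -> 0x%02x\n", backlight_u16_16_to_8bit(0x08000)); /* 0x80 */
	printf("25%%  -> 0x%02x\n", backlight_u16_16_to_8bit(0x04000)); /* 0x40 */
	return 0;
}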
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dmub_abm.h" #include "dmub_abm_lcd.h" #include "dc.h" #include "core_types.h" #include "dmub_cmd.h" #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) #define ABM_FEATURE_NO_SUPPORT 0 #define ABM_LCD_SUPPORT 1 static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst) { struct dc_context *dc = abm->ctx; struct dc_link *edp_links[MAX_NUM_EDP]; int i; int edp_num; unsigned int ret = ABM_FEATURE_NO_SUPPORT; dc_get_edp_links(dc->dc, edp_links, &edp_num); for (i = 0; i < edp_num; i++) { if (panel_inst == i) break; } if (i < edp_num) { ret = ABM_LCD_SUPPORT; } return ret; } static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight) { dmub_abm_init(abm, backlight); } static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm) { return dmub_abm_get_current_backlight(abm); } static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm) { return dmub_abm_get_target_backlight(abm); } static bool dmub_abm_set_level_ex(struct abm *abm, uint32_t level) { bool ret = false; unsigned int feature_support, i; uint8_t panel_mask0 = 0; for (i = 0; i < MAX_NUM_EDP; i++) { feature_support = abm_feature_support(abm, i); if (feature_support == ABM_LCD_SUPPORT) panel_mask0 |= (0x01 << i); } if (panel_mask0) ret = dmub_abm_set_level(abm, level, panel_mask0); return ret; } static bool dmub_abm_init_config_ex(struct abm *abm, const char *src, unsigned int bytes, unsigned int inst) { unsigned int feature_support; feature_support = abm_feature_support(abm, inst); if (feature_support == ABM_LCD_SUPPORT) dmub_abm_init_config(abm, src, bytes, inst); return true; } static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) { bool ret = false; unsigned int feature_support; feature_support = abm_feature_support(abm, panel_inst); if (feature_support == ABM_LCD_SUPPORT) ret = dmub_abm_set_pause(abm, pause, panel_inst, stream_inst); return ret; } /***************************************************************************** * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore for preserving DMUB's * Varibright states for LCD only. 
OLED is TBD * @abm: used to check get dc context * @panel_inst: panel instance index * @pData: contains command to pause/un-pause abm and abm parameters * * ***************************************************************************/ static bool dmub_abm_save_restore_ex( struct abm *abm, unsigned int panel_inst, struct abm_save_restore *pData) { bool ret = false; unsigned int feature_support; struct dc_context *dc = abm->ctx; feature_support = abm_feature_support(abm, panel_inst); if (feature_support == ABM_LCD_SUPPORT) ret = dmub_abm_save_restore(dc, panel_inst, pData); return ret; } static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) { bool ret = false; unsigned int feature_support; feature_support = abm_feature_support(abm, panel_inst); if (feature_support == ABM_LCD_SUPPORT) ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst); return ret; } static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm, unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, unsigned int controller_id, unsigned int panel_inst) { bool ret = false; unsigned int feature_support; feature_support = abm_feature_support(abm, panel_inst); if (feature_support == ABM_LCD_SUPPORT) ret = dmub_abm_set_backlight_level(abm, backlight_pwm_u16_16, frame_ramp, panel_inst); return ret; } static const struct abm_funcs abm_funcs = { .abm_init = dmub_abm_init_ex, .set_abm_level = dmub_abm_set_level_ex, .get_current_backlight = dmub_abm_get_current_backlight_ex, .get_target_backlight = dmub_abm_get_target_backlight_ex, .init_abm_config = dmub_abm_init_config_ex, .set_abm_pause = dmub_abm_set_pause_ex, .save_restore = dmub_abm_save_restore_ex, .set_pipe_ex = dmub_abm_set_pipe_ex, .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex, }; static void dmub_abm_construct( struct dce_abm *abm_dce, struct dc_context *ctx, const struct dce_abm_registers *regs, const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { struct abm *base = &abm_dce->base; base->ctx = ctx; base->funcs = &abm_funcs; base->dmcu_is_running = false; abm_dce->regs = regs; abm_dce->abm_shift = abm_shift; abm_dce->abm_mask = abm_mask; } struct abm *dmub_abm_create( struct dc_context *ctx, const struct dce_abm_registers *regs, const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { if (ctx->dc->caps.dmcub_support) { struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); if (abm_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask); return &abm_dce->base; } return NULL; } void dmub_abm_destroy(struct abm **abm) { struct dce_abm *abm_dce = TO_DMUB_ABM(*abm); kfree(abm_dce); *abm = NULL; }
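/*
 * Illustrative sketch only -- not part of dmub_abm.c above. It reproduces the
 * panel-mask construction used by dmub_abm_set_level_ex(): one bit per eDP
 * panel whose feature check reports LCD ABM support; a zero mask means the
 * DMUB set-level command is skipped. panel_supports_lcd_abm() and
 * MAX_NUM_EDP_SKETCH are stand-ins, not the real abm_feature_support() or
 * MAX_NUM_EDP.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_EDP_SKETCH 2	/* placeholder bound */

/* Stand-in feature check: pretend only panel instance 0 is an ABM-capable LCD. */
static bool panel_supports_lcd_abm(unsigned int panel_inst)
{
	return panel_inst == 0;
}

static uint8_t build_abm_panel_mask(void)
{
	uint8_t mask = 0;
	unsigned int i;

	for (i = 0; i < MAX_NUM_EDP_SKETCH; i++)
		if (panel_supports_lcd_abm(i))
			mask |= (uint8_t)(1u << i);	/* panel i participates */

	return mask;
}

int main(void)
{
	printf("panel_mask = 0x%02x\n", build_abm_panel_mask());	/* 0x01 here */
	return 0;
}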
linux-master
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "core_types.h" #include "dce_aux.h" #include "dce/dce_11_0_sh_mask.h" #include "dm_event_log.h" #include "dm_helpers.h" #include "dmub/inc/dmub_cmd.h" #define CTX \ aux110->base.ctx #define REG(reg_name)\ (aux110->regs->reg_name) #define DC_LOGGER \ engine->ctx->logger #define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0) #define IS_DC_I2CAUX_LOGGING_ENABLED() (false) #define LOG_FLAG_Error_I2cAux LOG_ERROR #define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX #include "reg_helper.h" #undef FN #define FN(reg_name, field_name) \ aux110->shift->field_name, aux110->mask->field_name #define FROM_AUX_ENGINE(ptr) \ container_of((ptr), struct aux_engine_dce110, base) #define FROM_ENGINE(ptr) \ FROM_AUX_ENGINE(container_of((ptr), struct dce_aux, base)) #define FROM_AUX_ENGINE_ENGINE(ptr) \ container_of((ptr), struct dce_aux, base) enum { AUX_INVALID_REPLY_RETRY_COUNTER = 1, AUX_TIMED_OUT_RETRY_COUNTER = 2, AUX_DEFER_RETRY_COUNTER = 6 }; #define TIME_OUT_INCREMENT 1016 #define TIME_OUT_MULTIPLIER_8 8 #define TIME_OUT_MULTIPLIER_16 16 #define TIME_OUT_MULTIPLIER_32 32 #define TIME_OUT_MULTIPLIER_64 64 #define MAX_TIMEOUT_LENGTH 127 #define DEFAULT_AUX_ENGINE_MULT 0 #define DEFAULT_AUX_ENGINE_LENGTH 69 #define DC_TRACE_LEVEL_MESSAGE(...) 
do { } while (0) static void release_engine( struct dce_aux *engine) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); dal_ddc_close(engine->ddc); engine->ddc = NULL; REG_UPDATE_2(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1, AUX_SW_USE_AUX_REG_REQ, 0); } #define SW_CAN_ACCESS_AUX 1 #define DMCU_CAN_ACCESS_AUX 2 static bool is_engine_available( struct dce_aux *engine) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); uint32_t value = REG_READ(AUX_ARB_CONTROL); uint32_t field = get_reg_field_value( value, AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS); return (field != DMCU_CAN_ACCESS_AUX); } static bool acquire_engine( struct dce_aux *engine) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); uint32_t value = REG_READ(AUX_ARB_CONTROL); uint32_t field = get_reg_field_value( value, AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS); if (field == DMCU_CAN_ACCESS_AUX) return false; /* enable AUX before request SW to access AUX */ value = REG_READ(AUX_CONTROL); field = get_reg_field_value(value, AUX_CONTROL, AUX_EN); if (field == 0) { set_reg_field_value( value, 1, AUX_CONTROL, AUX_EN); if (REG(AUX_RESET_MASK)) { /*DP_AUX block as part of the enable sequence*/ set_reg_field_value( value, 1, AUX_CONTROL, AUX_RESET); } REG_WRITE(AUX_CONTROL, value); if (REG(AUX_RESET_MASK)) { /*poll HW to make sure reset it done*/ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1, 1, 11); set_reg_field_value( value, 0, AUX_CONTROL, AUX_RESET); REG_WRITE(AUX_CONTROL, value); REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0, 1, 11); } } /*if (field)*/ /* request SW to access AUX */ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1); value = REG_READ(AUX_ARB_CONTROL); field = get_reg_field_value( value, AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS); return (field == SW_CAN_ACCESS_AUX); } #define COMPOSE_AUX_SW_DATA_16_20(command, address) \ ((command) | ((0xF0000 & (address)) >> 16)) #define COMPOSE_AUX_SW_DATA_8_15(address) \ ((0xFF00 & (address)) >> 8) #define COMPOSE_AUX_SW_DATA_0_7(address) \ (0xFF & (address)) static void submit_channel_request( struct dce_aux *engine, struct aux_request_transaction_data *request) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); uint32_t value; uint32_t length; bool is_write = ((request->type == AUX_TRANSACTION_TYPE_DP) && (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) || ((request->type == AUX_TRANSACTION_TYPE_I2C) && ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT))); if (REG(AUXN_IMPCAL)) { /* clear_aux_error */ REG_UPDATE_SEQ_2(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, 1, AUXN_CALOUT_ERROR_AK, 0); REG_UPDATE_SEQ_2(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, 1, AUXP_CALOUT_ERROR_AK, 0); /* force_default_calibrate */ REG_UPDATE_SEQ_2(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, 1, AUXN_IMPCAL_OVERRIDE_ENABLE, 0); /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */ REG_UPDATE_SEQ_2(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, 1, AUXP_IMPCAL_OVERRIDE_ENABLE, 0); } REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, 10, aux110->polling_timeout_period/10); /* set the delay and the number of bytes to write */ /* The length include * the 4 bit header and the 20 bit address * (that is 3 byte). * If the requested length is non zero this means * an addition byte specifying the length is required. */ length = request->length ? 
4 : 3; if (is_write) length += request->length; REG_UPDATE_2(AUX_SW_CONTROL, AUX_SW_START_DELAY, request->delay, AUX_SW_WR_BYTES, length); /* program action and address and payload data (if 'is_write') */ value = REG_UPDATE_4(AUX_SW_DATA, AUX_SW_INDEX, 0, AUX_SW_DATA_RW, 0, AUX_SW_AUTOINCREMENT_DISABLE, 1, AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address)); value = REG_SET_2(AUX_SW_DATA, value, AUX_SW_AUTOINCREMENT_DISABLE, 0, AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address)); value = REG_SET(AUX_SW_DATA, value, AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address)); if (request->length) { value = REG_SET(AUX_SW_DATA, value, AUX_SW_DATA, request->length - 1); } if (is_write) { /* Load the HW buffer with the Data to be sent. * This is relevant for write operation. * For read, the data recived data will be * processed in process_channel_reply(). */ uint32_t i = 0; while (i < request->length) { value = REG_SET(AUX_SW_DATA, value, AUX_SW_DATA, request->data[i]); ++i; } } REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); EVENT_LOG_AUX_REQ(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE, request->action, request->address, request->length, request->data); } static int read_channel_reply(struct dce_aux *engine, uint32_t size, uint8_t *buffer, uint8_t *reply_result, uint32_t *sw_status) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); uint32_t bytes_replied; uint32_t reply_result_32; *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, &bytes_replied); /* In case HPD is LOW, exit AUX transaction */ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) return -1; /* Need at least the status byte */ if (!bytes_replied) return -1; REG_UPDATE_SEQ_3(AUX_SW_DATA, AUX_SW_INDEX, 0, AUX_SW_AUTOINCREMENT_DISABLE, 1, AUX_SW_DATA_RW, 1); REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32); reply_result_32 = reply_result_32 >> 4; if (reply_result != NULL) *reply_result = (uint8_t)reply_result_32; if (reply_result_32 == 0) { /* ACK */ uint32_t i = 0; /* First byte was already used to get the command status */ --bytes_replied; /* Do not overflow buffer */ if (bytes_replied > size) return -1; while (i < bytes_replied) { uint32_t aux_sw_data_val; REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val); buffer[i] = aux_sw_data_val; ++i; } return i; } return 0; } static enum aux_return_code_type get_channel_status( struct dce_aux *engine, uint8_t *returned_bytes) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); uint32_t value; if (returned_bytes == NULL) { /*caller pass NULL pointer*/ ASSERT_CRITICAL(false); return AUX_RET_ERROR_UNKNOWN; } *returned_bytes = 0; /* poll to make sure that SW_DONE is asserted */ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1, 10, aux110->polling_timeout_period/10); value = REG_READ(AUX_SW_STATUS); /* in case HPD is LOW, exit AUX transaction */ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) return AUX_RET_ERROR_HPD_DISCON; /* Note that the following bits are set in 'status.bits' * during CTS 4.2.1.2 (FW 3.3.1): * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP, * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H. * * AUX_SW_RX_MIN_COUNT_VIOL is an internal, * HW debugging bit and should be ignored. 
*/ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) { if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK)) return AUX_RET_ERROR_TIMEOUT; else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK)) return AUX_RET_ERROR_INVALID_REPLY; *returned_bytes = get_reg_field_value(value, AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT); if (*returned_bytes == 0) return AUX_RET_ERROR_INVALID_REPLY; else { *returned_bytes -= 1; return AUX_RET_SUCCESS; } } else { /*time_elapsed >= aux_engine->timeout_period * AUX_SW_STATUS__AUX_SW_HPD_DISCON = at this point */ ASSERT_CRITICAL(false); return AUX_RET_ERROR_TIMEOUT; } } static bool acquire( struct dce_aux *engine, struct ddc *ddc) { enum gpio_result result; if ((engine == NULL) || !is_engine_available(engine)) return false; result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, GPIO_DDC_CONFIG_TYPE_MODE_AUX); if (result != GPIO_RESULT_OK) return false; if (!acquire_engine(engine)) { engine->ddc = ddc; release_engine(engine); return false; } engine->ddc = ddc; return true; } void dce110_engine_destroy(struct dce_aux **engine) { struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine); kfree(engine110); *engine = NULL; } static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc, uint32_t timeout_in_us) { uint32_t multiplier = 0; uint32_t length = 0; uint32_t prev_length = 0; uint32_t prev_mult = 0; uint32_t prev_timeout_val = 0; struct ddc *ddc_pin = ddc->ddc_pin; struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); /* 1-Update polling timeout period */ aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER; /* 2-Update aux timeout period length and multiplier */ if (timeout_in_us == 0) { multiplier = DEFAULT_AUX_ENGINE_MULT; length = DEFAULT_AUX_ENGINE_LENGTH; } else if (timeout_in_us <= TIME_OUT_INCREMENT) { multiplier = 0; length = timeout_in_us/TIME_OUT_MULTIPLIER_8; if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) length++; } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) { multiplier = 1; length = timeout_in_us/TIME_OUT_MULTIPLIER_16; if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0) length++; } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) { multiplier = 2; length = timeout_in_us/TIME_OUT_MULTIPLIER_32; if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0) length++; } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) { multiplier = 3; length = timeout_in_us/TIME_OUT_MULTIPLIER_64; if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0) length++; } length = (length < MAX_TIMEOUT_LENGTH) ? 
length : MAX_TIMEOUT_LENGTH; REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult); switch (prev_mult) { case 0: prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8; break; case 1: prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16; break; case 2: prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32; break; case 3: prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64; break; default: prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8; break; } REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); return prev_timeout_val; } static struct dce_aux_funcs aux_functions = { .configure_timeout = NULL, .destroy = NULL, }; struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, const struct dce110_aux_registers *regs, const struct dce110_aux_registers_mask *mask, const struct dce110_aux_registers_shift *shift, bool is_ext_aux_timeout_configurable) { aux_engine110->base.ddc = NULL; aux_engine110->base.ctx = ctx; aux_engine110->base.delay = 0; aux_engine110->base.max_defer_write_retry = 0; aux_engine110->base.inst = inst; aux_engine110->polling_timeout_period = timeout_period; aux_engine110->regs = regs; aux_engine110->mask = mask; aux_engine110->shift = shift; aux_engine110->base.funcs = &aux_functions; if (is_ext_aux_timeout_configurable) aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout; return &aux_engine110->base; } static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload) { if (payload->i2c_over_aux) { if (payload->write_status_update) { if (payload->mot) return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT; else return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST; } if (payload->write) { if (payload->mot) return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT; else return I2CAUX_TRANSACTION_ACTION_I2C_WRITE; } if (payload->mot) return I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT; return I2CAUX_TRANSACTION_ACTION_I2C_READ; } if (payload->write) return I2CAUX_TRANSACTION_ACTION_DP_WRITE; return I2CAUX_TRANSACTION_ACTION_DP_READ; } int dce_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) { struct ddc *ddc_pin = ddc->ddc_pin; struct dce_aux *aux_engine; struct aux_request_transaction_data aux_req; uint8_t returned_bytes = 0; int res = -1; uint32_t status; memset(&aux_req, 0, sizeof(aux_req)); if (ddc_pin == NULL) { *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; return -1; } aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; if (!acquire(aux_engine, ddc_pin)) { *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; return -1; } if (payload->i2c_over_aux) aux_req.type = AUX_TRANSACTION_TYPE_I2C; else aux_req.type = AUX_TRANSACTION_TYPE_DP; aux_req.action = i2caux_action_from_payload(payload); aux_req.address = payload->address; aux_req.delay = 0; aux_req.length = payload->length; aux_req.data = payload->data; submit_channel_request(aux_engine, &aux_req); *operation_result = get_channel_status(aux_engine, &returned_bytes); if (*operation_result == AUX_RET_SUCCESS) { int __maybe_unused bytes_replied = 0; bytes_replied = read_channel_reply(aux_engine, payload->length, payload->data, payload->reply, &status); EVENT_LOG_AUX_REP(aux_engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE, *payload->reply, bytes_replied, payload->data); res = returned_bytes; 
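/* Transaction succeeded: get_channel_status() already stripped the reply status byte from returned_bytes, so res counts payload bytes only. */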
} else { res = -1; } release_engine(aux_engine); return res; } int dce_aux_transfer_dmub_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) { struct ddc *ddc_pin = ddc->ddc_pin; if (ddc_pin != NULL) { struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; /* XXX: Workaround to configure ddc channels for aux transactions */ if (!acquire(aux_engine, ddc_pin)) { *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; return -1; } release_engine(aux_engine); } return dm_helper_dmub_aux_transfer_sync(ddc->ctx, ddc->link, payload, operation_result); } #define AUX_MAX_RETRIES 7 #define AUX_MIN_DEFER_RETRIES 7 #define AUX_MAX_DEFER_TIMEOUT_MS 50 #define AUX_MAX_I2C_DEFER_RETRIES 7 #define AUX_MAX_INVALID_REPLY_RETRIES 2 #define AUX_MAX_TIMEOUT_RETRIES 3 #define AUX_DEFER_DELAY_FOR_DPIA 4 /*ms*/ static void dce_aux_log_payload(const char *payload_name, unsigned char *payload, uint32_t length, uint32_t max_length_to_log) { if (!IS_DC_I2CAUX_LOGGING_ENABLED()) return; if (payload && length) { char hex_str[128] = {0}; char *hex_str_ptr = &hex_str[0]; uint32_t hex_str_remaining = sizeof(hex_str); unsigned char *payload_ptr = payload; unsigned char *payload_max_to_log_ptr = payload_ptr + min(max_length_to_log, length); unsigned int count; char *padding = ""; while (payload_ptr < payload_max_to_log_ptr) { count = snprintf_count(hex_str_ptr, hex_str_remaining, "%s%02X", padding, *payload_ptr); padding = " "; hex_str_remaining -= count; hex_str_ptr += count; payload_ptr++; } count = snprintf_count(hex_str_ptr, hex_str_remaining, " "); hex_str_remaining -= count; hex_str_ptr += count; payload_ptr = payload; while (payload_ptr < payload_max_to_log_ptr) { count = snprintf_count(hex_str_ptr, hex_str_remaining, "%c", *payload_ptr >= ' ' ? *payload_ptr : '.'); hex_str_remaining -= count; hex_str_ptr += count; payload_ptr++; } DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE, LOG_FLAG_I2cAux_DceAux, "dce_aux_log_payload: %s: length=%u: data: %s%s", payload_name, length, hex_str, (length > max_length_to_log ? " (...)" : " ")); } else { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE, LOG_FLAG_I2cAux_DceAux, "dce_aux_log_payload: %s: length=%u: data: <empty payload>", payload_name, length); } } bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload) { int i, ret = 0; uint8_t reply; bool payload_reply = true; enum aux_return_code_type operation_result; bool retry_on_defer = false; struct ddc *ddc_pin = ddc->ddc_pin; struct dce_aux *aux_engine = NULL; struct aux_engine_dce110 *aux110 = NULL; uint32_t defer_time_in_ms = 0; int aux_ack_retries = 0, aux_defer_retries = 0, aux_i2c_defer_retries = 0, aux_timeout_retries = 0, aux_invalid_reply_retries = 0, aux_ack_m_retries = 0; if (ddc_pin) { aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; aux110 = FROM_AUX_ENGINE(aux_engine); } if (!payload->reply) { payload_reply = false; payload->reply = &reply; } for (i = 0; i < AUX_MAX_RETRIES; i++) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d", ddc && ddc->link ? 
ddc->link->link_index : UINT_MAX, i + 1, (int)AUX_MAX_RETRIES, payload->address, payload->length, (unsigned int) payload->write, (unsigned int) payload->mot); if (payload->write) dce_aux_log_payload(" write", payload->data, payload->length, 16); ret = dce_aux_transfer_raw(ddc, payload, &operation_result); DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u", ddc && ddc->link ? ddc->link->link_index : UINT_MAX, i + 1, (int)AUX_MAX_RETRIES, payload->address, payload->length, (unsigned int) payload->write, (unsigned int) payload->mot, ret, (int)operation_result, (unsigned int) *payload->reply); if (!payload->write) dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16); switch (operation_result) { case AUX_RET_SUCCESS: aux_timeout_retries = 0; aux_invalid_reply_retries = 0; switch (*payload->reply) { case AUX_TRANSACTION_REPLY_AUX_ACK: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_ACK"); if (!payload->write && payload->length != ret) { if (++aux_ack_retries >= AUX_MAX_RETRIES) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: FAILURE: aux_ack_retries=%d >= AUX_MAX_RETRIES=%d", aux_defer_retries, AUX_MAX_RETRIES); goto fail; } else udelay(300); } else if (payload->write && ret > 0) { /* sink requested more time to complete the write via AUX_ACKM */ if (++aux_ack_m_retries >= AUX_MAX_RETRIES) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: FAILURE: aux_ack_m_retries=%d >= AUX_MAX_RETRIES=%d", aux_ack_m_retries, AUX_MAX_RETRIES); goto fail; } /* retry reading the write status until complete * NOTE: payload is modified here */ payload->write = false; payload->write_status_update = true; payload->length = 0; udelay(300); } else return true; break; case AUX_TRANSACTION_REPLY_AUX_DEFER: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER"); /* polling_timeout_period is in us */ if (aux110) defer_time_in_ms += aux110->polling_timeout_period / 1000; else defer_time_in_ms += AUX_DEFER_DELAY_FOR_DPIA; ++aux_defer_retries; fallthrough; case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER) DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER"); retry_on_defer = true; if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES && defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d && defer_time_in_ms=%d >= AUX_MAX_DEFER_TIMEOUT_MS=%d", aux_defer_retries, AUX_MIN_DEFER_RETRIES, defer_time_in_ms, AUX_MAX_DEFER_TIMEOUT_MS); goto fail; } else { if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) || (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: payload->defer_delay=%u", payload->defer_delay); fsleep(payload->defer_delay * 1000); defer_time_in_ms += payload->defer_delay; } } 
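/* DEFER handled: the sink's requested delay has been honored; break out of the switch and let the retry loop issue the next attempt. */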
break; case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: FAILURE: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK"); goto fail; case AUX_TRANSACTION_REPLY_I2C_DEFER: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_DEFER"); aux_defer_retries = 0; if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: FAILURE: aux_i2c_defer_retries=%d >= AUX_MAX_I2C_DEFER_RETRIES=%d", aux_i2c_defer_retries, AUX_MAX_I2C_DEFER_RETRIES); goto fail; } break; case AUX_TRANSACTION_REPLY_AUX_NACK: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_NACK"); goto fail; case AUX_TRANSACTION_REPLY_HPD_DISCON: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_HPD_DISCON"); goto fail; default: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case. Reply: %d", *payload->reply); goto fail; } break; case AUX_RET_ERROR_INVALID_REPLY: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_ERROR_INVALID_REPLY"); if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: FAILURE: aux_invalid_reply_retries=%d >= AUX_MAX_INVALID_REPLY_RETRIES=%d", aux_invalid_reply_retries, AUX_MAX_INVALID_REPLY_RETRIES); goto fail; } else udelay(400); break; case AUX_RET_ERROR_TIMEOUT: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: AUX_RET_ERROR_TIMEOUT"); // Check whether a DEFER had occurred before the timeout. // If so, treat timeout as a DEFER. if (retry_on_defer) { if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d", aux_defer_retries, AUX_MIN_DEFER_RETRIES); goto fail; } else if (payload->defer_delay > 0) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: payload->defer_delay=%u", payload->defer_delay); msleep(payload->defer_delay); } } else { if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) { DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "dce_aux_transfer_with_retries: FAILURE: aux_timeout_retries=%d >= AUX_MAX_TIMEOUT_RETRIES=%d", aux_timeout_retries, AUX_MAX_TIMEOUT_RETRIES); goto fail; } else { /* * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts * According to the DP spec there should be 3 retries total * with a 400us wait inbetween each. Hardware already waits * for 550us therefore no wait is required here. */ } } break; case AUX_RET_ERROR_HPD_DISCON: case AUX_RET_ERROR_ENGINE_ACQUIRE: case AUX_RET_ERROR_UNKNOWN: default: goto fail; } } fail: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, "%s: Failure: operation_result=%d", __func__, (int)operation_result); if (!payload_reply) payload->reply = NULL; return false; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
/* * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce_transform.h" #include "reg_helper.h" #include "opp.h" #include "basics/conversion.h" #include "dc.h" #define REG(reg) \ (xfm_dce->regs->reg) #undef FN #define FN(reg_name, field_name) \ xfm_dce->xfm_shift->field_name, xfm_dce->xfm_mask->field_name #define CTX \ xfm_dce->base.ctx #define DC_LOGGER \ xfm_dce->base.ctx->logger #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) #define GAMUT_MATRIX_SIZE 12 #define SCL_PHASES 16 enum dcp_out_trunc_round_mode { DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE, DCP_OUT_TRUNC_ROUND_MODE_ROUND }; enum dcp_out_trunc_round_depth { DCP_OUT_TRUNC_ROUND_DEPTH_14BIT, DCP_OUT_TRUNC_ROUND_DEPTH_13BIT, DCP_OUT_TRUNC_ROUND_DEPTH_12BIT, DCP_OUT_TRUNC_ROUND_DEPTH_11BIT, DCP_OUT_TRUNC_ROUND_DEPTH_10BIT, DCP_OUT_TRUNC_ROUND_DEPTH_9BIT, DCP_OUT_TRUNC_ROUND_DEPTH_8BIT }; /* defines the various methods of bit reduction available for use */ enum dcp_bit_depth_reduction_mode { DCP_BIT_DEPTH_REDUCTION_MODE_DITHER, DCP_BIT_DEPTH_REDUCTION_MODE_ROUND, DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE, DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED, DCP_BIT_DEPTH_REDUCTION_MODE_INVALID }; enum dcp_spatial_dither_mode { DCP_SPATIAL_DITHER_MODE_AAAA, DCP_SPATIAL_DITHER_MODE_A_AA_A, DCP_SPATIAL_DITHER_MODE_AABBAABB, DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC, DCP_SPATIAL_DITHER_MODE_INVALID }; enum dcp_spatial_dither_depth { DCP_SPATIAL_DITHER_DEPTH_30BPP, DCP_SPATIAL_DITHER_DEPTH_24BPP }; enum csc_color_mode { /* 00 - BITS2:0 Bypass */ CSC_COLOR_MODE_GRAPHICS_BYPASS, /* 01 - hard coded coefficient TV RGB */ CSC_COLOR_MODE_GRAPHICS_PREDEFINED, /* 04 - programmable OUTPUT CSC coefficient */ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC, }; enum grph_color_adjust_option { GRPH_COLOR_MATRIX_HW_DEFAULT = 1, GRPH_COLOR_MATRIX_SW }; static const struct out_csc_color_matrix global_color_matrix[] = { { COLOR_SPACE_SRGB, { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, { COLOR_SPACE_SRGB_LIMITED, { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} }, { COLOR_SPACE_YCBCR601, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} }, { COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }, /* TODO: correct values below */ { COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991, 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} }, { 
COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3, 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} } }; static bool setup_scaling_configuration( struct dce_transform *xfm_dce, const struct scaler_data *data) { REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0); if (data->taps.h_taps + data->taps.v_taps <= 2) { /* Set bypass */ if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0) REG_UPDATE_2(SCL_MODE, SCL_MODE, 0, SCL_PSCL_EN, 0); else REG_UPDATE(SCL_MODE, SCL_MODE, 0); return false; } REG_SET_2(SCL_TAP_CONTROL, 0, SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1, SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1); if (data->format <= PIXEL_FORMAT_GRPH_END) REG_UPDATE(SCL_MODE, SCL_MODE, 1); else REG_UPDATE(SCL_MODE, SCL_MODE, 2); if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0) REG_UPDATE(SCL_MODE, SCL_PSCL_EN, 1); /* 1 - Replace out of bound pixels with edge */ REG_SET(SCL_CONTROL, 0, SCL_BOUNDARY_MODE, 1); return true; } #if defined(CONFIG_DRM_AMD_DC_SI) static bool dce60_setup_scaling_configuration( struct dce_transform *xfm_dce, const struct scaler_data *data) { REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0); if (data->taps.h_taps + data->taps.v_taps <= 2) { /* Set bypass */ /* DCE6 has no SCL_MODE register, skip scale mode programming */ return false; } REG_SET_2(SCL_TAP_CONTROL, 0, SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1, SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1); /* DCE6 has no SCL_MODE register, skip scale mode programming */ /* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */ return true; } #endif static void program_overscan( struct dce_transform *xfm_dce, const struct scaler_data *data) { int overscan_right = data->h_active - data->recout.x - data->recout.width; int overscan_bottom = data->v_active - data->recout.y - data->recout.height; if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { overscan_bottom += 2; overscan_right += 2; } if (overscan_right < 0) { BREAK_TO_DEBUGGER(); overscan_right = 0; } if (overscan_bottom < 0) { BREAK_TO_DEBUGGER(); overscan_bottom = 0; } REG_SET_2(EXT_OVERSCAN_LEFT_RIGHT, 0, EXT_OVERSCAN_LEFT, data->recout.x, EXT_OVERSCAN_RIGHT, overscan_right); REG_SET_2(EXT_OVERSCAN_TOP_BOTTOM, 0, EXT_OVERSCAN_TOP, data->recout.y, EXT_OVERSCAN_BOTTOM, overscan_bottom); } static void program_multi_taps_filter( struct dce_transform *xfm_dce, int taps, const uint16_t *coeffs, enum ram_filter_type filter_type) { int phase, pair; int array_idx = 0; int taps_pairs = (taps + 1) / 2; int phases_to_program = SCL_PHASES / 2 + 1; uint32_t power_ctl = 0; if (!coeffs) return; /*We need to disable power gating on coeff memory to do programming*/ if (REG(DCFE_MEM_PWR_CTRL)) { power_ctl = REG_READ(DCFE_MEM_PWR_CTRL); REG_SET(DCFE_MEM_PWR_CTRL, power_ctl, SCL_COEFF_MEM_PWR_DIS, 1); REG_WAIT(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, 0, 1, 10); } for (phase = 0; phase < phases_to_program; phase++) { /*we always program N/2 + 1 phases, total phases N, but N/2-1 are just mirror phase 0 is unique and phase N/2 is unique if N is even*/ for (pair = 0; pair < taps_pairs; pair++) { uint16_t odd_coeff = 0; uint16_t even_coeff = coeffs[array_idx]; REG_SET_3(SCL_COEF_RAM_SELECT, 0, SCL_C_RAM_FILTER_TYPE, filter_type, SCL_C_RAM_PHASE, phase, SCL_C_RAM_TAP_PAIR_IDX, pair); if (taps % 2 && pair == taps_pairs - 1) array_idx++; else { odd_coeff = coeffs[array_idx + 1]; array_idx += 2; } REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0, SCL_C_RAM_EVEN_TAP_COEF_EN, 1, SCL_C_RAM_EVEN_TAP_COEF, even_coeff, SCL_C_RAM_ODD_TAP_COEF_EN, 1, SCL_C_RAM_ODD_TAP_COEF, 
odd_coeff); } } /*We need to restore power gating on coeff memory to initial state*/ if (REG(DCFE_MEM_PWR_CTRL)) REG_WRITE(DCFE_MEM_PWR_CTRL, power_ctl); } static void program_viewport( struct dce_transform *xfm_dce, const struct rect *view_port) { REG_SET_2(VIEWPORT_START, 0, VIEWPORT_X_START, view_port->x, VIEWPORT_Y_START, view_port->y); REG_SET_2(VIEWPORT_SIZE, 0, VIEWPORT_HEIGHT, view_port->height, VIEWPORT_WIDTH, view_port->width); /* TODO: add stereo support */ } static void calculate_inits( struct dce_transform *xfm_dce, const struct scaler_data *data, struct scl_ratios_inits *inits) { struct fixed31_32 h_init; struct fixed31_32 v_init; inits->h_int_scale_ratio = dc_fixpt_u2d19(data->ratios.horz) << 5; inits->v_int_scale_ratio = dc_fixpt_u2d19(data->ratios.vert) << 5; h_init = dc_fixpt_div_int( dc_fixpt_add( data->ratios.horz, dc_fixpt_from_int(data->taps.h_taps + 1)), 2); inits->h_init.integer = dc_fixpt_floor(h_init); inits->h_init.fraction = dc_fixpt_u0d19(h_init) << 5; v_init = dc_fixpt_div_int( dc_fixpt_add( data->ratios.vert, dc_fixpt_from_int(data->taps.v_taps + 1)), 2); inits->v_init.integer = dc_fixpt_floor(v_init); inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5; } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_calculate_inits( struct dce_transform *xfm_dce, const struct scaler_data *data, struct sclh_ratios_inits *inits) { struct fixed31_32 v_init; inits->h_int_scale_ratio = dc_fixpt_u2d19(data->ratios.horz) << 5; inits->v_int_scale_ratio = dc_fixpt_u2d19(data->ratios.vert) << 5; /* DCE6 h_init_luma setting inspired by DCE110 */ inits->h_init_luma.integer = 1; /* DCE6 h_init_chroma setting inspired by DCE110 */ inits->h_init_chroma.integer = 1; v_init = dc_fixpt_div_int( dc_fixpt_add( data->ratios.vert, dc_fixpt_from_int(data->taps.v_taps + 1)), 2); inits->v_init.integer = dc_fixpt_floor(v_init); inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5; } #endif static void program_scl_ratios_inits( struct dce_transform *xfm_dce, struct scl_ratios_inits *inits) { REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, SCL_H_SCALE_RATIO, inits->h_int_scale_ratio); REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, SCL_V_SCALE_RATIO, inits->v_int_scale_ratio); REG_SET_2(SCL_HORZ_FILTER_INIT, 0, SCL_H_INIT_INT, inits->h_init.integer, SCL_H_INIT_FRAC, inits->h_init.fraction); REG_SET_2(SCL_VERT_FILTER_INIT, 0, SCL_V_INIT_INT, inits->v_init.integer, SCL_V_INIT_FRAC, inits->v_init.fraction); REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_program_scl_ratios_inits( struct dce_transform *xfm_dce, struct sclh_ratios_inits *inits) { REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, SCL_H_SCALE_RATIO, inits->h_int_scale_ratio); REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, SCL_V_SCALE_RATIO, inits->v_int_scale_ratio); /* DCE6 has SCL_HORZ_FILTER_INIT_RGB_LUMA register */ REG_SET_2(SCL_HORZ_FILTER_INIT_RGB_LUMA, 0, SCL_H_INIT_INT_RGB_Y, inits->h_init_luma.integer, SCL_H_INIT_FRAC_RGB_Y, inits->h_init_luma.fraction); /* DCE6 has SCL_HORZ_FILTER_INIT_CHROMA register */ REG_SET_2(SCL_HORZ_FILTER_INIT_CHROMA, 0, SCL_H_INIT_INT_CBCR, inits->h_init_chroma.integer, SCL_H_INIT_FRAC_CBCR, inits->h_init_chroma.fraction); REG_SET_2(SCL_VERT_FILTER_INIT, 0, SCL_V_INIT_INT, inits->v_init.integer, SCL_V_INIT_FRAC, inits->v_init.fraction); REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); } #endif static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio) { if (taps == 4) return get_filter_4tap_16p(ratio); else if (taps == 3) return get_filter_3tap_16p(ratio); else if (taps 
== 2) return get_filter_2tap_16p(); else if (taps == 1) return NULL; else { /* should never happen, bug */ BREAK_TO_DEBUGGER(); return NULL; } } static void dce_transform_set_scaler( struct transform *xfm, const struct scaler_data *data) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); bool is_scaling_required; bool filter_updated = false; const uint16_t *coeffs_v, *coeffs_h; /*Use all three pieces of memory always*/ REG_SET_2(LB_MEMORY_CTRL, 0, LB_MEMORY_CONFIG, 0, LB_MEMORY_SIZE, xfm_dce->lb_memory_size); /* Clear SCL_F_SHARP_CONTROL value to 0 */ REG_WRITE(SCL_F_SHARP_CONTROL, 0); /* 1. Program overscan */ program_overscan(xfm_dce, data); /* 2. Program taps and configuration */ is_scaling_required = setup_scaling_configuration(xfm_dce, data); if (is_scaling_required) { /* 3. Calculate and program ratio, filter initialization */ struct scl_ratios_inits inits = { 0 }; calculate_inits(xfm_dce, data, &inits); program_scl_ratios_inits(xfm_dce, &inits); coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert); coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz); if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) { /* 4. Program vertical filters */ if (xfm_dce->filter_v == NULL) REG_SET(SCL_VERT_FILTER_CONTROL, 0, SCL_V_2TAP_HARDCODE_COEF_EN, 0); program_multi_taps_filter( xfm_dce, data->taps.v_taps, coeffs_v, FILTER_TYPE_RGB_Y_VERTICAL); program_multi_taps_filter( xfm_dce, data->taps.v_taps, coeffs_v, FILTER_TYPE_ALPHA_VERTICAL); /* 5. Program horizontal filters */ if (xfm_dce->filter_h == NULL) REG_SET(SCL_HORZ_FILTER_CONTROL, 0, SCL_H_2TAP_HARDCODE_COEF_EN, 0); program_multi_taps_filter( xfm_dce, data->taps.h_taps, coeffs_h, FILTER_TYPE_RGB_Y_HORIZONTAL); program_multi_taps_filter( xfm_dce, data->taps.h_taps, coeffs_h, FILTER_TYPE_ALPHA_HORIZONTAL); xfm_dce->filter_v = coeffs_v; xfm_dce->filter_h = coeffs_h; filter_updated = true; } } /* 6. Program the viewport */ program_viewport(xfm_dce, &data->viewport); /* 7. Set bit to flip to new coefficient memory */ if (filter_updated) REG_UPDATE(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, 1); REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_transform_set_scaler( struct transform *xfm, const struct scaler_data *data) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); bool is_scaling_required; const uint16_t *coeffs_v, *coeffs_h; /*Use whole line buffer memory always*/ REG_SET(DC_LB_MEMORY_SPLIT, 0, DC_LB_MEMORY_CONFIG, 0); REG_SET(DC_LB_MEM_SIZE, 0, DC_LB_MEM_SIZE, xfm_dce->lb_memory_size); /* Clear SCL_F_SHARP_CONTROL value to 0 */ REG_WRITE(SCL_F_SHARP_CONTROL, 0); /* 1. Program overscan */ program_overscan(xfm_dce, data); /* 2. Program taps and configuration */ is_scaling_required = dce60_setup_scaling_configuration(xfm_dce, data); if (is_scaling_required) { /* 3. Calculate and program ratio, DCE6 filter initialization */ struct sclh_ratios_inits inits = { 0 }; /* DCE6 has specific calculate_inits() function */ dce60_calculate_inits(xfm_dce, data, &inits); /* DCE6 has specific program_scl_ratios_inits() function */ dce60_program_scl_ratios_inits(xfm_dce, &inits); coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert); coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz); if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) { /* 4. 
Program vertical filters */ if (xfm_dce->filter_v == NULL) REG_SET(SCL_VERT_FILTER_CONTROL, 0, SCL_V_2TAP_HARDCODE_COEF_EN, 0); program_multi_taps_filter( xfm_dce, data->taps.v_taps, coeffs_v, FILTER_TYPE_RGB_Y_VERTICAL); program_multi_taps_filter( xfm_dce, data->taps.v_taps, coeffs_v, FILTER_TYPE_ALPHA_VERTICAL); /* 5. Program horizontal filters */ if (xfm_dce->filter_h == NULL) REG_SET(SCL_HORZ_FILTER_CONTROL, 0, SCL_H_2TAP_HARDCODE_COEF_EN, 0); program_multi_taps_filter( xfm_dce, data->taps.h_taps, coeffs_h, FILTER_TYPE_RGB_Y_HORIZONTAL); program_multi_taps_filter( xfm_dce, data->taps.h_taps, coeffs_h, FILTER_TYPE_ALPHA_HORIZONTAL); xfm_dce->filter_v = coeffs_v; xfm_dce->filter_h = coeffs_h; } } /* 6. Program the viewport */ program_viewport(xfm_dce, &data->viewport); /* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */ /* DCE6 DATA_FORMAT register does not support ALPHA_EN */ } #endif /***************************************************************************** * set_clamp * * @param depth : bit depth to set the clamp to (should match denorm) * * @brief * Programs clamp according to panel bit depth. * *******************************************************************************/ static void set_clamp( struct dce_transform *xfm_dce, enum dc_color_depth depth) { int clamp_max = 0; /* At the clamp block the data will be MSB aligned, so we set the max * clamp accordingly. * For example, the max value for 6 bits MSB aligned (14 bit bus) would * be "11 1111 0000 0000" in binary, so 0x3F00. */ switch (depth) { case COLOR_DEPTH_666: /* 6bit MSB aligned on 14 bit bus '11 1111 0000 0000' */ clamp_max = 0x3F00; break; case COLOR_DEPTH_888: /* 8bit MSB aligned on 14 bit bus '11 1111 1100 0000' */ clamp_max = 0x3FC0; break; case COLOR_DEPTH_101010: /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */ clamp_max = 0x3FF0; break; case COLOR_DEPTH_121212: /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */ clamp_max = 0x3FFC; break; default: clamp_max = 0x3FC0; BREAK_TO_DEBUGGER(); /* Invalid clamp bit depth */ } REG_SET_2(OUT_CLAMP_CONTROL_B_CB, 0, OUT_CLAMP_MIN_B_CB, 0, OUT_CLAMP_MAX_B_CB, clamp_max); REG_SET_2(OUT_CLAMP_CONTROL_G_Y, 0, OUT_CLAMP_MIN_G_Y, 0, OUT_CLAMP_MAX_G_Y, clamp_max); REG_SET_2(OUT_CLAMP_CONTROL_R_CR, 0, OUT_CLAMP_MIN_R_CR, 0, OUT_CLAMP_MAX_R_CR, clamp_max); } /******************************************************************************* * set_round * * @brief * Programs Round/Truncate * * @param [in] mode :round or truncate * @param [in] depth :bit depth to round/truncate to OUT_ROUND_TRUNC_MODE 3:0 0xA Output data round or truncate mode POSSIBLE VALUES: 00 - truncate to u0.12 01 - truncate to u0.11 02 - truncate to u0.10 03 - truncate to u0.9 04 - truncate to u0.8 05 - reserved 06 - truncate to u0.14 07 - truncate to u0.13 set_reg_field_value( value, clamp_max, OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR); 08 - round to u0.12 09 - round to u0.11 10 - round to u0.10 11 - round to u0.9 12 - round to u0.8 13 - reserved 14 - round to u0.14 15 - round to u0.13 ******************************************************************************/ static void set_round( struct dce_transform *xfm_dce, enum dcp_out_trunc_round_mode mode, enum dcp_out_trunc_round_depth depth) { int depth_bits = 0; int mode_bit = 0; /* set up bit depth */ switch (depth) { case DCP_OUT_TRUNC_ROUND_DEPTH_14BIT: depth_bits = 6; break; case DCP_OUT_TRUNC_ROUND_DEPTH_13BIT: depth_bits = 7; break; case DCP_OUT_TRUNC_ROUND_DEPTH_12BIT: depth_bits = 0; break; case 
DCP_OUT_TRUNC_ROUND_DEPTH_11BIT: depth_bits = 1; break; case DCP_OUT_TRUNC_ROUND_DEPTH_10BIT: depth_bits = 2; break; case DCP_OUT_TRUNC_ROUND_DEPTH_9BIT: depth_bits = 3; break; case DCP_OUT_TRUNC_ROUND_DEPTH_8BIT: depth_bits = 4; break; default: depth_bits = 4; BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_depth */ } /* set up round or truncate */ switch (mode) { case DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE: mode_bit = 0; break; case DCP_OUT_TRUNC_ROUND_MODE_ROUND: mode_bit = 1; break; default: BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_mode */ } depth_bits |= mode_bit << 3; REG_SET(OUT_ROUND_CONTROL, 0, OUT_ROUND_TRUNC_MODE, depth_bits); } /***************************************************************************** * set_dither * * @brief * Programs Dither * * @param [in] dither_enable : enable dither * @param [in] dither_mode : dither mode to set * @param [in] dither_depth : bit depth to dither to * @param [in] frame_random_enable : enable frame random * @param [in] rgb_random_enable : enable rgb random * @param [in] highpass_random_enable : enable highpass random * ******************************************************************************/ static void set_dither( struct dce_transform *xfm_dce, bool dither_enable, enum dcp_spatial_dither_mode dither_mode, enum dcp_spatial_dither_depth dither_depth, bool frame_random_enable, bool rgb_random_enable, bool highpass_random_enable) { int dither_depth_bits = 0; int dither_mode_bits = 0; switch (dither_mode) { case DCP_SPATIAL_DITHER_MODE_AAAA: dither_mode_bits = 0; break; case DCP_SPATIAL_DITHER_MODE_A_AA_A: dither_mode_bits = 1; break; case DCP_SPATIAL_DITHER_MODE_AABBAABB: dither_mode_bits = 2; break; case DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC: dither_mode_bits = 3; break; default: /* Invalid dcp_spatial_dither_mode */ BREAK_TO_DEBUGGER(); } switch (dither_depth) { case DCP_SPATIAL_DITHER_DEPTH_30BPP: dither_depth_bits = 0; break; case DCP_SPATIAL_DITHER_DEPTH_24BPP: dither_depth_bits = 1; break; default: /* Invalid dcp_spatial_dither_depth */ BREAK_TO_DEBUGGER(); } /* write the register */ REG_SET_6(DCP_SPATIAL_DITHER_CNTL, 0, DCP_SPATIAL_DITHER_EN, dither_enable, DCP_SPATIAL_DITHER_MODE, dither_mode_bits, DCP_SPATIAL_DITHER_DEPTH, dither_depth_bits, DCP_FRAME_RANDOM_ENABLE, frame_random_enable, DCP_RGB_RANDOM_ENABLE, rgb_random_enable, DCP_HIGHPASS_RANDOM_ENABLE, highpass_random_enable); } /***************************************************************************** * dce_transform_bit_depth_reduction_program * * @brief * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate, * Dither) for dce * * @param depth : bit depth to set the clamp to (should match denorm) * ******************************************************************************/ static void program_bit_depth_reduction( struct dce_transform *xfm_dce, enum dc_color_depth depth, const struct bit_depth_reduction_params *bit_depth_params) { enum dcp_out_trunc_round_depth trunc_round_depth; enum dcp_out_trunc_round_mode trunc_mode; bool spatial_dither_enable; ASSERT(depth <= COLOR_DEPTH_121212); /* Invalid clamp bit depth */ spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED; /* Default to 12 bit truncation without rounding */ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT; trunc_mode = DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE; if (bit_depth_params->flags.TRUNCATE_ENABLED) { /* Don't enable dithering if truncation is enabled */ spatial_dither_enable = false; trunc_mode = bit_depth_params->flags.TRUNCATE_MODE ? 
DCP_OUT_TRUNC_ROUND_MODE_ROUND : DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE; if (bit_depth_params->flags.TRUNCATE_DEPTH == 0 || bit_depth_params->flags.TRUNCATE_DEPTH == 1) trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_8BIT; else if (bit_depth_params->flags.TRUNCATE_DEPTH == 2) trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_10BIT; else { /* * Invalid truncate/round depth. Setting here to 12bit * to prevent use-before-initialize errors. */ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT; BREAK_TO_DEBUGGER(); } } set_clamp(xfm_dce, depth); set_round(xfm_dce, trunc_mode, trunc_round_depth); set_dither(xfm_dce, spatial_dither_enable, DCP_SPATIAL_DITHER_MODE_A_AA_A, DCP_SPATIAL_DITHER_DEPTH_30BPP, bit_depth_params->flags.FRAME_RANDOM, bit_depth_params->flags.RGB_RANDOM, bit_depth_params->flags.HIGHPASS_RANDOM); } #if defined(CONFIG_DRM_AMD_DC_SI) /***************************************************************************** * dce60_transform_bit_depth_reduction program * * @brief * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate, * Dither) for dce * * @param depth : bit depth to set the clamp to (should match denorm) * ******************************************************************************/ static void dce60_program_bit_depth_reduction( struct dce_transform *xfm_dce, enum dc_color_depth depth, const struct bit_depth_reduction_params *bit_depth_params) { enum dcp_out_trunc_round_depth trunc_round_depth; enum dcp_out_trunc_round_mode trunc_mode; bool spatial_dither_enable; ASSERT(depth <= COLOR_DEPTH_121212); /* Invalid clamp bit depth */ spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED; /* Default to 12 bit truncation without rounding */ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT; trunc_mode = DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE; if (bit_depth_params->flags.TRUNCATE_ENABLED) { /* Don't enable dithering if truncation is enabled */ spatial_dither_enable = false; trunc_mode = bit_depth_params->flags.TRUNCATE_MODE ? DCP_OUT_TRUNC_ROUND_MODE_ROUND : DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE; if (bit_depth_params->flags.TRUNCATE_DEPTH == 0 || bit_depth_params->flags.TRUNCATE_DEPTH == 1) trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_8BIT; else if (bit_depth_params->flags.TRUNCATE_DEPTH == 2) trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_10BIT; else { /* * Invalid truncate/round depth. Setting here to 12bit * to prevent use-before-initialize errors. */ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT; BREAK_TO_DEBUGGER(); } } /* DCE6 has no OUT_CLAMP_CONTROL_* registers - set_clamp() is skipped */ set_round(xfm_dce, trunc_mode, trunc_round_depth); set_dither(xfm_dce, spatial_dither_enable, DCP_SPATIAL_DITHER_MODE_A_AA_A, DCP_SPATIAL_DITHER_DEPTH_30BPP, bit_depth_params->flags.FRAME_RANDOM, bit_depth_params->flags.RGB_RANDOM, bit_depth_params->flags.HIGHPASS_RANDOM); } #endif static int dce_transform_get_max_num_of_supported_lines( struct dce_transform *xfm_dce, enum lb_pixel_depth depth, int pixel_width) { int pixels_per_entries = 0; int max_pixels_supports = 0; ASSERT(pixel_width); /* Find number of pixels that can fit into a single LB entry and * take floor of the value since we cannot store a single pixel * across multiple entries. 
*/ switch (depth) { case LB_PIXEL_DEPTH_18BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 18; break; case LB_PIXEL_DEPTH_24BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 24; break; case LB_PIXEL_DEPTH_30BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 30; break; case LB_PIXEL_DEPTH_36BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 36; break; default: DC_LOG_WARNING("%s: Invalid LB pixel depth", __func__); BREAK_TO_DEBUGGER(); break; } ASSERT(pixels_per_entries); max_pixels_supports = pixels_per_entries * xfm_dce->lb_memory_size; return (max_pixels_supports / pixel_width); } static void set_denormalization( struct dce_transform *xfm_dce, enum dc_color_depth depth) { int denorm_mode = 0; switch (depth) { case COLOR_DEPTH_666: /* 63/64 for 6 bit output color depth */ denorm_mode = 1; break; case COLOR_DEPTH_888: /* Unity for 8 bit output color depth * because prescale is disabled by default */ denorm_mode = 0; break; case COLOR_DEPTH_101010: /* 1023/1024 for 10 bit output color depth */ denorm_mode = 3; break; case COLOR_DEPTH_121212: /* 4095/4096 for 12 bit output color depth */ denorm_mode = 5; break; case COLOR_DEPTH_141414: case COLOR_DEPTH_161616: default: /* not valid used case! */ break; } REG_SET(DENORM_CONTROL, 0, DENORM_MODE, denorm_mode); } static void dce_transform_set_pixel_storage_depth( struct transform *xfm, enum lb_pixel_depth depth, const struct bit_depth_reduction_params *bit_depth_params) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); int pixel_depth, expan_mode; enum dc_color_depth color_depth; switch (depth) { case LB_PIXEL_DEPTH_18BPP: color_depth = COLOR_DEPTH_666; pixel_depth = 2; expan_mode = 1; break; case LB_PIXEL_DEPTH_24BPP: color_depth = COLOR_DEPTH_888; pixel_depth = 1; expan_mode = 1; break; case LB_PIXEL_DEPTH_30BPP: color_depth = COLOR_DEPTH_101010; pixel_depth = 0; expan_mode = 1; break; case LB_PIXEL_DEPTH_36BPP: color_depth = COLOR_DEPTH_121212; pixel_depth = 3; expan_mode = 0; break; default: color_depth = COLOR_DEPTH_101010; pixel_depth = 0; expan_mode = 1; DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth); break; } set_denormalization(xfm_dce, color_depth); program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params); REG_UPDATE_2(LB_DATA_FORMAT, PIXEL_DEPTH, pixel_depth, PIXEL_EXPAN_MODE, expan_mode); if (!(xfm_dce->lb_pixel_depth_supported & depth)) { /*we should use unsupported capabilities * unless it is required by w/a*/ DC_LOG_DC("%s: Capability not supported", __func__); } } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_transform_set_pixel_storage_depth( struct transform *xfm, enum lb_pixel_depth depth, const struct bit_depth_reduction_params *bit_depth_params) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); enum dc_color_depth color_depth; switch (depth) { case LB_PIXEL_DEPTH_18BPP: color_depth = COLOR_DEPTH_666; break; case LB_PIXEL_DEPTH_24BPP: color_depth = COLOR_DEPTH_888; break; case LB_PIXEL_DEPTH_30BPP: color_depth = COLOR_DEPTH_101010; break; case LB_PIXEL_DEPTH_36BPP: color_depth = COLOR_DEPTH_121212; break; default: color_depth = COLOR_DEPTH_101010; BREAK_TO_DEBUGGER(); break; } set_denormalization(xfm_dce, color_depth); dce60_program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params); /* DATA_FORMAT in DCE6 does not have PIXEL_DEPTH and PIXEL_EXPAN_MODE masks */ if (!(xfm_dce->lb_pixel_depth_supported & depth)) { /*we should use unsupported capabilities * unless it is required by w/a*/ DC_LOG_WARNING("%s: Capability not supported", 
__func__); } } #endif static void program_gamut_remap( struct dce_transform *xfm_dce, const uint16_t *reg_val) { if (reg_val) { REG_SET_2(GAMUT_REMAP_C11_C12, 0, GAMUT_REMAP_C11, reg_val[0], GAMUT_REMAP_C12, reg_val[1]); REG_SET_2(GAMUT_REMAP_C13_C14, 0, GAMUT_REMAP_C13, reg_val[2], GAMUT_REMAP_C14, reg_val[3]); REG_SET_2(GAMUT_REMAP_C21_C22, 0, GAMUT_REMAP_C21, reg_val[4], GAMUT_REMAP_C22, reg_val[5]); REG_SET_2(GAMUT_REMAP_C23_C24, 0, GAMUT_REMAP_C23, reg_val[6], GAMUT_REMAP_C24, reg_val[7]); REG_SET_2(GAMUT_REMAP_C31_C32, 0, GAMUT_REMAP_C31, reg_val[8], GAMUT_REMAP_C32, reg_val[9]); REG_SET_2(GAMUT_REMAP_C33_C34, 0, GAMUT_REMAP_C33, reg_val[10], GAMUT_REMAP_C34, reg_val[11]); REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 1); } else REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 0); } /* ***************************************************************************** * Function: dal_transform_wide_gamut_set_gamut_remap * * @param [in] const struct xfm_grph_csc_adjustment *adjust * * @return * void * * @note calculate and apply color temperature adjustment to in Rgb color space * * @see * ***************************************************************************** */ static void dce_transform_set_gamut_remap( struct transform *xfm, const struct xfm_grph_csc_adjustment *adjust) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); int i = 0; if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) /* Bypass if type is bypass or hw */ program_gamut_remap(xfm_dce, NULL); else { struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE]; uint16_t arr_reg_val[GAMUT_MATRIX_SIZE]; for (i = 0; i < GAMUT_MATRIX_SIZE; i++) arr_matrix[i] = adjust->temperature_matrix[i]; convert_float_matrix( arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE); program_gamut_remap(xfm_dce, arr_reg_val); } } static uint32_t decide_taps(struct fixed31_32 ratio, uint32_t in_taps, bool chroma) { uint32_t taps; if (IDENTITY_RATIO(ratio)) { return 1; } else if (in_taps != 0) { taps = in_taps; } else { taps = 4; } if (chroma) { taps /= 2; if (taps < 2) taps = 2; } return taps; } bool dce_transform_get_optimal_number_of_taps( struct transform *xfm, struct scaler_data *scl_data, const struct scaling_taps *in_taps) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); int pixel_width = scl_data->viewport.width; int max_num_of_lines; if (xfm_dce->prescaler_on && (scl_data->viewport.width > scl_data->recout.width)) pixel_width = scl_data->recout.width; max_num_of_lines = dce_transform_get_max_num_of_supported_lines( xfm_dce, scl_data->lb_params.depth, pixel_width); /* Fail if in_taps are impossible */ if (in_taps->v_taps >= max_num_of_lines) return false; /* * Set taps according to this policy (in this order) * - Use 1 for no scaling * - Use input taps * - Use 4 and reduce as required by line buffer size * - Decide chroma taps if chroma is scaled * * Ignore input chroma taps. 
Decide based on non-chroma */ scl_data->taps.h_taps = decide_taps(scl_data->ratios.horz, in_taps->h_taps, false); scl_data->taps.v_taps = decide_taps(scl_data->ratios.vert, in_taps->v_taps, false); scl_data->taps.h_taps_c = decide_taps(scl_data->ratios.horz_c, in_taps->h_taps, true); scl_data->taps.v_taps_c = decide_taps(scl_data->ratios.vert_c, in_taps->v_taps, true); if (!IDENTITY_RATIO(scl_data->ratios.vert)) { /* reduce v_taps if needed but ensure we have at least two */ if (in_taps->v_taps == 0 && max_num_of_lines <= scl_data->taps.v_taps && scl_data->taps.v_taps > 1) { scl_data->taps.v_taps = max_num_of_lines - 1; } if (scl_data->taps.v_taps <= 1) return false; } if (!IDENTITY_RATIO(scl_data->ratios.vert_c)) { /* reduce chroma v_taps if needed but ensure we have at least two */ if (max_num_of_lines <= scl_data->taps.v_taps_c && scl_data->taps.v_taps_c > 1) { scl_data->taps.v_taps_c = max_num_of_lines - 1; } if (scl_data->taps.v_taps_c <= 1) return false; } /* we've got valid taps */ return true; } static void dce_transform_reset(struct transform *xfm) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); xfm_dce->filter_h = NULL; xfm_dce->filter_v = NULL; } static void program_color_matrix( struct dce_transform *xfm_dce, const struct out_csc_color_matrix *tbl_entry, enum grph_color_adjust_option options) { { REG_SET_2(OUTPUT_CSC_C11_C12, 0, OUTPUT_CSC_C11, tbl_entry->regval[0], OUTPUT_CSC_C12, tbl_entry->regval[1]); } { REG_SET_2(OUTPUT_CSC_C13_C14, 0, OUTPUT_CSC_C11, tbl_entry->regval[2], OUTPUT_CSC_C12, tbl_entry->regval[3]); } { REG_SET_2(OUTPUT_CSC_C21_C22, 0, OUTPUT_CSC_C11, tbl_entry->regval[4], OUTPUT_CSC_C12, tbl_entry->regval[5]); } { REG_SET_2(OUTPUT_CSC_C23_C24, 0, OUTPUT_CSC_C11, tbl_entry->regval[6], OUTPUT_CSC_C12, tbl_entry->regval[7]); } { REG_SET_2(OUTPUT_CSC_C31_C32, 0, OUTPUT_CSC_C11, tbl_entry->regval[8], OUTPUT_CSC_C12, tbl_entry->regval[9]); } { REG_SET_2(OUTPUT_CSC_C33_C34, 0, OUTPUT_CSC_C11, tbl_entry->regval[10], OUTPUT_CSC_C12, tbl_entry->regval[11]); } } static bool configure_graphics_mode( struct dce_transform *xfm_dce, enum csc_color_mode config, enum graphics_csc_adjust_type csc_adjust_type, enum dc_color_space color_space) { REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 0); if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) { if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC) { REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 4); } else { switch (color_space) { case COLOR_SPACE_SRGB: /* by pass */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 0); break; case COLOR_SPACE_SRGB_LIMITED: /* TV RGB */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 1); break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: /* YCbCr601 */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 2); break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: /* YCbCr709 */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 3); break; default: return false; } } } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) { switch (color_space) { case COLOR_SPACE_SRGB: /* by pass */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 0); break; case COLOR_SPACE_SRGB_LIMITED: /* TV RGB */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 1); break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: /* YCbCr601 */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 2); break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: /* YCbCr709 */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 3); break; 
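/* No predefined hardware CSC matrix exists for other color spaces, so the default case below rejects them. */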
default: return false; } } else /* by pass */ REG_SET(OUTPUT_CSC_CONTROL, 0, OUTPUT_CSC_GRPH_MODE, 0); return true; } void dce110_opp_set_csc_adjustment( struct transform *xfm, const struct out_csc_color_matrix *tbl_entry) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC; program_color_matrix( xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW); /* We did everything ,now program DxOUTPUT_CSC_CONTROL */ configure_graphics_mode(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW, tbl_entry->color_space); } void dce110_opp_set_csc_default( struct transform *xfm, const struct default_adjustment *default_adjust) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_PREDEFINED; if (default_adjust->force_hw_default == false) { const struct out_csc_color_matrix *elm; /* currently parameter not in use */ enum grph_color_adjust_option option = GRPH_COLOR_MATRIX_HW_DEFAULT; uint32_t i; /* * HW default false we program locally defined matrix * HW default true we use predefined hw matrix and we * do not need to program matrix * OEM wants the HW default via runtime parameter. */ option = GRPH_COLOR_MATRIX_SW; for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) { elm = &global_color_matrix[i]; if (elm->color_space != default_adjust->out_color_space) continue; /* program the matrix with default values from this * file */ program_color_matrix(xfm_dce, elm, option); config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC; break; } } /* configure the what we programmed : * 1. Default values from this file * 2. Use hardware default from ROM_A and we do not need to program * matrix */ configure_graphics_mode(xfm_dce, config, default_adjust->csc_adjust_type, default_adjust->out_color_space); } static void program_pwl(struct dce_transform *xfm_dce, const struct pwl_params *params) { int retval; uint8_t max_tries = 10; uint8_t counter = 0; uint32_t i = 0; const struct pwl_result_data *rgb = params->rgb_resulted; /* Power on LUT memory */ if (REG(DCFE_MEM_PWR_CTRL)) REG_UPDATE(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, 1); else REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, 1); while (counter < max_tries) { if (REG(DCFE_MEM_PWR_STATUS)) { REG_GET(DCFE_MEM_PWR_STATUS, DCP_REGAMMA_MEM_PWR_STATE, &retval); if (retval == 0) break; ++counter; } else { REG_GET(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_MEM_PWR_STATE, &retval); if (retval == 0) break; ++counter; } } if (counter == max_tries) { DC_LOG_WARNING("%s: regamma lut was not powered on " "in a timely manner," " programming still proceeds\n", __func__); } REG_UPDATE(REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, 7); REG_WRITE(REGAMMA_LUT_INDEX, 0); /* Program REGAMMA_LUT_DATA */ while (i != params->hw_points_num) { REG_WRITE(REGAMMA_LUT_DATA, rgb->red_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->green_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->blue_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_red_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_green_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_blue_reg); ++rgb; ++i; } /* we are done with DCP LUT memory; re-enable low power mode */ if (REG(DCFE_MEM_PWR_CTRL)) REG_UPDATE(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, 0); else REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, 0); } static void regamma_config_regions_and_segments(struct dce_transform *xfm_dce, const struct pwl_params *params) { const struct gamma_curve *curve; REG_SET_2(REGAMMA_CNTLA_START_CNTL, 0, 
REGAMMA_CNTLA_EXP_REGION_START, params->arr_points[0].custom_float_x, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, 0); REG_SET(REGAMMA_CNTLA_SLOPE_CNTL, 0, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, params->arr_points[0].custom_float_slope); REG_SET(REGAMMA_CNTLA_END_CNTL1, 0, REGAMMA_CNTLA_EXP_REGION_END, params->arr_points[1].custom_float_x); REG_SET_2(REGAMMA_CNTLA_END_CNTL2, 0, REGAMMA_CNTLA_EXP_REGION_END_BASE, params->arr_points[1].custom_float_y, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, params->arr_points[1].custom_float_slope); curve = params->arr_curve_points; REG_SET_4(REGAMMA_CNTLA_REGION_0_1, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(REGAMMA_CNTLA_REGION_2_3, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(REGAMMA_CNTLA_REGION_4_5, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(REGAMMA_CNTLA_REGION_6_7, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(REGAMMA_CNTLA_REGION_8_9, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(REGAMMA_CNTLA_REGION_10_11, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(REGAMMA_CNTLA_REGION_12_13, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(REGAMMA_CNTLA_REGION_14_15, 0, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); } void dce110_opp_program_regamma_pwl(struct transform *xfm, const struct pwl_params *params) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); /* Setup regions */ regamma_config_regions_and_segments(xfm_dce, params); /* Program PWL */ program_pwl(xfm_dce, params); } void dce110_opp_power_on_regamma_lut(struct transform *xfm, bool power_on) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); if (REG(DCFE_MEM_PWR_CTRL)) REG_UPDATE_2(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, power_on, DCP_LUT_MEM_PWR_DIS, power_on); else REG_UPDATE_2(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, power_on, DCP_LUT_LIGHT_SLEEP_DIS, power_on); } void 
dce110_opp_set_regamma_mode(struct transform *xfm, enum opp_regamma mode) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); REG_SET(REGAMMA_CONTROL, 0, GRPH_REGAMMA_MODE, mode); } static const struct transform_funcs dce_transform_funcs = { .transform_reset = dce_transform_reset, .transform_set_scaler = dce_transform_set_scaler, .transform_set_gamut_remap = dce_transform_set_gamut_remap, .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment, .opp_set_csc_default = dce110_opp_set_csc_default, .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut, .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl, .opp_set_regamma_mode = dce110_opp_set_regamma_mode, .transform_set_pixel_storage_depth = dce_transform_set_pixel_storage_depth, .transform_get_optimal_number_of_taps = dce_transform_get_optimal_number_of_taps }; #if defined(CONFIG_DRM_AMD_DC_SI) static const struct transform_funcs dce60_transform_funcs = { .transform_reset = dce_transform_reset, .transform_set_scaler = dce60_transform_set_scaler, .transform_set_gamut_remap = dce_transform_set_gamut_remap, .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment, .opp_set_csc_default = dce110_opp_set_csc_default, .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut, .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl, .opp_set_regamma_mode = dce110_opp_set_regamma_mode, .transform_set_pixel_storage_depth = dce60_transform_set_pixel_storage_depth, .transform_get_optimal_number_of_taps = dce_transform_get_optimal_number_of_taps }; #endif /*****************************************/ /* Constructor, Destructor */ /*****************************************/ void dce_transform_construct( struct dce_transform *xfm_dce, struct dc_context *ctx, uint32_t inst, const struct dce_transform_registers *regs, const struct dce_transform_shift *xfm_shift, const struct dce_transform_mask *xfm_mask) { xfm_dce->base.ctx = ctx; xfm_dce->base.inst = inst; xfm_dce->base.funcs = &dce_transform_funcs; xfm_dce->regs = regs; xfm_dce->xfm_shift = xfm_shift; xfm_dce->xfm_mask = xfm_mask; xfm_dce->prescaler_on = true; xfm_dce->lb_pixel_depth_supported = LB_PIXEL_DEPTH_18BPP | LB_PIXEL_DEPTH_24BPP | LB_PIXEL_DEPTH_30BPP | LB_PIXEL_DEPTH_36BPP; xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY; xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/ } #if defined(CONFIG_DRM_AMD_DC_SI) void dce60_transform_construct( struct dce_transform *xfm_dce, struct dc_context *ctx, uint32_t inst, const struct dce_transform_registers *regs, const struct dce_transform_shift *xfm_shift, const struct dce_transform_mask *xfm_mask) { xfm_dce->base.ctx = ctx; xfm_dce->base.inst = inst; xfm_dce->base.funcs = &dce60_transform_funcs; xfm_dce->regs = regs; xfm_dce->xfm_shift = xfm_shift; xfm_dce->xfm_mask = xfm_mask; xfm_dce->prescaler_on = true; xfm_dce->lb_pixel_depth_supported = LB_PIXEL_DEPTH_18BPP | LB_PIXEL_DEPTH_24BPP | LB_PIXEL_DEPTH_30BPP | LB_PIXEL_DEPTH_36BPP; xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY; xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/ } #endif
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
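A minimal standalone sketch of the vertical-tap clamping rule visible near the top of dce_transform.c above (the logic reached through transform_get_optimal_number_of_taps): when scaling is not 1:1, the vertical filter cannot use more taps than the line buffer can feed, so the requested tap count is clamped to one less than the available line count and the request fails unless at least two taps survive. The free-standing function name and bare-integer interface below are illustrative only, and the driver additionally skips the auto-reduction when the caller asked for an explicit tap count; that detail is omitted here.

/*
 * Illustrative sketch, not driver code: clamp vertical scaler taps to
 * the line-buffer depth, as done in dce_transform.c above.
 */
#include <stdbool.h>
#include <stdio.h>

static bool clamp_vert_taps(unsigned int *v_taps,
			    unsigned int max_num_of_lines,
			    bool identity_ratio)
{
	if (identity_ratio)
		return true;		/* 1:1 scaling: no line-buffer constraint */

	if (max_num_of_lines <= *v_taps && *v_taps > 1)
		*v_taps = max_num_of_lines - 1;	/* leave one line of headroom */

	return *v_taps > 1;		/* need at least two taps to filter */
}

int main(void)
{
	unsigned int taps = 6;

	if (clamp_vert_taps(&taps, 4, false))
		printf("clamped to %u taps\n", taps);	/* prints "clamped to 3 taps" */
	return 0;
}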
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "dce_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" #define DC_LOGGER \ enc110->base.ctx->logger #define REG(reg)\ (enc110->regs->reg) #undef FN #define FN(reg_name, field_name) \ enc110->se_shift->field_name, enc110->se_mask->field_name #define VBI_LINE_0 0 #define DP_BLANK_MAX_RETRY 20 #define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000 #ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004 #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008 #endif enum { DP_MST_UPDATE_MAX_RETRY = 50 }; #define DCE110_SE(audio)\ container_of(audio, struct dce110_stream_encoder, base) #define CTX \ enc110->base.ctx static void dce110_update_generic_info_packet( struct dce110_stream_encoder *enc110, uint32_t packet_index, const struct dc_info_packet *info_packet) { /* TODOFPGA Figure out a proper number for max_retries polling for lock * use 50 for now. */ uint32_t max_retries = 50; /*we need turn on clock before programming AFMT block*/ if (REG(AFMT_CNTL)) REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); if (REG(AFMT_VBI_PACKET_CONTROL1)) { if (packet_index >= 8) ASSERT(0); /* poll dig_update_lock is not locked -> asic internal signal * assume otg master lock will unlock it */ /* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, 0, 10, max_retries);*/ /* check if HW reading GSP memory */ REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, 0, 10, max_retries); /* HW does is not reading GSP memory not reading too long -> * something wrong. clear GPS memory access and notify? 
* hw SW is writing to GSP memory */ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1); } /* choose which generic packet to use */ { REG_READ(AFMT_VBI_PACKET_CONTROL); REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, packet_index); } /* write generic packet header * (4th byte is for GENERIC0 only) */ { REG_SET_4(AFMT_GENERIC_HDR, 0, AFMT_GENERIC_HB0, info_packet->hb0, AFMT_GENERIC_HB1, info_packet->hb1, AFMT_GENERIC_HB2, info_packet->hb2, AFMT_GENERIC_HB3, info_packet->hb3); } /* write generic packet contents * (we never use last 4 bytes) * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers */ { const uint32_t *content = (const uint32_t *) &info_packet->sb[0]; REG_WRITE(AFMT_GENERIC_0, *content++); REG_WRITE(AFMT_GENERIC_1, *content++); REG_WRITE(AFMT_GENERIC_2, *content++); REG_WRITE(AFMT_GENERIC_3, *content++); REG_WRITE(AFMT_GENERIC_4, *content++); REG_WRITE(AFMT_GENERIC_5, *content++); REG_WRITE(AFMT_GENERIC_6, *content++); REG_WRITE(AFMT_GENERIC_7, *content); } if (!REG(AFMT_VBI_PACKET_CONTROL1)) { /* force double-buffered packet update */ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, (packet_index == 0), AFMT_GENERIC2_UPDATE, (packet_index == 2)); } if (REG(AFMT_VBI_PACKET_CONTROL1)) { switch (packet_index) { case 0: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, 1); break; case 1: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, 1); break; case 2: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, 1); break; case 3: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, 1); break; case 4: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, 1); break; case 5: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, 1); break; case 6: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, 1); break; case 7: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, 1); break; default: break; } } } static void dce110_update_hdmi_info_packet( struct dce110_stream_encoder *enc110, uint32_t packet_index, const struct dc_info_packet *info_packet) { uint32_t cont, send, line; if (info_packet->valid) { dce110_update_generic_info_packet( enc110, packet_index, info_packet); /* enable transmission of packet(s) - * packet transmission begins on the next frame */ cont = 1; /* send packet(s) every frame */ send = 1; /* select line number to send packets on */ line = 2; } else { cont = 0; send = 0; line = 0; } /* choose which generic packet control to use */ switch (packet_index) { case 0: REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, cont, HDMI_GENERIC0_SEND, send, HDMI_GENERIC0_LINE, line); break; case 1: REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, cont, HDMI_GENERIC1_SEND, send, HDMI_GENERIC1_LINE, line); break; case 2: REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_CONT, cont, HDMI_GENERIC0_SEND, send, HDMI_GENERIC0_LINE, line); break; case 3: REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_CONT, cont, HDMI_GENERIC1_SEND, send, HDMI_GENERIC1_LINE, line); break; case 4: if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC0_CONT, cont, HDMI_GENERIC0_SEND, send, HDMI_GENERIC0_LINE, line); break; case 5: if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC1_CONT, cont, HDMI_GENERIC1_SEND, send, HDMI_GENERIC1_LINE, line); break; case 6: if (REG(HDMI_GENERIC_PACKET_CONTROL3)) REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3, 
HDMI_GENERIC0_CONT, cont, HDMI_GENERIC0_SEND, send, HDMI_GENERIC0_LINE, line); break; case 7: if (REG(HDMI_GENERIC_PACKET_CONTROL3)) REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC1_CONT, cont, HDMI_GENERIC1_SEND, send, HDMI_GENERIC1_LINE, line); break; default: /* invalid HW packet index */ DC_LOG_WARNING( "Invalid HW packet index: %s()\n", __func__); return; } } /* setup stream encoder in dp mode */ static void dce110_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { uint32_t h_active_start; uint32_t v_active_start; uint32_t misc0 = 0; uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dynamic_range_rgb = 0; /*full range*/ uint8_t dynamic_range_ycbcr = 1; /*bt709*/ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); struct dc_crtc_timing hw_crtc_timing = *crtc_timing; if (hw_crtc_timing.flags.INTERLACE) { /*the input timing is in VESA spec format with Interlace flag =1*/ hw_crtc_timing.v_total /= 2; hw_crtc_timing.v_border_top /= 2; hw_crtc_timing.v_addressable /= 2; hw_crtc_timing.v_border_bottom /= 2; hw_crtc_timing.v_front_porch /= 2; hw_crtc_timing.v_sync_width /= 2; } /* set pixel encoding */ switch (hw_crtc_timing.pixel_encoding) { case PIXEL_ENCODING_YCBCR422: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, DP_PIXEL_ENCODING_TYPE_YCBCR422); break; case PIXEL_ENCODING_YCBCR444: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, DP_PIXEL_ENCODING_TYPE_YCBCR444); if (hw_crtc_timing.flags.Y_ONLY) if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) /* HW testing only, no use case yet. * Color depth of Y-only could be * 8, 10, 12, 16 bits */ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, DP_PIXEL_ENCODING_TYPE_Y_ONLY); /* Note: DP_MSA_MISC1 bit 7 is the indicator * of Y-only mode. 
* This bit is set in HW if register * DP_PIXEL_ENCODING is programmed to 0x4 */ break; case PIXEL_ENCODING_YCBCR420: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, DP_PIXEL_ENCODING_TYPE_YCBCR420); if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN) REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); if (enc110->se_mask->DP_VID_N_MUL) REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1); break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, DP_PIXEL_ENCODING_TYPE_RGB444); break; } if (REG(DP_MSA_MISC)) misc1 = REG_READ(DP_MSA_MISC); /* set color depth */ switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 0); break; case COLOR_DEPTH_888: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_8BPC); break; case COLOR_DEPTH_101010: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_10BPC); break; case COLOR_DEPTH_121212: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_12BPC); break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_6BPC); break; } /* set dynamic range and YCbCr range */ switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: colorimetry_bpc = 0; break; case COLOR_DEPTH_888: colorimetry_bpc = 1; break; case COLOR_DEPTH_101010: colorimetry_bpc = 2; break; case COLOR_DEPTH_121212: colorimetry_bpc = 3; break; default: colorimetry_bpc = 0; break; } misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; if (REG(DP_MSA_TIMING_PARAM1)) { switch (output_color_space) { case COLOR_SPACE_SRGB: misc0 = misc0 | 0x0; misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_rgb = 0; /*full range*/ break; case COLOR_SPACE_SRGB_LIMITED: misc0 = misc0 | 0x8; /* bit3=1 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_rgb = 1; /*limited range*/ break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_ycbcr = 0; /*bt601*/ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: case COLOR_SPACE_YCBCR709_BLACK: misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_ycbcr = 1; /*bt709*/ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_2020_RGB_LIMITEDRANGE: dynamic_range_rgb = 1; /*limited range*/ break; case COLOR_SPACE_2020_RGB_FULLRANGE: case COLOR_SPACE_2020_YCBCR: case COLOR_SPACE_XR_RGB: case COLOR_SPACE_MSREF_SCRGB: case COLOR_SPACE_ADOBERGB: case COLOR_SPACE_DCIP3: case COLOR_SPACE_XV_YCC_709: case COLOR_SPACE_XV_YCC_601: case COLOR_SPACE_DISPLAYNATIVE: case COLOR_SPACE_DOLBYVISION: case COLOR_SPACE_APPCTRL: case COLOR_SPACE_CUSTOMPOINTS: case COLOR_SPACE_UNKNOWN: /* do nothing */ break; } if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE) REG_UPDATE_2( DP_PIXEL_FORMAT, DP_DYN_RANGE, dynamic_range_rgb, DP_YCBCR_RANGE, dynamic_range_ycbcr); if (REG(DP_MSA_COLORIMETRY)) REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0); if (REG(DP_MSA_MISC)) REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */ /* dcn new register * dc_crtc_timing is vesa dmt struct. 
data from edid */ if (REG(DP_MSA_TIMING_PARAM1)) REG_SET_2(DP_MSA_TIMING_PARAM1, 0, DP_MSA_HTOTAL, hw_crtc_timing.h_total, DP_MSA_VTOTAL, hw_crtc_timing.v_total); /* calcuate from vesa timing parameters * h_active_start related to leading edge of sync */ h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; h_back_porch = h_blank - hw_crtc_timing.h_front_porch - hw_crtc_timing.h_sync_width; /* start at begining of left border */ h_active_start = hw_crtc_timing.h_sync_width + h_back_porch; v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - hw_crtc_timing.v_front_porch; /* start at begining of left border */ if (REG(DP_MSA_TIMING_PARAM2)) REG_SET_2(DP_MSA_TIMING_PARAM2, 0, DP_MSA_HSTART, h_active_start, DP_MSA_VSTART, v_active_start); if (REG(DP_MSA_TIMING_PARAM3)) REG_SET_4(DP_MSA_TIMING_PARAM3, 0, DP_MSA_HSYNCWIDTH, hw_crtc_timing.h_sync_width, DP_MSA_HSYNCPOLARITY, !hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY, DP_MSA_VSYNCWIDTH, hw_crtc_timing.v_sync_width, DP_MSA_VSYNCPOLARITY, !hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY); /* HWDITH include border or overscan */ if (REG(DP_MSA_TIMING_PARAM4)) REG_SET_2(DP_MSA_TIMING_PARAM4, 0, DP_MSA_HWIDTH, hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right, DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom); } } static void dce110_stream_encoder_set_stream_attribute_helper( struct dce110_stream_encoder *enc110, struct dc_crtc_timing *crtc_timing) { if (enc110->regs->TMDS_CNTL) { switch (crtc_timing->pixel_encoding) { case PIXEL_ENCODING_YCBCR422: REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 1); break; default: REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 0); break; } REG_UPDATE(TMDS_CNTL, TMDS_COLOR_FORMAT, 0); } else if (enc110->regs->DIG_FE_CNTL) { switch (crtc_timing->pixel_encoding) { case PIXEL_ENCODING_YCBCR422: REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1); break; default: REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0); break; } REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0); } } /* setup stream encoder in hdmi mode */ static void dce110_stream_encoder_hdmi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, int actual_pix_clk_khz, bool enable_audio) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); struct bp_encoder_control cntl = {0}; cntl.action = ENCODER_CONTROL_SETUP; cntl.engine_id = enc110->base.id; cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; cntl.enable_dp_audio = enable_audio; cntl.pixel_clock = actual_pix_clk_khz; cntl.lanes_number = LANE_COUNT_FOUR; cntl.color_depth = crtc_timing->display_color_depth; if (enc110->base.bp->funcs->encoder_control( enc110->base.bp, &cntl) != BP_RESULT_OK) return; dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing); /* setup HDMI engine */ if (!enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) { REG_UPDATE_3(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, 1, HDMI_KEEPOUT_MODE, 1, HDMI_DEEP_COLOR_ENABLE, 0); } else if (enc110->regs->DIG_FE_CNTL) { REG_UPDATE_5(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, 1, HDMI_KEEPOUT_MODE, 1, HDMI_DEEP_COLOR_ENABLE, 0, HDMI_DATA_SCRAMBLE_EN, 0, HDMI_CLOCK_CHANNEL_RATE, 0); } switch (crtc_timing->display_color_depth) { case COLOR_DEPTH_888: REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); break; case COLOR_DEPTH_101010: if (crtc_timing->pixel_encoding == 
PIXEL_ENCODING_YCBCR422) { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1, HDMI_DEEP_COLOR_ENABLE, 0); } else { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1, HDMI_DEEP_COLOR_ENABLE, 1); } break; case COLOR_DEPTH_121212: if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2, HDMI_DEEP_COLOR_ENABLE, 0); } else { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2, HDMI_DEEP_COLOR_ENABLE, 1); } break; case COLOR_DEPTH_161616: REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 3, HDMI_DEEP_COLOR_ENABLE, 1); break; default: break; } if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) { if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) { /* enable HDMI data scrambler * HDMI_CLOCK_CHANNEL_RATE_MORE_340M * Clock channel frequency is 1/4 of character rate. */ REG_UPDATE_2(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, 1, HDMI_CLOCK_CHANNEL_RATE, 1); } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) { /* TODO: New feature for DCE11, still need to implement */ /* enable HDMI data scrambler * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE * Clock channel frequency is the same * as character rate */ REG_UPDATE_2(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, 1, HDMI_CLOCK_CHANNEL_RATE, 0); } } REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1, HDMI_GC_SEND, 1, HDMI_NULL_SEND, 1); REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0); /* following belongs to audio */ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, VBI_LINE_0 + 2); REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0); } /* setup stream encoder in dvi mode */ static void dce110_stream_encoder_dvi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, bool is_dual_link) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); struct bp_encoder_control cntl = {0}; cntl.action = ENCODER_CONTROL_SETUP; cntl.engine_id = enc110->base.id; cntl.signal = is_dual_link ? SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; cntl.enable_dp_audio = false; cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; cntl.lanes_number = (is_dual_link) ? 
LANE_COUNT_EIGHT : LANE_COUNT_FOUR; if (enc110->base.bp->funcs->encoder_control( enc110->base.bp, &cntl) != BP_RESULT_OK) return; ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888); dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing); } /* setup stream encoder in LVDS mode */ static void dce110_stream_encoder_lvds_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); struct bp_encoder_control cntl = {0}; cntl.action = ENCODER_CONTROL_SETUP; cntl.engine_id = enc110->base.id; cntl.signal = SIGNAL_TYPE_LVDS; cntl.enable_dp_audio = false; cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; cntl.lanes_number = LANE_COUNT_FOUR; if (enc110->base.bp->funcs->encoder_control( enc110->base.bp, &cntl) != BP_RESULT_OK) return; ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); } static void dce110_stream_encoder_set_throttled_vcp_size( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); uint32_t x = dc_fixpt_floor( avg_time_slots_per_mtp); uint32_t y = dc_fixpt_ceil( dc_fixpt_shl( dc_fixpt_sub_int( avg_time_slots_per_mtp, x), 26)); { REG_SET_2(DP_MSE_RATE_CNTL, 0, DP_MSE_RATE_X, x, DP_MSE_RATE_Y, y); } /* wait for update to be completed on the link */ /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */ /* is reset to 0 (not pending) */ REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, 0, 10, DP_MST_UPDATE_MAX_RETRY); } static void dce110_stream_encoder_update_hdmi_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); if (enc110->se_mask->HDMI_AVI_INFO_CONT && enc110->se_mask->HDMI_AVI_INFO_SEND) { if (info_frame->avi.valid) { const uint32_t *content = (const uint32_t *) &info_frame->avi.sb[0]; /*we need turn on clock before programming AFMT block*/ if (REG(AFMT_CNTL)) REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); REG_WRITE(AFMT_AVI_INFO0, content[0]); REG_WRITE(AFMT_AVI_INFO1, content[1]); REG_WRITE(AFMT_AVI_INFO2, content[2]); REG_WRITE(AFMT_AVI_INFO3, content[3]); REG_UPDATE(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, info_frame->avi.hb1); REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1, HDMI_AVI_INFO_CONT, 1); REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, VBI_LINE_0 + 2); } else { REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0, HDMI_AVI_INFO_CONT, 0); } } if (enc110->se_mask->HDMI_AVI_INFO_CONT && enc110->se_mask->HDMI_AVI_INFO_SEND) { dce110_update_hdmi_info_packet(enc110, 0, &info_frame->vendor); dce110_update_hdmi_info_packet(enc110, 1, &info_frame->gamut); dce110_update_hdmi_info_packet(enc110, 2, &info_frame->spd); dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd); } if (enc110->se_mask->HDMI_DB_DISABLE) { /* for bring up, disable dp double TODO */ if (REG(HDMI_DB_CONTROL)) REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1); dce110_update_hdmi_info_packet(enc110, 0, &info_frame->avi); dce110_update_hdmi_info_packet(enc110, 1, &info_frame->vendor); dce110_update_hdmi_info_packet(enc110, 2, &info_frame->gamut); dce110_update_hdmi_info_packet(enc110, 3, &info_frame->spd); dce110_update_hdmi_info_packet(enc110, 4, &info_frame->hdrsmd); } } static void dce110_stream_encoder_stop_hdmi_info_packets( struct stream_encoder *enc) { struct dce110_stream_encoder 
*enc110 = DCE110STRENC_FROM_STRENC(enc); /* stop generic packets 0 & 1 on HDMI */ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0, HDMI_GENERIC1_CONT, 0, HDMI_GENERIC1_LINE, 0, HDMI_GENERIC1_SEND, 0, HDMI_GENERIC0_CONT, 0, HDMI_GENERIC0_LINE, 0, HDMI_GENERIC0_SEND, 0); /* stop generic packets 2 & 3 on HDMI */ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0, HDMI_GENERIC0_CONT, 0, HDMI_GENERIC0_LINE, 0, HDMI_GENERIC0_SEND, 0, HDMI_GENERIC1_CONT, 0, HDMI_GENERIC1_LINE, 0, HDMI_GENERIC1_SEND, 0); /* stop generic packets 2 & 3 on HDMI */ if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0, HDMI_GENERIC0_CONT, 0, HDMI_GENERIC0_LINE, 0, HDMI_GENERIC0_SEND, 0, HDMI_GENERIC1_CONT, 0, HDMI_GENERIC1_LINE, 0, HDMI_GENERIC1_SEND, 0); if (REG(HDMI_GENERIC_PACKET_CONTROL3)) REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0, HDMI_GENERIC0_CONT, 0, HDMI_GENERIC0_LINE, 0, HDMI_GENERIC0_SEND, 0, HDMI_GENERIC1_CONT, 0, HDMI_GENERIC1_LINE, 0, HDMI_GENERIC1_SEND, 0); } static void dce110_stream_encoder_update_dp_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); uint32_t value = 0; if (info_frame->vsc.valid) dce110_update_generic_info_packet( enc110, 0, /* packetIndex */ &info_frame->vsc); if (info_frame->spd.valid) dce110_update_generic_info_packet( enc110, 2, /* packetIndex */ &info_frame->spd); if (info_frame->hdrsmd.valid) dce110_update_generic_info_packet( enc110, 3, /* packetIndex */ &info_frame->hdrsmd); /* enable/disable transmission of packet(s). * If enabled, packet transmission begins on the next frame */ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); /* This bit is the master enable bit. * When enabling secondary stream engine, * this master bit must also be set. * This register shared with audio info frame. * Therefore we need to enable master bit * if at least on of the fields is not 0 */ value = REG_READ(DP_SEC_CNTL); if (value) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } static void dce110_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc) { /* stop generic packets on DP */ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); uint32_t value = 0; if (enc110->se_mask->DP_SEC_AVI_ENABLE) { REG_SET_7(DP_SEC_CNTL, 0, DP_SEC_GSP0_ENABLE, 0, DP_SEC_GSP1_ENABLE, 0, DP_SEC_GSP2_ENABLE, 0, DP_SEC_GSP3_ENABLE, 0, DP_SEC_AVI_ENABLE, 0, DP_SEC_MPG_ENABLE, 0, DP_SEC_STREAM_ENABLE, 0); } /* this register shared with audio info frame. * therefore we need to keep master enabled * if at least one of the fields is not 0 */ value = REG_READ(DP_SEC_CNTL); if (value) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } static void dce110_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); uint32_t reg1 = 0; uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; /* Note: For CZ, we are changing driver default to disable * stream deferred to next VBLANK. If results are positive, we * will make the same change to all DCE versions. There are a * handful of panels that cannot handle disable stream at * HBLANK and will result in a white line flash across the * screen on stream disable. 
*/ REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, &reg1); if ((reg1 & 0x1) == 0) /*stream not enabled*/ return; /* Specify the video stream disable point * (2 = start of the next vertical blank) */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2); /* Larger delay to wait until VBLANK - use max retry of * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode + * a little more because we may not trust delay accuracy. */ max_retries = DP_BLANK_MAX_RETRY * 150; /* disable DP stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); /* the encoder stops sending the video stream * at the start of the vertical blanking. * Poll for DP_VID_STREAM_STATUS == 0 */ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, max_retries); /* Tell the DP encoder to ignore timing from CRTC, must be done after * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is * complete, stream status will be stuck in video stream enabled state, * i.e. DP_VID_STREAM_STATUS stuck at 1. */ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); } /* output video stream to link encoder */ static void dce110_stream_encoder_dp_unblank( struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) { uint32_t n_vid = 0x8000; uint32_t m_vid; /* M / N = Fstream / Flink * m_vid / n_vid = pixel rate / link rate */ uint64_t m_vid_l = n_vid; m_vid_l *= param->timing.pix_clk_100hz / 10; m_vid_l = div_u64(m_vid_l, param->link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ); m_vid = (uint32_t) m_vid_l; /* enable auto measurement */ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0); /* auto measurement need 1 full 0x8000 symbol cycle to kick in, * therefore program initial value for Mvid and Nvid */ REG_UPDATE(DP_VID_N, DP_VID_N, n_vid); REG_UPDATE(DP_VID_M, DP_VID_M, m_vid); REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1); } /* set DIG_START to 0x1 to resync FIFO */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); /* switch DP encoder to CRTC data */ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); /* wait 100us for DIG/DP logic to prime * (i.e. a few video lines) */ udelay(100); /* the hardware would start sending video at the start of the next DP * frame (i.e. rising edge of the vblank). * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this * register has no effect on enable transition! HW always guarantees * VID_STREAM enable at start of next frame, and this is not * programmable */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); } static void dce110_stream_encoder_set_avmute( struct stream_encoder *enc, bool enable) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); unsigned int value = enable ? 
1 : 0; REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value); } static void dce110_reset_hdmi_stream_attribute( struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) REG_UPDATE_5(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, 1, HDMI_KEEPOUT_MODE, 1, HDMI_DEEP_COLOR_ENABLE, 0, HDMI_DATA_SCRAMBLE_EN, 0, HDMI_CLOCK_CHANNEL_RATE, 0); else REG_UPDATE_3(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, 1, HDMI_KEEPOUT_MODE, 1, HDMI_DEEP_COLOR_ENABLE, 0); } #define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000 #define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1 #include "include/audio_types.h" /* 25.2MHz/1.001*/ /* 25.2MHz/1.001*/ /* 25.2MHz*/ /* 27MHz */ /* 27MHz*1.001*/ /* 27MHz*1.001*/ /* 54MHz*/ /* 54MHz*1.001*/ /* 74.25MHz/1.001*/ /* 74.25MHz*/ /* 148.5MHz/1.001*/ /* 148.5MHz*/ static const struct audio_clock_info audio_clock_info_table[16] = { {2517, 4576, 28125, 7007, 31250, 6864, 28125}, {2518, 4576, 28125, 7007, 31250, 6864, 28125}, {2520, 4096, 25200, 6272, 28000, 6144, 25200}, {2700, 4096, 27000, 6272, 30000, 6144, 27000}, {2702, 4096, 27027, 6272, 30030, 6144, 27027}, {2703, 4096, 27027, 6272, 30030, 6144, 27027}, {5400, 4096, 54000, 6272, 60000, 6144, 54000}, {5405, 4096, 54054, 6272, 60060, 6144, 54054}, {7417, 11648, 210937, 17836, 234375, 11648, 140625}, {7425, 4096, 74250, 6272, 82500, 6144, 74250}, {14835, 11648, 421875, 8918, 234375, 5824, 140625}, {14850, 4096, 148500, 6272, 165000, 6144, 148500}, {29670, 5824, 421875, 4459, 234375, 5824, 281250}, {29700, 3072, 222750, 4704, 247500, 5120, 247500}, {59340, 5824, 843750, 8918, 937500, 5824, 562500}, {59400, 3072, 445500, 9408, 990000, 6144, 594000} }; static const struct audio_clock_info audio_clock_info_table_36bpc[14] = { {2517, 9152, 84375, 7007, 48875, 9152, 56250}, {2518, 9152, 84375, 7007, 48875, 9152, 56250}, {2520, 4096, 37800, 6272, 42000, 6144, 37800}, {2700, 4096, 40500, 6272, 45000, 6144, 40500}, {2702, 8192, 81081, 6272, 45045, 8192, 54054}, {2703, 8192, 81081, 6272, 45045, 8192, 54054}, {5400, 4096, 81000, 6272, 90000, 6144, 81000}, {5405, 4096, 81081, 6272, 90090, 6144, 81081}, {7417, 11648, 316406, 17836, 351562, 11648, 210937}, {7425, 4096, 111375, 6272, 123750, 6144, 111375}, {14835, 11648, 632812, 17836, 703125, 11648, 421875}, {14850, 4096, 222750, 6272, 247500, 6144, 222750}, {29670, 5824, 632812, 8918, 703125, 5824, 421875}, {29700, 4096, 445500, 4704, 371250, 5120, 371250} }; static const struct audio_clock_info audio_clock_info_table_48bpc[14] = { {2517, 4576, 56250, 7007, 62500, 6864, 56250}, {2518, 4576, 56250, 7007, 62500, 6864, 56250}, {2520, 4096, 50400, 6272, 56000, 6144, 50400}, {2700, 4096, 54000, 6272, 60000, 6144, 54000}, {2702, 4096, 54054, 6267, 60060, 8192, 54054}, {2703, 4096, 54054, 6272, 60060, 8192, 54054}, {5400, 4096, 108000, 6272, 120000, 6144, 108000}, {5405, 4096, 108108, 6272, 120120, 6144, 108108}, {7417, 11648, 421875, 17836, 468750, 11648, 281250}, {7425, 4096, 148500, 6272, 165000, 6144, 148500}, {14835, 11648, 843750, 8918, 468750, 11648, 281250}, {14850, 4096, 297000, 6272, 330000, 6144, 297000}, {29670, 5824, 843750, 4459, 468750, 5824, 562500}, {29700, 3072, 445500, 4704, 495000, 5120, 495000} }; static union audio_cea_channels speakers_to_channels( struct audio_speaker_flags speaker_flags) { union audio_cea_channels cea_channels = {0}; /* these are one to one */ cea_channels.channels.FL = speaker_flags.FL_FR; cea_channels.channels.FR = speaker_flags.FL_FR; cea_channels.channels.LFE = speaker_flags.LFE; 
cea_channels.channels.FC = speaker_flags.FC; /* if Rear Left and Right exist move RC speaker to channel 7 * otherwise to channel 5 */ if (speaker_flags.RL_RR) { cea_channels.channels.RL_RC = speaker_flags.RL_RR; cea_channels.channels.RR = speaker_flags.RL_RR; cea_channels.channels.RC_RLC_FLC = speaker_flags.RC; } else { cea_channels.channels.RL_RC = speaker_flags.RC; } /* FRONT Left Right Center and REAR Left Right Center are exclusive */ if (speaker_flags.FLC_FRC) { cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC; cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC; } else { cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC; cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC; } return cea_channels; } static uint32_t calc_max_audio_packets_per_line( const struct audio_crtc_info *crtc_info) { uint32_t max_packets_per_line; max_packets_per_line = crtc_info->h_total - crtc_info->h_active; if (crtc_info->pixel_repetition) max_packets_per_line *= crtc_info->pixel_repetition; /* for other hdmi features */ max_packets_per_line -= 58; /* for Control Period */ max_packets_per_line -= 16; /* Number of Audio Packets per Line */ max_packets_per_line /= 32; return max_packets_per_line; } static void get_audio_clock_info( enum dc_color_depth color_depth, uint32_t crtc_pixel_clock_100Hz, uint32_t actual_pixel_clock_100Hz, struct audio_clock_info *audio_clock_info) { const struct audio_clock_info *clock_info; uint32_t index; uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_100Hz / 100; uint32_t audio_array_size; switch (color_depth) { case COLOR_DEPTH_161616: clock_info = audio_clock_info_table_48bpc; audio_array_size = ARRAY_SIZE( audio_clock_info_table_48bpc); break; case COLOR_DEPTH_121212: clock_info = audio_clock_info_table_36bpc; audio_array_size = ARRAY_SIZE( audio_clock_info_table_36bpc); break; default: clock_info = audio_clock_info_table; audio_array_size = ARRAY_SIZE( audio_clock_info_table); break; } if (clock_info != NULL) { /* search for exact pixel clock in table */ for (index = 0; index < audio_array_size; index++) { if (clock_info[index].pixel_clock_in_10khz > crtc_pixel_clock_in_10khz) break; /* not match */ else if (clock_info[index].pixel_clock_in_10khz == crtc_pixel_clock_in_10khz) { /* match found */ *audio_clock_info = clock_info[index]; return; } } } /* not found */ if (actual_pixel_clock_100Hz == 0) actual_pixel_clock_100Hz = crtc_pixel_clock_100Hz; /* See HDMI spec the table entry under * pixel clock of "Other". 
*/ audio_clock_info->pixel_clock_in_10khz = actual_pixel_clock_100Hz / 100; audio_clock_info->cts_32khz = actual_pixel_clock_100Hz / 10; audio_clock_info->cts_44khz = actual_pixel_clock_100Hz / 10; audio_clock_info->cts_48khz = actual_pixel_clock_100Hz / 10; audio_clock_info->n_32khz = 4096; audio_clock_info->n_44khz = 6272; audio_clock_info->n_48khz = 6144; } static void dce110_se_audio_setup( struct stream_encoder *enc, unsigned int az_inst, struct audio_info *audio_info) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); uint32_t channels = 0; ASSERT(audio_info); if (audio_info == NULL) /* This should not happen.it does so we don't get BSOD*/ return; channels = speakers_to_channels(audio_info->flags.speaker_flags).all; /* setup the audio stream source select (audio -> dig mapping) */ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst); /* Channel allocation */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels); } static void dce110_se_setup_hdmi_audio( struct stream_encoder *enc, const struct audio_crtc_info *crtc_info) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); struct audio_clock_info audio_clock_info = {0}; uint32_t max_packets_per_line; /* For now still do calculation, although this field is ignored when above HDMI_PACKET_GEN_VERSION set to 1 */ max_packets_per_line = calc_max_audio_packets_per_line(crtc_info); /* HDMI_AUDIO_PACKET_CONTROL */ REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line, HDMI_AUDIO_DELAY_EN, 1); /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); /* AFMT_AUDIO_PACKET_CONTROL2 */ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, 0, AFMT_60958_OSF_OVRD, 0); /* HDMI_ACR_PACKET_CONTROL */ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1, HDMI_ACR_SOURCE, 0, HDMI_ACR_AUDIO_PRIORITY, 0); /* Program audio clock sample/regeneration parameters */ get_audio_clock_info(crtc_info->color_depth, crtc_info->requested_pixel_clock_100Hz, crtc_info->calculated_pixel_clock_100Hz, &audio_clock_info); DC_LOG_HW_AUDIO( "\n%s:Input::requested_pixel_clock_100Hz = %d" \ "calculated_pixel_clock_100Hz = %d \n", __func__, \ crtc_info->requested_pixel_clock_100Hz, \ crtc_info->calculated_pixel_clock_100Hz); /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz); /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz); /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz); /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz); /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz); /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz); /* Video driver cannot know in advance which sample rate will be used by HD Audio driver HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is programmed below in interruppt callback */ /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK & AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */ REG_UPDATE_2(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1, AFMT_60958_CS_CLOCK_ACCURACY, 0); /* AFMT_60958_1 AFMT_60958_CS_CHALNNEL_NUMBER_R */ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); /*AFMT_60958_2 now keep this settings until 
* Programming guide comes out*/ REG_UPDATE_6(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3, AFMT_60958_CS_CHANNEL_NUMBER_3, 4, AFMT_60958_CS_CHANNEL_NUMBER_4, 5, AFMT_60958_CS_CHANNEL_NUMBER_5, 6, AFMT_60958_CS_CHANNEL_NUMBER_6, 7, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); } static void dce110_se_setup_dp_audio( struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); /* --- DP Audio packet configurations --- */ /* ATP Configuration */ REG_SET(DP_SEC_AUD_N, 0, DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT); /* Async/auto-calc timestamp mode */ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE, DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC); /* --- The following are the registers * copied from the SetupHDMI --- */ /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); /* AFMT_AUDIO_PACKET_CONTROL2 */ /* Program the ATP and AIP next */ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, 0, AFMT_60958_OSF_OVRD, 0); /* AFMT_INFOFRAME_CONTROL0 */ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0); } static void dce110_se_enable_audio_clock( struct stream_encoder *enc, bool enable) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); if (REG(AFMT_CNTL) == 0) return; /* DCE8/10 does not have this register */ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable); /* wait for AFMT clock to turn on, * expectation: this should complete in 1-2 reads * * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10); * * TODO: wait for clock_on does not work well. May need HW * program sequence. But audio seems work normally even without wait * for clock_on status change */ } static void dce110_se_enable_dp_audio( struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); /* Enable Audio packets */ REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1); /* Program the ATP and AIP next */ REG_UPDATE_2(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1, DP_SEC_AIP_ENABLE, 1); /* Program STREAM_ENABLE after all the other enables. */ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } static void dce110_se_disable_dp_audio( struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); uint32_t value = 0; /* Disable Audio packets */ REG_UPDATE_5(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 0, DP_SEC_ATP_ENABLE, 0, DP_SEC_AIP_ENABLE, 0, DP_SEC_ACM_ENABLE, 0, DP_SEC_STREAM_ENABLE, 0); /* This register shared with encoder info frame. 
Therefore we need to keep master enabled if at least on of the fields is not 0 */ value = REG_READ(DP_SEC_CNTL); if (value != 0) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } void dce110_se_audio_mute_control( struct stream_encoder *enc, bool mute) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute); } void dce110_se_dp_audio_setup( struct stream_encoder *enc, unsigned int az_inst, struct audio_info *info) { dce110_se_audio_setup(enc, az_inst, info); } void dce110_se_dp_audio_enable( struct stream_encoder *enc) { dce110_se_enable_audio_clock(enc, true); dce110_se_setup_dp_audio(enc); dce110_se_enable_dp_audio(enc); } void dce110_se_dp_audio_disable( struct stream_encoder *enc) { dce110_se_disable_dp_audio(enc); dce110_se_enable_audio_clock(enc, false); } void dce110_se_hdmi_audio_setup( struct stream_encoder *enc, unsigned int az_inst, struct audio_info *info, struct audio_crtc_info *audio_crtc_info) { dce110_se_enable_audio_clock(enc, true); dce110_se_setup_hdmi_audio(enc, audio_crtc_info); dce110_se_audio_setup(enc, az_inst, info); } void dce110_se_hdmi_audio_disable( struct stream_encoder *enc) { dce110_se_enable_audio_clock(enc, false); } static void setup_stereo_sync( struct stream_encoder *enc, int tg_inst, bool enable) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst); REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable); } static void dig_connect_to_otg( struct stream_encoder *enc, int tg_inst) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); } static unsigned int dig_source_otg( struct stream_encoder *enc) { uint32_t tg_inst = 0; struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst); return tg_inst; } static const struct stream_encoder_funcs dce110_str_enc_funcs = { .dp_set_stream_attribute = dce110_stream_encoder_dp_set_stream_attribute, .hdmi_set_stream_attribute = dce110_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = dce110_stream_encoder_dvi_set_stream_attribute, .lvds_set_stream_attribute = dce110_stream_encoder_lvds_set_stream_attribute, .set_throttled_vcp_size = dce110_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = dce110_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = dce110_stream_encoder_stop_hdmi_info_packets, .update_dp_info_packets = dce110_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = dce110_stream_encoder_stop_dp_info_packets, .dp_blank = dce110_stream_encoder_dp_blank, .dp_unblank = dce110_stream_encoder_dp_unblank, .audio_mute_control = dce110_se_audio_mute_control, .dp_audio_setup = dce110_se_dp_audio_setup, .dp_audio_enable = dce110_se_dp_audio_enable, .dp_audio_disable = dce110_se_dp_audio_disable, .hdmi_audio_setup = dce110_se_hdmi_audio_setup, .hdmi_audio_disable = dce110_se_hdmi_audio_disable, .setup_stereo_sync = setup_stereo_sync, .set_avmute = dce110_stream_encoder_set_avmute, .dig_connect_to_otg = dig_connect_to_otg, .hdmi_reset_stream_attribute = dce110_reset_hdmi_stream_attribute, .dig_source_otg = dig_source_otg, }; void dce110_stream_encoder_construct( struct dce110_stream_encoder *enc110, struct dc_context *ctx, struct dc_bios *bp, enum engine_id eng_id, const struct dce110_stream_enc_registers *regs, const struct dce_stream_encoder_shift *se_shift, const 
struct dce_stream_encoder_mask *se_mask) { enc110->base.funcs = &dce110_str_enc_funcs; enc110->base.ctx = ctx; enc110->base.id = eng_id; enc110->base.bp = bp; enc110->regs = regs; enc110->se_shift = se_shift; enc110->se_mask = se_mask; }
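A worked check of the audio_clock_info tables above against the standard HDMI Audio Clock Regeneration relation, 128 * f_audio = f_TMDS * N / CTS, using the 25.2 MHz row. The field order assumed here (N before CTS for each of the 32/44.1/48 kHz sample rates) is inferred from the fallback assignments in get_audio_clock_info and is an assumption, since the struct itself lives in audio_types.h and is not shown in this file.

/*
 * Spot-check one table row (pixel clock 25.20 MHz) against the HDMI
 * ACR relation; all three sample rates come out exact.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t f_tmds = 25200000;                 /* 25.20 MHz TMDS clock      */
	const uint32_t n[3]   = { 4096, 6272, 6144 };     /* N for 32/44.1/48 kHz      */
	const uint32_t cts[3] = { 25200, 28000, 25200 };  /* CTS from the same row     */
	const uint32_t fs[3]  = { 32000, 44100, 48000 };  /* audio sample rates in Hz  */

	for (int i = 0; i < 3; i++)
		printf("fs=%u Hz: 128*fs=%llu  f_tmds*N/CTS=%llu\n",
		       fs[i], 128ULL * fs[i],
		       (unsigned long long)(f_tmds * n[i] / cts[i]));
	return 0;
}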
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
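A minimal sketch of the Mvid/Nvid seed computed in dce110_stream_encoder_dp_unblank above: Nvid is fixed at 0x8000 and M/N = pixel clock / link symbol clock, so the encoder starts from a sensible ratio before hardware auto-measurement takes over. LINK_RATE_REF_FREQ_IN_KHZ is assumed here to be the usual 27000 kHz DisplayPort reference multiplier; the standalone interface and the example figures in main() are illustrative, not taken from this file.

/*
 * Illustrative sketch of the initial Mvid value; the kernel uses
 * div_u64(), plain 64-bit division is used here.
 */
#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000u	/* assumed 27 MHz DP reference */

static uint32_t dp_m_vid_seed(uint32_t pix_clk_100hz, uint32_t link_rate)
{
	const uint32_t n_vid = 0x8000;
	uint64_t m_vid = (uint64_t)n_vid * (pix_clk_100hz / 10);	/* pixel clock in kHz */

	return (uint32_t)(m_vid /
		((uint64_t)link_rate * LINK_RATE_REF_FREQ_IN_KHZ));	/* symbol clock in kHz */
}

int main(void)
{
	/* e.g. a 148.5 MHz stream over HBR2 (link_rate 0x14 -> 540 MHz symbol clock) */
	printf("m_vid seed = 0x%x\n", dp_m_vid_seed(1485000, 0x14));
	return 0;
}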
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce_i2c.h" #include "reg_helper.h" bool dce_i2c_oem_device_present( struct resource_pool *pool, struct ddc_service *ddc, size_t slave_address ) { struct dc *dc = ddc->ctx->dc; struct dc_bios *dcb = dc->ctx->dc_bios; struct graphics_object_id id = {0}; struct graphics_object_i2c_info i2c_info; if (!dc->ctx->dc_bios->fw_info.oem_i2c_present) return false; id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; id.enum_id = 0; id.type = OBJECT_TYPE_GENERIC; if (dcb->funcs->get_i2c_info(dcb, id, &i2c_info) != BP_RESULT_OK) return false; if (i2c_info.i2c_slave_address != slave_address) return false; return true; } bool dce_i2c_submit_command( struct resource_pool *pool, struct ddc *ddc, struct i2c_command *cmd) { struct dce_i2c_hw *dce_i2c_hw; struct dce_i2c_sw dce_i2c_sw = {0}; if (!ddc) { BREAK_TO_DEBUGGER(); return false; } if (!cmd) { BREAK_TO_DEBUGGER(); return false; } dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); if (dce_i2c_hw) return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw); dce_i2c_sw.ctx = ddc->ctx; if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) { return dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw); } return false; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
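A minimal sketch of the dispatch in dce_i2c_submit_command above: prefer a hardware I2C engine if one can be acquired for the DDC line, otherwise fall back to the bit-banged software engine, and reject obviously bad input up front. The try_hw()/try_sw() helpers and the request type below are hypothetical stand-ins for acquire_i2c_hw_engine()/dce_i2c_engine_acquire_sw() and struct i2c_command; only the control flow mirrors the driver.

/*
 * Illustrative hardware-first, software-fallback submit path.
 */
#include <stdbool.h>
#include <stdio.h>

struct i2c_request { int reg; int len; };	/* placeholder payload */

static bool try_hw(const struct i2c_request *req) { (void)req; return false; }
static bool try_sw(const struct i2c_request *req) { (void)req; return true;  }

static bool submit_i2c(const struct i2c_request *req)
{
	if (!req)
		return false;		/* NULL command is rejected, as in the driver */

	if (try_hw(req))		/* hardware engine acquired and transfer done */
		return true;

	return try_sw(req);		/* software (bit-banged) fallback */
}

int main(void)
{
	struct i2c_request req = { 0x50, 1 };

	printf("submit_i2c: %s\n", submit_i2c(&req) ? "ok" : "failed");
	return 0;
}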
/* * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "transform.h" //========================================= // <num_taps> = 2 // <num_phases> = 16 // <scale_ratio> = 0.833333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = s1.10 // <CoefOut> = s1.12 //========================================= static const uint16_t filter_2tap_16p[18] = { 0x1000, 0x0000, 0x0FF0, 0x0010, 0x0FB0, 0x0050, 0x0F34, 0x00CC, 0x0E68, 0x0198, 0x0D44, 0x02BC, 0x0BC4, 0x043C, 0x09FC, 0x0604, 0x0800, 0x0800 }; //========================================= // <num_taps> = 3 // <num_phases> = 16 // <scale_ratio> = 0.83333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_16p_upscale[27] = { 0x0804, 0x07FC, 0x0000, 0x06AC, 0x0978, 0x3FDC, 0x055C, 0x0AF0, 0x3FB4, 0x0420, 0x0C50, 0x3F90, 0x0300, 0x0D88, 0x3F78, 0x0200, 0x0E90, 0x3F70, 0x0128, 0x0F5C, 0x3F7C, 0x007C, 0x0FD8, 0x3FAC, 0x0000, 0x1000, 0x0000 }; //========================================= // <num_taps> = 3 // <num_phases> = 16 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_16p_116[27] = { 0x0804, 0x07FC, 0x0000, 0x0700, 0x0914, 0x3FEC, 0x0604, 0x0A1C, 0x3FE0, 0x050C, 0x0B14, 0x3FE0, 0x041C, 0x0BF4, 0x3FF0, 0x0340, 0x0CB0, 0x0010, 0x0274, 0x0D3C, 0x0050, 0x01C0, 0x0D94, 0x00AC, 0x0128, 0x0DB4, 0x0124 }; //========================================= // <num_taps> = 3 // <num_phases> = 16 // <scale_ratio> = 1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_16p_149[27] = { 0x0804, 0x07FC, 0x0000, 0x0730, 0x08CC, 0x0004, 0x0660, 0x098C, 0x0014, 0x0590, 0x0A3C, 0x0034, 0x04C4, 0x0AD4, 0x0068, 0x0400, 0x0B54, 0x00AC, 0x0348, 0x0BB0, 0x0108, 0x029C, 0x0BEC, 0x0178, 0x0200, 0x0C00, 0x0200 }; //========================================= // <num_taps> = 3 // <num_phases> = 16 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_16p_183[27] = { 0x0804, 0x07FC, 0x0000, 0x0754, 
0x0880, 0x002C, 0x06A8, 0x08F0, 0x0068, 0x05FC, 0x0954, 0x00B0, 0x0550, 0x09AC, 0x0104, 0x04A8, 0x09F0, 0x0168, 0x0408, 0x0A20, 0x01D8, 0x036C, 0x0A40, 0x0254, 0x02DC, 0x0A48, 0x02DC }; //========================================= // <num_taps> = 4 // <num_phases> = 16 // <scale_ratio> = 0.83333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_16p_upscale[36] = { 0x0000, 0x1000, 0x0000, 0x0000, 0x3F74, 0x0FDC, 0x00B4, 0x3FFC, 0x3F0C, 0x0F70, 0x0194, 0x3FF0, 0x3ECC, 0x0EC4, 0x0298, 0x3FD8, 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8, 0x3EA4, 0x0CD8, 0x04F4, 0x3F90, 0x3EB8, 0x0BA0, 0x0644, 0x3F64, 0x3ED8, 0x0A54, 0x07A0, 0x3F34, 0x3F00, 0x08FC, 0x0900, 0x3F04 }; //========================================= // <num_taps> = 4 // <num_phases> = 16 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_16p_116[36] = { 0x01A8, 0x0CB4, 0x01A4, 0x0000, 0x0110, 0x0CB0, 0x0254, 0x3FEC, 0x0090, 0x0C80, 0x031C, 0x3FD4, 0x0024, 0x0C2C, 0x03F4, 0x3FBC, 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0, 0x3F9C, 0x0B14, 0x05CC, 0x3F84, 0x3F70, 0x0A60, 0x06C4, 0x3F6C, 0x3F5C, 0x098C, 0x07BC, 0x3F5C, 0x3F54, 0x08AC, 0x08AC, 0x3F54 }; //========================================= // <num_taps> = 4 // <num_phases> = 16 // <scale_ratio> = 1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_16p_149[36] = { 0x02B8, 0x0A90, 0x02B8, 0x0000, 0x0230, 0x0A90, 0x0350, 0x3FF0, 0x01B8, 0x0A78, 0x03F0, 0x3FE0, 0x0148, 0x0A48, 0x049C, 0x3FD4, 0x00E8, 0x0A00, 0x054C, 0x3FCC, 0x0098, 0x09A0, 0x0600, 0x3FC8, 0x0054, 0x0928, 0x06B4, 0x3FD0, 0x001C, 0x08A4, 0x0760, 0x3FE0, 0x3FFC, 0x0804, 0x0804, 0x3FFC }; //========================================= // <num_taps> = 4 // <num_phases> = 16 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_16p_183[36] = { 0x03B0, 0x08A0, 0x03B0, 0x0000, 0x0348, 0x0898, 0x041C, 0x0004, 0x02DC, 0x0884, 0x0490, 0x0010, 0x0278, 0x0864, 0x0500, 0x0024, 0x021C, 0x0838, 0x0570, 0x003C, 0x01C8, 0x07FC, 0x05E0, 0x005C, 0x0178, 0x07B8, 0x064C, 0x0084, 0x0130, 0x076C, 0x06B0, 0x00B4, 0x00F0, 0x0714, 0x0710, 0x00EC }; //========================================= // <num_taps> = 2 // <num_phases> = 64 // <scale_ratio> = 0.833333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = s1.10 // <CoefOut> = s1.12 //========================================= static const uint16_t filter_2tap_64p[66] = { 0x1000, 0x0000, 0x1000, 0x0000, 0x0FFC, 0x0004, 0x0FF8, 0x0008, 0x0FF0, 0x0010, 0x0FE4, 0x001C, 0x0FD8, 0x0028, 0x0FC4, 0x003C, 0x0FB0, 0x0050, 0x0F98, 0x0068, 0x0F7C, 0x0084, 0x0F58, 0x00A8, 0x0F34, 0x00CC, 0x0F08, 0x00F8, 0x0ED8, 0x0128, 0x0EA4, 0x015C, 0x0E68, 0x0198, 0x0E28, 0x01D8, 0x0DE4, 0x021C, 0x0D98, 0x0268, 0x0D44, 0x02BC, 0x0CEC, 0x0314, 0x0C90, 0x0370, 0x0C2C, 0x03D4, 0x0BC4, 0x043C, 0x0B58, 0x04A8, 0x0AE8, 0x0518, 0x0A74, 0x058C, 0x09FC, 0x0604, 0x0980, 0x0680, 0x0900, 0x0700, 0x0880, 0x0780, 0x0800, 0x0800 }; //========================================= // <num_taps> = 3 // <num_phases> = 64 // <scale_ratio> = 0.83333 
(input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_64p_upscale[99] = { 0x0804, 0x07FC, 0x0000, 0x07A8, 0x0860, 0x3FF8, 0x0754, 0x08BC, 0x3FF0, 0x0700, 0x0918, 0x3FE8, 0x06AC, 0x0978, 0x3FDC, 0x0654, 0x09D8, 0x3FD4, 0x0604, 0x0A34, 0x3FC8, 0x05B0, 0x0A90, 0x3FC0, 0x055C, 0x0AF0, 0x3FB4, 0x050C, 0x0B48, 0x3FAC, 0x04BC, 0x0BA0, 0x3FA4, 0x0470, 0x0BF4, 0x3F9C, 0x0420, 0x0C50, 0x3F90, 0x03D8, 0x0C9C, 0x3F8C, 0x038C, 0x0CF0, 0x3F84, 0x0344, 0x0D40, 0x3F7C, 0x0300, 0x0D88, 0x3F78, 0x02BC, 0x0DD0, 0x3F74, 0x027C, 0x0E14, 0x3F70, 0x023C, 0x0E54, 0x3F70, 0x0200, 0x0E90, 0x3F70, 0x01C8, 0x0EC8, 0x3F70, 0x0190, 0x0EFC, 0x3F74, 0x015C, 0x0F2C, 0x3F78, 0x0128, 0x0F5C, 0x3F7C, 0x00FC, 0x0F7C, 0x3F88, 0x00CC, 0x0FA4, 0x3F90, 0x00A4, 0x0FC0, 0x3F9C, 0x007C, 0x0FD8, 0x3FAC, 0x0058, 0x0FE8, 0x3FC0, 0x0038, 0x0FF4, 0x3FD4, 0x0018, 0x1000, 0x3FE8, 0x0000, 0x1000, 0x0000 }; //========================================= // <num_taps> = 3 // <num_phases> = 64 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_64p_116[99] = { 0x0804, 0x07FC, 0x0000, 0x07C0, 0x0844, 0x3FFC, 0x0780, 0x0888, 0x3FF8, 0x0740, 0x08D0, 0x3FF0, 0x0700, 0x0914, 0x3FEC, 0x06C0, 0x0958, 0x3FE8, 0x0684, 0x0998, 0x3FE4, 0x0644, 0x09DC, 0x3FE0, 0x0604, 0x0A1C, 0x3FE0, 0x05C4, 0x0A5C, 0x3FE0, 0x0588, 0x0A9C, 0x3FDC, 0x0548, 0x0ADC, 0x3FDC, 0x050C, 0x0B14, 0x3FE0, 0x04CC, 0x0B54, 0x3FE0, 0x0490, 0x0B8C, 0x3FE4, 0x0458, 0x0BC0, 0x3FE8, 0x041C, 0x0BF4, 0x3FF0, 0x03E0, 0x0C28, 0x3FF8, 0x03A8, 0x0C58, 0x0000, 0x0374, 0x0C88, 0x0004, 0x0340, 0x0CB0, 0x0010, 0x0308, 0x0CD8, 0x0020, 0x02D8, 0x0CFC, 0x002C, 0x02A0, 0x0D20, 0x0040, 0x0274, 0x0D3C, 0x0050, 0x0244, 0x0D58, 0x0064, 0x0214, 0x0D70, 0x007C, 0x01E8, 0x0D84, 0x0094, 0x01C0, 0x0D94, 0x00AC, 0x0198, 0x0DA0, 0x00C8, 0x0170, 0x0DAC, 0x00E4, 0x014C, 0x0DB0, 0x0104, 0x0128, 0x0DB4, 0x0124 }; //========================================= // <num_taps> = 3 // <num_phases> = 64 // <scale_ratio> = 1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_64p_149[99] = { 0x0804, 0x07FC, 0x0000, 0x07CC, 0x0834, 0x0000, 0x0798, 0x0868, 0x0000, 0x0764, 0x089C, 0x0000, 0x0730, 0x08CC, 0x0004, 0x0700, 0x08FC, 0x0004, 0x06CC, 0x092C, 0x0008, 0x0698, 0x095C, 0x000C, 0x0660, 0x098C, 0x0014, 0x062C, 0x09B8, 0x001C, 0x05FC, 0x09E4, 0x0020, 0x05C4, 0x0A10, 0x002C, 0x0590, 0x0A3C, 0x0034, 0x055C, 0x0A64, 0x0040, 0x0528, 0x0A8C, 0x004C, 0x04F8, 0x0AB0, 0x0058, 0x04C4, 0x0AD4, 0x0068, 0x0490, 0x0AF8, 0x0078, 0x0460, 0x0B18, 0x0088, 0x0430, 0x0B38, 0x0098, 0x0400, 0x0B54, 0x00AC, 0x03D0, 0x0B6C, 0x00C4, 0x03A0, 0x0B88, 0x00D8, 0x0374, 0x0B9C, 0x00F0, 0x0348, 0x0BB0, 0x0108, 0x0318, 0x0BC4, 0x0124, 0x02EC, 0x0BD4, 0x0140, 0x02C4, 0x0BE0, 0x015C, 0x029C, 0x0BEC, 0x0178, 0x0274, 0x0BF4, 0x0198, 0x024C, 0x0BFC, 0x01B8, 0x0228, 0x0BFC, 0x01DC, 0x0200, 0x0C00, 0x0200 }; //========================================= // <num_taps> = 3 // <num_phases> = 64 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_3tap_64p_183[99] = { 0x0804, 0x07FC, 0x0000, 
0x07D4, 0x0824, 0x0008, 0x07AC, 0x0840, 0x0014, 0x0780, 0x0860, 0x0020, 0x0754, 0x0880, 0x002C, 0x0728, 0x089C, 0x003C, 0x0700, 0x08B8, 0x0048, 0x06D4, 0x08D4, 0x0058, 0x06A8, 0x08F0, 0x0068, 0x067C, 0x090C, 0x0078, 0x0650, 0x0924, 0x008C, 0x0628, 0x093C, 0x009C, 0x05FC, 0x0954, 0x00B0, 0x05D0, 0x096C, 0x00C4, 0x05A8, 0x0980, 0x00D8, 0x0578, 0x0998, 0x00F0, 0x0550, 0x09AC, 0x0104, 0x0528, 0x09BC, 0x011C, 0x04FC, 0x09D0, 0x0134, 0x04D4, 0x09E0, 0x014C, 0x04A8, 0x09F0, 0x0168, 0x0480, 0x09FC, 0x0184, 0x045C, 0x0A08, 0x019C, 0x0434, 0x0A14, 0x01B8, 0x0408, 0x0A20, 0x01D8, 0x03E0, 0x0A2C, 0x01F4, 0x03B8, 0x0A34, 0x0214, 0x0394, 0x0A38, 0x0234, 0x036C, 0x0A40, 0x0254, 0x0348, 0x0A44, 0x0274, 0x0324, 0x0A48, 0x0294, 0x0300, 0x0A48, 0x02B8, 0x02DC, 0x0A48, 0x02DC }; //========================================= // <num_taps> = 4 // <num_phases> = 64 // <scale_ratio> = 0.83333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_64p_upscale[132] = { 0x0000, 0x1000, 0x0000, 0x0000, 0x3FDC, 0x0FFC, 0x0028, 0x0000, 0x3FB4, 0x0FF8, 0x0054, 0x0000, 0x3F94, 0x0FE8, 0x0084, 0x0000, 0x3F74, 0x0FDC, 0x00B4, 0x3FFC, 0x3F58, 0x0FC4, 0x00E8, 0x3FFC, 0x3F3C, 0x0FAC, 0x0120, 0x3FF8, 0x3F24, 0x0F90, 0x0158, 0x3FF4, 0x3F0C, 0x0F70, 0x0194, 0x3FF0, 0x3EF8, 0x0F4C, 0x01D0, 0x3FEC, 0x3EE8, 0x0F20, 0x0210, 0x3FE8, 0x3ED8, 0x0EF4, 0x0254, 0x3FE0, 0x3ECC, 0x0EC4, 0x0298, 0x3FD8, 0x3EC0, 0x0E90, 0x02DC, 0x3FD4, 0x3EB8, 0x0E58, 0x0324, 0x3FCC, 0x3EB0, 0x0E20, 0x036C, 0x3FC4, 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8, 0x3EA8, 0x0DA4, 0x0404, 0x3FB0, 0x3EA4, 0x0D60, 0x0454, 0x3FA8, 0x3EA4, 0x0D1C, 0x04A4, 0x3F9C, 0x3EA4, 0x0CD8, 0x04F4, 0x3F90, 0x3EA8, 0x0C88, 0x0548, 0x3F88, 0x3EAC, 0x0C3C, 0x059C, 0x3F7C, 0x3EB0, 0x0BF0, 0x05F0, 0x3F70, 0x3EB8, 0x0BA0, 0x0644, 0x3F64, 0x3EBC, 0x0B54, 0x0698, 0x3F58, 0x3EC4, 0x0B00, 0x06F0, 0x3F4C, 0x3ECC, 0x0AAC, 0x0748, 0x3F40, 0x3ED8, 0x0A54, 0x07A0, 0x3F34, 0x3EE0, 0x0A04, 0x07F8, 0x3F24, 0x3EEC, 0x09AC, 0x0850, 0x3F18, 0x3EF8, 0x0954, 0x08A8, 0x3F0C, 0x3F00, 0x08FC, 0x0900, 0x3F04 }; //========================================= // <num_taps> = 4 // <num_phases> = 64 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_64p_116[132] = { 0x01A8, 0x0CB4, 0x01A4, 0x0000, 0x017C, 0x0CB8, 0x01D0, 0x3FFC, 0x0158, 0x0CB8, 0x01F8, 0x3FF8, 0x0130, 0x0CB4, 0x0228, 0x3FF4, 0x0110, 0x0CB0, 0x0254, 0x3FEC, 0x00EC, 0x0CA8, 0x0284, 0x3FE8, 0x00CC, 0x0C9C, 0x02B4, 0x3FE4, 0x00AC, 0x0C90, 0x02E8, 0x3FDC, 0x0090, 0x0C80, 0x031C, 0x3FD4, 0x0070, 0x0C70, 0x0350, 0x3FD0, 0x0058, 0x0C5C, 0x0384, 0x3FC8, 0x003C, 0x0C48, 0x03BC, 0x3FC0, 0x0024, 0x0C2C, 0x03F4, 0x3FBC, 0x0010, 0x0C10, 0x042C, 0x3FB4, 0x3FFC, 0x0BF4, 0x0464, 0x3FAC, 0x3FE8, 0x0BD4, 0x04A0, 0x3FA4, 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0, 0x3FC4, 0x0B8C, 0x0518, 0x3F98, 0x3FB4, 0x0B68, 0x0554, 0x3F90, 0x3FA8, 0x0B40, 0x0590, 0x3F88, 0x3F9C, 0x0B14, 0x05CC, 0x3F84, 0x3F90, 0x0AEC, 0x0608, 0x3F7C, 0x3F84, 0x0ABC, 0x0648, 0x3F78, 0x3F7C, 0x0A90, 0x0684, 0x3F70, 0x3F70, 0x0A60, 0x06C4, 0x3F6C, 0x3F6C, 0x0A2C, 0x0700, 0x3F68, 0x3F64, 0x09F8, 0x0740, 0x3F64, 0x3F60, 0x09C4, 0x077C, 0x3F60, 0x3F5C, 0x098C, 0x07BC, 0x3F5C, 0x3F58, 0x0958, 0x07F8, 0x3F58, 0x3F58, 0x091C, 0x0834, 0x3F58, 0x3F54, 0x08E4, 0x0870, 0x3F58, 0x3F54, 0x08AC, 0x08AC, 0x3F54 }; 
//========================================= // <num_taps> = 4 // <num_phases> = 64 // <scale_ratio> = 1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_64p_149[132] = { 0x02B8, 0x0A90, 0x02B8, 0x0000, 0x0294, 0x0A94, 0x02DC, 0x3FFC, 0x0274, 0x0A94, 0x0300, 0x3FF8, 0x0250, 0x0A94, 0x0328, 0x3FF4, 0x0230, 0x0A90, 0x0350, 0x3FF0, 0x0214, 0x0A8C, 0x0374, 0x3FEC, 0x01F0, 0x0A88, 0x03A0, 0x3FE8, 0x01D4, 0x0A80, 0x03C8, 0x3FE4, 0x01B8, 0x0A78, 0x03F0, 0x3FE0, 0x0198, 0x0A70, 0x041C, 0x3FDC, 0x0180, 0x0A64, 0x0444, 0x3FD8, 0x0164, 0x0A54, 0x0470, 0x3FD8, 0x0148, 0x0A48, 0x049C, 0x3FD4, 0x0130, 0x0A38, 0x04C8, 0x3FD0, 0x0118, 0x0A24, 0x04F4, 0x3FD0, 0x0100, 0x0A14, 0x0520, 0x3FCC, 0x00E8, 0x0A00, 0x054C, 0x3FCC, 0x00D4, 0x09E8, 0x057C, 0x3FC8, 0x00C0, 0x09D0, 0x05A8, 0x3FC8, 0x00AC, 0x09B8, 0x05D4, 0x3FC8, 0x0098, 0x09A0, 0x0600, 0x3FC8, 0x0084, 0x0984, 0x0630, 0x3FC8, 0x0074, 0x0964, 0x065C, 0x3FCC, 0x0064, 0x0948, 0x0688, 0x3FCC, 0x0054, 0x0928, 0x06B4, 0x3FD0, 0x0044, 0x0908, 0x06E0, 0x3FD4, 0x0038, 0x08E8, 0x070C, 0x3FD4, 0x002C, 0x08C4, 0x0738, 0x3FD8, 0x001C, 0x08A4, 0x0760, 0x3FE0, 0x0014, 0x087C, 0x078C, 0x3FE4, 0x0008, 0x0858, 0x07B4, 0x3FEC, 0x0000, 0x0830, 0x07DC, 0x3FF4, 0x3FFC, 0x0804, 0x0804, 0x3FFC }; //========================================= // <num_taps> = 4 // <num_phases> = 64 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_4tap_64p_183[132] = { 0x03B0, 0x08A0, 0x03B0, 0x0000, 0x0394, 0x08A0, 0x03CC, 0x0000, 0x037C, 0x089C, 0x03E8, 0x0000, 0x0360, 0x089C, 0x0400, 0x0004, 0x0348, 0x0898, 0x041C, 0x0004, 0x032C, 0x0894, 0x0438, 0x0008, 0x0310, 0x0890, 0x0454, 0x000C, 0x02F8, 0x0888, 0x0474, 0x000C, 0x02DC, 0x0884, 0x0490, 0x0010, 0x02C4, 0x087C, 0x04AC, 0x0014, 0x02AC, 0x0874, 0x04C8, 0x0018, 0x0290, 0x086C, 0x04E4, 0x0020, 0x0278, 0x0864, 0x0500, 0x0024, 0x0264, 0x0858, 0x051C, 0x0028, 0x024C, 0x084C, 0x0538, 0x0030, 0x0234, 0x0844, 0x0554, 0x0034, 0x021C, 0x0838, 0x0570, 0x003C, 0x0208, 0x0828, 0x058C, 0x0044, 0x01F0, 0x081C, 0x05A8, 0x004C, 0x01DC, 0x080C, 0x05C4, 0x0054, 0x01C8, 0x07FC, 0x05E0, 0x005C, 0x01B4, 0x07EC, 0x05FC, 0x0064, 0x019C, 0x07DC, 0x0618, 0x0070, 0x018C, 0x07CC, 0x0630, 0x0078, 0x0178, 0x07B8, 0x064C, 0x0084, 0x0164, 0x07A8, 0x0664, 0x0090, 0x0150, 0x0794, 0x0680, 0x009C, 0x0140, 0x0780, 0x0698, 0x00A8, 0x0130, 0x076C, 0x06B0, 0x00B4, 0x0120, 0x0758, 0x06C8, 0x00C0, 0x0110, 0x0740, 0x06E0, 0x00D0, 0x0100, 0x072C, 0x06F8, 0x00DC, 0x00F0, 0x0714, 0x0710, 0x00EC }; //========================================= // <num_taps> = 5 // <num_phases> = 64 // <scale_ratio> = 0.83333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_5tap_64p_upscale[165] = { 0x3E40, 0x09C0, 0x09C0, 0x3E40, 0x0000, 0x3E50, 0x0964, 0x0A18, 0x3E34, 0x0000, 0x3E5C, 0x0908, 0x0A6C, 0x3E2C, 0x0004, 0x3E6C, 0x08AC, 0x0AC0, 0x3E20, 0x0008, 0x3E78, 0x0850, 0x0B14, 0x3E18, 0x000C, 0x3E88, 0x07F4, 0x0B60, 0x3E14, 0x0010, 0x3E98, 0x0798, 0x0BB0, 0x3E0C, 0x0014, 0x3EA8, 0x073C, 0x0C00, 0x3E08, 0x0014, 0x3EB8, 0x06E4, 0x0C48, 0x3E04, 0x0018, 0x3ECC, 0x0684, 0x0C90, 0x3E04, 0x001C, 0x3EDC, 0x062C, 0x0CD4, 0x3E04, 0x0020, 0x3EEC, 0x05D4, 0x0D1C, 0x3E04, 0x0020, 0x3EFC, 0x057C, 
0x0D5C, 0x3E08, 0x0024, 0x3F0C, 0x0524, 0x0D98, 0x3E10, 0x0028, 0x3F20, 0x04CC, 0x0DD8, 0x3E14, 0x0028, 0x3F30, 0x0478, 0x0E14, 0x3E1C, 0x0028, 0x3F40, 0x0424, 0x0E48, 0x3E28, 0x002C, 0x3F50, 0x03D4, 0x0E7C, 0x3E34, 0x002C, 0x3F60, 0x0384, 0x0EAC, 0x3E44, 0x002C, 0x3F6C, 0x0338, 0x0EDC, 0x3E54, 0x002C, 0x3F7C, 0x02E8, 0x0F08, 0x3E68, 0x002C, 0x3F8C, 0x02A0, 0x0F2C, 0x3E7C, 0x002C, 0x3F98, 0x0258, 0x0F50, 0x3E94, 0x002C, 0x3FA4, 0x0210, 0x0F74, 0x3EB0, 0x0028, 0x3FB0, 0x01CC, 0x0F90, 0x3ECC, 0x0028, 0x3FC0, 0x018C, 0x0FA8, 0x3EE8, 0x0024, 0x3FC8, 0x014C, 0x0FC0, 0x3F0C, 0x0020, 0x3FD4, 0x0110, 0x0FD4, 0x3F2C, 0x001C, 0x3FE0, 0x00D4, 0x0FE0, 0x3F54, 0x0018, 0x3FE8, 0x009C, 0x0FF0, 0x3F7C, 0x0010, 0x3FF0, 0x0064, 0x0FFC, 0x3FA4, 0x000C, 0x3FFC, 0x0030, 0x0FFC, 0x3FD4, 0x0004, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000 }; //========================================= // <num_taps> = 5 // <num_phases> = 64 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_5tap_64p_116[165] = { 0x3EDC, 0x0924, 0x0924, 0x3EDC, 0x0000, 0x3ED8, 0x08EC, 0x095C, 0x3EE0, 0x0000, 0x3ED4, 0x08B0, 0x0994, 0x3EE8, 0x0000, 0x3ED0, 0x0878, 0x09C8, 0x3EF0, 0x0000, 0x3ED0, 0x083C, 0x09FC, 0x3EF8, 0x0000, 0x3ED0, 0x0800, 0x0A2C, 0x3F04, 0x0000, 0x3ED0, 0x07C4, 0x0A5C, 0x3F10, 0x0000, 0x3ED0, 0x0788, 0x0A8C, 0x3F1C, 0x0000, 0x3ED0, 0x074C, 0x0AC0, 0x3F28, 0x3FFC, 0x3ED4, 0x0710, 0x0AE8, 0x3F38, 0x3FFC, 0x3ED8, 0x06D0, 0x0B18, 0x3F48, 0x3FF8, 0x3EDC, 0x0694, 0x0B3C, 0x3F5C, 0x3FF8, 0x3EE0, 0x0658, 0x0B68, 0x3F6C, 0x3FF4, 0x3EE4, 0x061C, 0x0B90, 0x3F80, 0x3FF0, 0x3EEC, 0x05DC, 0x0BB4, 0x3F98, 0x3FEC, 0x3EF0, 0x05A0, 0x0BD8, 0x3FB0, 0x3FE8, 0x3EF8, 0x0564, 0x0BF8, 0x3FC8, 0x3FE4, 0x3EFC, 0x0528, 0x0C1C, 0x3FE0, 0x3FE0, 0x3F04, 0x04EC, 0x0C38, 0x3FFC, 0x3FDC, 0x3F0C, 0x04B4, 0x0C54, 0x0014, 0x3FD8, 0x3F14, 0x047C, 0x0C70, 0x0030, 0x3FD0, 0x3F1C, 0x0440, 0x0C88, 0x0050, 0x3FCC, 0x3F24, 0x0408, 0x0CA0, 0x0070, 0x3FC4, 0x3F2C, 0x03D0, 0x0CB0, 0x0094, 0x3FC0, 0x3F34, 0x0398, 0x0CC4, 0x00B8, 0x3FB8, 0x3F3C, 0x0364, 0x0CD4, 0x00DC, 0x3FB0, 0x3F48, 0x032C, 0x0CE0, 0x0100, 0x3FAC, 0x3F50, 0x02F8, 0x0CEC, 0x0128, 0x3FA4, 0x3F58, 0x02C4, 0x0CF8, 0x0150, 0x3F9C, 0x3F60, 0x0290, 0x0D00, 0x017C, 0x3F94, 0x3F68, 0x0260, 0x0D04, 0x01A8, 0x3F8C, 0x3F74, 0x0230, 0x0D04, 0x01D4, 0x3F84, 0x3F7C, 0x0200, 0x0D08, 0x0200, 0x3F7C }; //========================================= // <num_taps> = 5 // <num_phases> = 64 // <scale_ratio> = 1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_5tap_64p_149[165] = { 0x3FF4, 0x080C, 0x080C, 0x3FF4, 0x0000, 0x3FE8, 0x07E8, 0x0830, 0x0000, 0x0000, 0x3FDC, 0x07C8, 0x0850, 0x0010, 0x3FFC, 0x3FD0, 0x07A4, 0x0878, 0x001C, 0x3FF8, 0x3FC4, 0x0780, 0x0898, 0x0030, 0x3FF4, 0x3FB8, 0x075C, 0x08B8, 0x0040, 0x3FF4, 0x3FB0, 0x0738, 0x08D8, 0x0050, 0x3FF0, 0x3FA8, 0x0710, 0x08F8, 0x0064, 0x3FEC, 0x3FA0, 0x06EC, 0x0914, 0x0078, 0x3FE8, 0x3F98, 0x06C4, 0x0934, 0x008C, 0x3FE4, 0x3F90, 0x06A0, 0x094C, 0x00A4, 0x3FE0, 0x3F8C, 0x0678, 0x0968, 0x00B8, 0x3FDC, 0x3F84, 0x0650, 0x0984, 0x00D0, 0x3FD8, 0x3F80, 0x0628, 0x099C, 0x00E8, 0x3FD4, 0x3F7C, 0x0600, 0x09B8, 0x0100, 0x3FCC, 0x3F78, 0x05D8, 0x09D0, 0x0118, 0x3FC8, 0x3F74, 0x05B0, 0x09E4, 0x0134, 0x3FC4, 0x3F70, 0x0588, 0x09F8, 0x0150, 0x3FC0, 0x3F70, 0x0560, 0x0A08, 0x016C, 0x3FBC, 0x3F6C, 0x0538, 
0x0A20, 0x0188, 0x3FB4, 0x3F6C, 0x0510, 0x0A30, 0x01A4, 0x3FB0, 0x3F6C, 0x04E8, 0x0A3C, 0x01C4, 0x3FAC, 0x3F6C, 0x04C0, 0x0A48, 0x01E4, 0x3FA8, 0x3F6C, 0x0498, 0x0A58, 0x0200, 0x3FA4, 0x3F6C, 0x0470, 0x0A60, 0x0224, 0x3FA0, 0x3F6C, 0x0448, 0x0A70, 0x0244, 0x3F98, 0x3F70, 0x0420, 0x0A78, 0x0264, 0x3F94, 0x3F70, 0x03F8, 0x0A80, 0x0288, 0x3F90, 0x3F74, 0x03D4, 0x0A84, 0x02A8, 0x3F8C, 0x3F74, 0x03AC, 0x0A8C, 0x02CC, 0x3F88, 0x3F78, 0x0384, 0x0A90, 0x02F0, 0x3F84, 0x3F7C, 0x0360, 0x0A90, 0x0314, 0x3F80, 0x3F7C, 0x033C, 0x0A90, 0x033C, 0x3F7C }; //========================================= // <num_taps> = 5 // <num_phases> = 64 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_5tap_64p_183[165] = { 0x0168, 0x069C, 0x0698, 0x0164, 0x0000, 0x0154, 0x068C, 0x06AC, 0x0174, 0x0000, 0x0144, 0x0674, 0x06C0, 0x0188, 0x0000, 0x0138, 0x0664, 0x06D0, 0x0198, 0x3FFC, 0x0128, 0x0654, 0x06E0, 0x01A8, 0x3FFC, 0x0118, 0x0640, 0x06F0, 0x01BC, 0x3FFC, 0x010C, 0x0630, 0x0700, 0x01CC, 0x3FF8, 0x00FC, 0x061C, 0x0710, 0x01E0, 0x3FF8, 0x00F0, 0x060C, 0x071C, 0x01F0, 0x3FF8, 0x00E4, 0x05F4, 0x072C, 0x0204, 0x3FF8, 0x00D8, 0x05E4, 0x0738, 0x0218, 0x3FF4, 0x00CC, 0x05D0, 0x0744, 0x022C, 0x3FF4, 0x00C0, 0x05B8, 0x0754, 0x0240, 0x3FF4, 0x00B4, 0x05A4, 0x0760, 0x0254, 0x3FF4, 0x00A8, 0x0590, 0x076C, 0x0268, 0x3FF4, 0x009C, 0x057C, 0x0778, 0x027C, 0x3FF4, 0x0094, 0x0564, 0x0780, 0x0294, 0x3FF4, 0x0088, 0x0550, 0x0788, 0x02A8, 0x3FF8, 0x0080, 0x0538, 0x0794, 0x02BC, 0x3FF8, 0x0074, 0x0524, 0x079C, 0x02D4, 0x3FF8, 0x006C, 0x0510, 0x07A4, 0x02E8, 0x3FF8, 0x0064, 0x04F4, 0x07AC, 0x0300, 0x3FFC, 0x005C, 0x04E4, 0x07B0, 0x0314, 0x3FFC, 0x0054, 0x04C8, 0x07B8, 0x032C, 0x0000, 0x004C, 0x04B4, 0x07C0, 0x0340, 0x0000, 0x0044, 0x04A0, 0x07C4, 0x0358, 0x0000, 0x003C, 0x0488, 0x07C8, 0x0370, 0x0004, 0x0038, 0x0470, 0x07CC, 0x0384, 0x0008, 0x0030, 0x045C, 0x07D0, 0x039C, 0x0008, 0x002C, 0x0444, 0x07D0, 0x03B4, 0x000C, 0x0024, 0x042C, 0x07D4, 0x03CC, 0x0010, 0x0020, 0x0414, 0x07D4, 0x03E0, 0x0018, 0x001C, 0x03FC, 0x07D4, 0x03F8, 0x001C }; //========================================= // <num_taps> = 6 // <num_phases> = 64 // <scale_ratio> = 0.83333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_6tap_64p_upscale[198] = { 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x000C, 0x3FD0, 0x0FFC, 0x0034, 0x3FF4, 0x0000, 0x0018, 0x3F9C, 0x0FF8, 0x006C, 0x3FE8, 0x0000, 0x0024, 0x3F6C, 0x0FF0, 0x00A8, 0x3FD8, 0x0000, 0x002C, 0x3F44, 0x0FE4, 0x00E4, 0x3FC8, 0x0000, 0x0038, 0x3F18, 0x0FD4, 0x0124, 0x3FB8, 0x0000, 0x0040, 0x3EF0, 0x0FC0, 0x0164, 0x3FA8, 0x0004, 0x0048, 0x3EC8, 0x0FAC, 0x01A8, 0x3F98, 0x0004, 0x0050, 0x3EA8, 0x0F94, 0x01EC, 0x3F84, 0x0004, 0x0058, 0x3E84, 0x0F74, 0x0234, 0x3F74, 0x0008, 0x0060, 0x3E68, 0x0F54, 0x027C, 0x3F60, 0x0008, 0x0064, 0x3E4C, 0x0F30, 0x02C8, 0x3F4C, 0x000C, 0x006C, 0x3E30, 0x0F04, 0x0314, 0x3F3C, 0x0010, 0x0070, 0x3E18, 0x0EDC, 0x0360, 0x3F28, 0x0014, 0x0074, 0x3E04, 0x0EB0, 0x03B0, 0x3F14, 0x0014, 0x0078, 0x3DF0, 0x0E80, 0x0400, 0x3F00, 0x0018, 0x0078, 0x3DE0, 0x0E4C, 0x0454, 0x3EEC, 0x001C, 0x007C, 0x3DD0, 0x0E14, 0x04A8, 0x3ED8, 0x0020, 0x007C, 0x3DC4, 0x0DDC, 0x04FC, 0x3EC4, 0x0024, 0x007C, 0x3DBC, 0x0DA0, 0x0550, 0x3EB0, 0x0028, 0x0080, 0x3DB4, 0x0D5C, 0x05A8, 0x3E9C, 0x002C, 0x0080, 0x3DAC, 0x0D1C, 0x0600, 0x3E88, 0x0030, 
0x007C, 0x3DA8, 0x0CDC, 0x0658, 0x3E74, 0x0034, 0x007C, 0x3DA4, 0x0C94, 0x06B0, 0x3E64, 0x0038, 0x007C, 0x3DA4, 0x0C48, 0x0708, 0x3E50, 0x0040, 0x0078, 0x3DA4, 0x0C00, 0x0760, 0x3E40, 0x0044, 0x0078, 0x3DA8, 0x0BB4, 0x07B8, 0x3E2C, 0x0048, 0x0074, 0x3DAC, 0x0B68, 0x0810, 0x3E1C, 0x004C, 0x0070, 0x3DB4, 0x0B18, 0x0868, 0x3E0C, 0x0050, 0x006C, 0x3DBC, 0x0AC4, 0x08C4, 0x3DFC, 0x0054, 0x0068, 0x3DC4, 0x0A74, 0x0918, 0x3DF0, 0x0058, 0x0068, 0x3DCC, 0x0A20, 0x0970, 0x3DE0, 0x005C, 0x0064, 0x3DD4, 0x09C8, 0x09C8, 0x3DD4, 0x0064 }; //========================================= // <num_taps> = 6 // <num_phases> = 64 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_6tap_64p_116[198] = { 0x3F0C, 0x0240, 0x0D68, 0x0240, 0x3F0C, 0x0000, 0x3F18, 0x0210, 0x0D64, 0x0274, 0x3F00, 0x0000, 0x3F24, 0x01E0, 0x0D58, 0x02A8, 0x3EF8, 0x0004, 0x3F2C, 0x01B0, 0x0D58, 0x02DC, 0x3EEC, 0x0004, 0x3F38, 0x0180, 0x0D50, 0x0310, 0x3EE0, 0x0008, 0x3F44, 0x0154, 0x0D40, 0x0348, 0x3ED8, 0x0008, 0x3F50, 0x0128, 0x0D34, 0x037C, 0x3ECC, 0x000C, 0x3F5C, 0x00FC, 0x0D20, 0x03B4, 0x3EC4, 0x0010, 0x3F64, 0x00D4, 0x0D14, 0x03EC, 0x3EB8, 0x0010, 0x3F70, 0x00AC, 0x0CFC, 0x0424, 0x3EB0, 0x0014, 0x3F78, 0x0084, 0x0CE8, 0x0460, 0x3EA8, 0x0014, 0x3F84, 0x0060, 0x0CCC, 0x0498, 0x3EA0, 0x0018, 0x3F90, 0x003C, 0x0CB4, 0x04D0, 0x3E98, 0x0018, 0x3F98, 0x0018, 0x0C9C, 0x050C, 0x3E90, 0x0018, 0x3FA0, 0x3FFC, 0x0C78, 0x0548, 0x3E88, 0x001C, 0x3FAC, 0x3FDC, 0x0C54, 0x0584, 0x3E84, 0x001C, 0x3FB4, 0x3FBC, 0x0C3C, 0x05BC, 0x3E7C, 0x001C, 0x3FBC, 0x3FA0, 0x0C14, 0x05F8, 0x3E78, 0x0020, 0x3FC4, 0x3F84, 0x0BF0, 0x0634, 0x3E74, 0x0020, 0x3FCC, 0x3F68, 0x0BCC, 0x0670, 0x3E70, 0x0020, 0x3FD4, 0x3F50, 0x0BA4, 0x06AC, 0x3E6C, 0x0020, 0x3FDC, 0x3F38, 0x0B78, 0x06E8, 0x3E6C, 0x0020, 0x3FE0, 0x3F24, 0x0B50, 0x0724, 0x3E68, 0x0020, 0x3FE8, 0x3F0C, 0x0B24, 0x0760, 0x3E68, 0x0020, 0x3FF0, 0x3EFC, 0x0AF4, 0x0798, 0x3E68, 0x0020, 0x3FF4, 0x3EE8, 0x0AC8, 0x07D4, 0x3E68, 0x0020, 0x3FFC, 0x3ED8, 0x0A94, 0x0810, 0x3E6C, 0x001C, 0x0000, 0x3EC8, 0x0A64, 0x0848, 0x3E70, 0x001C, 0x0000, 0x3EB8, 0x0A38, 0x0880, 0x3E74, 0x001C, 0x0004, 0x3EAC, 0x0A04, 0x08BC, 0x3E78, 0x0018, 0x0008, 0x3EA4, 0x09D0, 0x08F4, 0x3E7C, 0x0014, 0x000C, 0x3E98, 0x0998, 0x092C, 0x3E84, 0x0014, 0x0010, 0x3E90, 0x0964, 0x0960, 0x3E8C, 0x0010 }; //========================================= // <num_taps> = 6 // <num_phases> = 64 // <scale_ratio> = 1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_6tap_64p_149[198] = { 0x3F14, 0x0394, 0x0AB0, 0x0394, 0x3F14, 0x0000, 0x3F18, 0x036C, 0x0AB0, 0x03B8, 0x3F14, 0x0000, 0x3F18, 0x0348, 0x0AAC, 0x03E0, 0x3F14, 0x0000, 0x3F1C, 0x0320, 0x0AAC, 0x0408, 0x3F10, 0x0000, 0x3F20, 0x02FC, 0x0AA8, 0x042C, 0x3F10, 0x0000, 0x3F24, 0x02D8, 0x0AA0, 0x0454, 0x3F10, 0x0000, 0x3F28, 0x02B4, 0x0A98, 0x047C, 0x3F10, 0x0000, 0x3F28, 0x0290, 0x0A90, 0x04A4, 0x3F14, 0x0000, 0x3F30, 0x026C, 0x0A84, 0x04CC, 0x3F14, 0x0000, 0x3F34, 0x024C, 0x0A7C, 0x04F4, 0x3F14, 0x3FFC, 0x3F38, 0x0228, 0x0A70, 0x051C, 0x3F18, 0x3FFC, 0x3F3C, 0x0208, 0x0A64, 0x0544, 0x3F1C, 0x3FF8, 0x3F40, 0x01E8, 0x0A54, 0x056C, 0x3F20, 0x3FF8, 0x3F44, 0x01C8, 0x0A48, 0x0594, 0x3F24, 0x3FF4, 0x3F4C, 0x01A8, 0x0A34, 0x05BC, 0x3F28, 0x3FF4, 0x3F50, 0x0188, 0x0A28, 0x05E4, 0x3F2C, 0x3FF0, 0x3F54, 0x016C, 0x0A10, 0x060C, 0x3F34, 
0x3FF0, 0x3F5C, 0x014C, 0x09FC, 0x0634, 0x3F3C, 0x3FEC, 0x3F60, 0x0130, 0x09EC, 0x065C, 0x3F40, 0x3FE8, 0x3F68, 0x0114, 0x09D0, 0x0684, 0x3F48, 0x3FE8, 0x3F6C, 0x00F8, 0x09B8, 0x06AC, 0x3F54, 0x3FE4, 0x3F74, 0x00E0, 0x09A0, 0x06D0, 0x3F5C, 0x3FE0, 0x3F78, 0x00C4, 0x098C, 0x06F8, 0x3F64, 0x3FDC, 0x3F7C, 0x00AC, 0x0970, 0x0720, 0x3F70, 0x3FD8, 0x3F84, 0x0094, 0x0954, 0x0744, 0x3F7C, 0x3FD4, 0x3F88, 0x007C, 0x093C, 0x0768, 0x3F88, 0x3FD0, 0x3F90, 0x0064, 0x091C, 0x0790, 0x3F94, 0x3FCC, 0x3F94, 0x0050, 0x08FC, 0x07B4, 0x3FA4, 0x3FC8, 0x3F98, 0x003C, 0x08E0, 0x07D8, 0x3FB0, 0x3FC4, 0x3FA0, 0x0024, 0x08C0, 0x07FC, 0x3FC0, 0x3FC0, 0x3FA4, 0x0014, 0x08A4, 0x081C, 0x3FD0, 0x3FB8, 0x3FAC, 0x0000, 0x0880, 0x0840, 0x3FE0, 0x3FB4, 0x3FB0, 0x3FF0, 0x0860, 0x0860, 0x3FF0, 0x3FB0 }; //========================================= // <num_taps> = 6 // <num_phases> = 64 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_6tap_64p_183[198] = { 0x002C, 0x0420, 0x076C, 0x041C, 0x002C, 0x0000, 0x0028, 0x040C, 0x0768, 0x0430, 0x0034, 0x0000, 0x0020, 0x03F8, 0x0768, 0x0448, 0x003C, 0x3FFC, 0x0018, 0x03E4, 0x0768, 0x045C, 0x0044, 0x3FFC, 0x0014, 0x03D0, 0x0768, 0x0470, 0x004C, 0x3FF8, 0x000C, 0x03BC, 0x0764, 0x0484, 0x0058, 0x3FF8, 0x0008, 0x03A4, 0x0764, 0x049C, 0x0060, 0x3FF4, 0x0004, 0x0390, 0x0760, 0x04B0, 0x0068, 0x3FF4, 0x0000, 0x037C, 0x0760, 0x04C4, 0x0070, 0x3FF0, 0x3FFC, 0x0364, 0x075C, 0x04D8, 0x007C, 0x3FF0, 0x3FF8, 0x0350, 0x0758, 0x04F0, 0x0084, 0x3FEC, 0x3FF4, 0x033C, 0x0750, 0x0504, 0x0090, 0x3FEC, 0x3FF0, 0x0328, 0x074C, 0x0518, 0x009C, 0x3FE8, 0x3FEC, 0x0314, 0x0744, 0x052C, 0x00A8, 0x3FE8, 0x3FE8, 0x0304, 0x0740, 0x0540, 0x00B0, 0x3FE4, 0x3FE4, 0x02EC, 0x073C, 0x0554, 0x00BC, 0x3FE4, 0x3FE0, 0x02DC, 0x0734, 0x0568, 0x00C8, 0x3FE0, 0x3FE0, 0x02C4, 0x072C, 0x057C, 0x00D4, 0x3FE0, 0x3FDC, 0x02B4, 0x0724, 0x058C, 0x00E4, 0x3FDC, 0x3FDC, 0x02A0, 0x0718, 0x05A0, 0x00F0, 0x3FDC, 0x3FD8, 0x028C, 0x0714, 0x05B4, 0x00FC, 0x3FD8, 0x3FD8, 0x0278, 0x0704, 0x05C8, 0x010C, 0x3FD8, 0x3FD4, 0x0264, 0x0700, 0x05D8, 0x0118, 0x3FD8, 0x3FD4, 0x0254, 0x06F0, 0x05EC, 0x0128, 0x3FD4, 0x3FD0, 0x0244, 0x06E8, 0x05FC, 0x0134, 0x3FD4, 0x3FD0, 0x0230, 0x06DC, 0x060C, 0x0144, 0x3FD4, 0x3FD0, 0x021C, 0x06D0, 0x0620, 0x0154, 0x3FD0, 0x3FD0, 0x0208, 0x06C4, 0x0630, 0x0164, 0x3FD0, 0x3FD0, 0x01F8, 0x06B8, 0x0640, 0x0170, 0x3FD0, 0x3FCC, 0x01E8, 0x06AC, 0x0650, 0x0180, 0x3FD0, 0x3FCC, 0x01D8, 0x069C, 0x0660, 0x0190, 0x3FD0, 0x3FCC, 0x01C4, 0x068C, 0x0670, 0x01A4, 0x3FD0, 0x3FCC, 0x01B8, 0x0680, 0x067C, 0x01B4, 0x3FCC }; //========================================= // <num_taps> = 7 // <num_phases> = 64 // <scale_ratio> = 0.83333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_7tap_64p_upscale[231] = { 0x00B0, 0x3D98, 0x09BC, 0x09B8, 0x3D94, 0x00B0, 0x0000, 0x00AC, 0x3DA0, 0x0968, 0x0A10, 0x3D88, 0x00B4, 0x0000, 0x00A8, 0x3DAC, 0x0914, 0x0A60, 0x3D80, 0x00B8, 0x0000, 0x00A4, 0x3DB8, 0x08C0, 0x0AB4, 0x3D78, 0x00BC, 0x3FFC, 0x00A0, 0x3DC8, 0x0868, 0x0B00, 0x3D74, 0x00C0, 0x3FFC, 0x0098, 0x3DD8, 0x0818, 0x0B54, 0x3D6C, 0x00C0, 0x3FF8, 0x0094, 0x3DE8, 0x07C0, 0x0B9C, 0x3D6C, 0x00C4, 0x3FF8, 0x008C, 0x3DFC, 0x0768, 0x0BEC, 0x3D68, 0x00C4, 0x3FF8, 0x0088, 0x3E0C, 0x0714, 0x0C38, 0x3D68, 0x00C4, 0x3FF4, 0x0080, 0x3E20, 0x06BC, 0x0C80, 0x3D6C, 0x00C4, 0x3FF4, 
0x0078, 0x3E34, 0x0668, 0x0CC4, 0x3D70, 0x00C4, 0x3FF4, 0x0074, 0x3E48, 0x0610, 0x0D08, 0x3D78, 0x00C4, 0x3FF0, 0x006C, 0x3E5C, 0x05BC, 0x0D48, 0x3D80, 0x00C4, 0x3FF0, 0x0068, 0x3E74, 0x0568, 0x0D84, 0x3D88, 0x00C0, 0x3FF0, 0x0060, 0x3E88, 0x0514, 0x0DC8, 0x3D94, 0x00BC, 0x3FEC, 0x0058, 0x3E9C, 0x04C0, 0x0E04, 0x3DA4, 0x00B8, 0x3FEC, 0x0054, 0x3EB4, 0x046C, 0x0E38, 0x3DB4, 0x00B4, 0x3FEC, 0x004C, 0x3ECC, 0x0418, 0x0E6C, 0x3DC8, 0x00B0, 0x3FEC, 0x0044, 0x3EE0, 0x03C8, 0x0EA4, 0x3DDC, 0x00A8, 0x3FEC, 0x0040, 0x3EF8, 0x0378, 0x0ED0, 0x3DF4, 0x00A0, 0x3FEC, 0x0038, 0x3F0C, 0x032C, 0x0EFC, 0x3E10, 0x0098, 0x3FEC, 0x0034, 0x3F24, 0x02DC, 0x0F24, 0x3E2C, 0x0090, 0x3FEC, 0x002C, 0x3F38, 0x0294, 0x0F4C, 0x3E48, 0x0088, 0x3FEC, 0x0028, 0x3F50, 0x0248, 0x0F68, 0x3E6C, 0x007C, 0x3FF0, 0x0020, 0x3F64, 0x0200, 0x0F88, 0x3E90, 0x0074, 0x3FF0, 0x001C, 0x3F7C, 0x01B8, 0x0FA4, 0x3EB4, 0x0068, 0x3FF0, 0x0018, 0x3F90, 0x0174, 0x0FBC, 0x3EDC, 0x0058, 0x3FF4, 0x0014, 0x3FA4, 0x0130, 0x0FD0, 0x3F08, 0x004C, 0x3FF4, 0x000C, 0x3FB8, 0x00F0, 0x0FE4, 0x3F34, 0x003C, 0x3FF8, 0x0008, 0x3FCC, 0x00B0, 0x0FF0, 0x3F64, 0x0030, 0x3FF8, 0x0004, 0x3FDC, 0x0070, 0x0FFC, 0x3F98, 0x0020, 0x3FFC, 0x0000, 0x3FF0, 0x0038, 0x0FFC, 0x3FCC, 0x0010, 0x0000, 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000 }; //========================================= // <num_taps> = 7 // <num_phases> = 64 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_7tap_64p_116[231] = { 0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000, 0x0024, 0x3E4C, 0x0954, 0x09C0, 0x3E64, 0x0018, 0x0000, 0x002C, 0x3E44, 0x091C, 0x09F4, 0x3E70, 0x0010, 0x0000, 0x0030, 0x3E3C, 0x08E8, 0x0A24, 0x3E80, 0x0008, 0x0000, 0x0034, 0x3E34, 0x08AC, 0x0A5C, 0x3E90, 0x0000, 0x0000, 0x003C, 0x3E30, 0x0870, 0x0A84, 0x3EA0, 0x3FFC, 0x0004, 0x0040, 0x3E28, 0x0838, 0x0AB4, 0x3EB4, 0x3FF4, 0x0004, 0x0044, 0x3E24, 0x07FC, 0x0AE4, 0x3EC8, 0x3FEC, 0x0004, 0x0048, 0x3E24, 0x07C4, 0x0B08, 0x3EDC, 0x3FE4, 0x0008, 0x0048, 0x3E20, 0x0788, 0x0B3C, 0x3EF4, 0x3FD8, 0x0008, 0x004C, 0x3E20, 0x074C, 0x0B60, 0x3F0C, 0x3FD0, 0x000C, 0x0050, 0x3E20, 0x0710, 0x0B8C, 0x3F24, 0x3FC4, 0x000C, 0x0050, 0x3E20, 0x06D4, 0x0BB0, 0x3F40, 0x3FBC, 0x0010, 0x0054, 0x3E24, 0x0698, 0x0BD4, 0x3F5C, 0x3FB0, 0x0010, 0x0054, 0x3E24, 0x065C, 0x0BFC, 0x3F78, 0x3FA4, 0x0014, 0x0054, 0x3E28, 0x0624, 0x0C1C, 0x3F98, 0x3F98, 0x0014, 0x0058, 0x3E2C, 0x05E4, 0x0C3C, 0x3FB8, 0x3F8C, 0x0018, 0x0058, 0x3E34, 0x05A8, 0x0C58, 0x3FD8, 0x3F80, 0x001C, 0x0058, 0x3E38, 0x0570, 0x0C78, 0x3FF8, 0x3F74, 0x001C, 0x0058, 0x3E40, 0x0534, 0x0C94, 0x0018, 0x3F68, 0x0020, 0x0058, 0x3E48, 0x04F4, 0x0CAC, 0x0040, 0x3F5C, 0x0024, 0x0058, 0x3E50, 0x04BC, 0x0CC4, 0x0064, 0x3F50, 0x0024, 0x0054, 0x3E58, 0x0484, 0x0CD8, 0x008C, 0x3F44, 0x0028, 0x0054, 0x3E60, 0x0448, 0x0CEC, 0x00B4, 0x3F38, 0x002C, 0x0054, 0x3E68, 0x0410, 0x0CFC, 0x00E0, 0x3F28, 0x0030, 0x0054, 0x3E74, 0x03D4, 0x0D0C, 0x010C, 0x3F1C, 0x0030, 0x0050, 0x3E7C, 0x03A0, 0x0D18, 0x0138, 0x3F10, 0x0034, 0x0050, 0x3E88, 0x0364, 0x0D24, 0x0164, 0x3F04, 0x0038, 0x004C, 0x3E94, 0x0330, 0x0D30, 0x0194, 0x3EF4, 0x0038, 0x004C, 0x3EA0, 0x02F8, 0x0D34, 0x01C4, 0x3EE8, 0x003C, 0x0048, 0x3EAC, 0x02C0, 0x0D3C, 0x01F4, 0x3EDC, 0x0040, 0x0048, 0x3EB8, 0x0290, 0x0D3C, 0x0224, 0x3ED0, 0x0040, 0x0044, 0x3EC4, 0x0258, 0x0D40, 0x0258, 0x3EC4, 0x0044 }; //========================================= // <num_taps> = 7 // <num_phases> = 64 // <scale_ratio> = 
1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_7tap_64p_149[231] = { 0x3F68, 0x3FEC, 0x08A8, 0x08AC, 0x3FF0, 0x3F68, 0x0000, 0x3F70, 0x3FDC, 0x0888, 0x08CC, 0x0000, 0x3F60, 0x0000, 0x3F74, 0x3FC8, 0x0868, 0x08F0, 0x0014, 0x3F58, 0x0000, 0x3F7C, 0x3FB4, 0x0844, 0x0908, 0x002C, 0x3F54, 0x0004, 0x3F84, 0x3FA4, 0x0820, 0x0924, 0x0044, 0x3F4C, 0x0004, 0x3F88, 0x3F90, 0x0800, 0x0944, 0x005C, 0x3F44, 0x0004, 0x3F90, 0x3F80, 0x07D8, 0x095C, 0x0074, 0x3F40, 0x0008, 0x3F98, 0x3F70, 0x07B0, 0x097C, 0x008C, 0x3F38, 0x0008, 0x3F9C, 0x3F60, 0x0790, 0x0994, 0x00A8, 0x3F30, 0x0008, 0x3FA4, 0x3F54, 0x0764, 0x09B0, 0x00C4, 0x3F28, 0x0008, 0x3FA8, 0x3F48, 0x0740, 0x09C4, 0x00DC, 0x3F24, 0x000C, 0x3FB0, 0x3F38, 0x0718, 0x09DC, 0x00FC, 0x3F1C, 0x000C, 0x3FB4, 0x3F2C, 0x06F0, 0x09F4, 0x0118, 0x3F18, 0x000C, 0x3FBC, 0x3F24, 0x06C8, 0x0A08, 0x0134, 0x3F10, 0x000C, 0x3FC0, 0x3F18, 0x06A0, 0x0A1C, 0x0154, 0x3F08, 0x0010, 0x3FC8, 0x3F10, 0x0678, 0x0A2C, 0x0170, 0x3F04, 0x0010, 0x3FCC, 0x3F04, 0x0650, 0x0A40, 0x0190, 0x3F00, 0x0010, 0x3FD0, 0x3EFC, 0x0628, 0x0A54, 0x01B0, 0x3EF8, 0x0010, 0x3FD4, 0x3EF4, 0x0600, 0x0A64, 0x01D0, 0x3EF4, 0x0010, 0x3FDC, 0x3EEC, 0x05D8, 0x0A6C, 0x01F4, 0x3EF0, 0x0010, 0x3FE0, 0x3EE8, 0x05B0, 0x0A7C, 0x0214, 0x3EE8, 0x0010, 0x3FE4, 0x3EE0, 0x0588, 0x0A88, 0x0238, 0x3EE4, 0x0010, 0x3FE8, 0x3EDC, 0x055C, 0x0A98, 0x0258, 0x3EE0, 0x0010, 0x3FEC, 0x3ED8, 0x0534, 0x0AA0, 0x027C, 0x3EDC, 0x0010, 0x3FF0, 0x3ED4, 0x050C, 0x0AAC, 0x02A0, 0x3ED8, 0x000C, 0x3FF4, 0x3ED0, 0x04E4, 0x0AB4, 0x02C4, 0x3ED4, 0x000C, 0x3FF4, 0x3ECC, 0x04C0, 0x0ABC, 0x02E8, 0x3ED0, 0x000C, 0x3FF8, 0x3ECC, 0x0494, 0x0AC0, 0x030C, 0x3ED0, 0x000C, 0x3FFC, 0x3EC8, 0x046C, 0x0AC8, 0x0334, 0x3ECC, 0x0008, 0x0000, 0x3EC8, 0x0444, 0x0AC8, 0x0358, 0x3ECC, 0x0008, 0x0000, 0x3EC8, 0x041C, 0x0ACC, 0x0380, 0x3EC8, 0x0008, 0x0000, 0x3EC8, 0x03F4, 0x0AD0, 0x03A8, 0x3EC8, 0x0004, 0x0004, 0x3EC8, 0x03CC, 0x0AD0, 0x03CC, 0x3EC8, 0x0004 }; //========================================= // <num_taps> = 7 // <num_phases> = 64 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_7tap_64p_183[231] = { 0x3FA4, 0x01E8, 0x0674, 0x0674, 0x01E8, 0x3FA4, 0x0000, 0x3FA4, 0x01D4, 0x0668, 0x0684, 0x01F8, 0x3FA4, 0x0000, 0x3FA4, 0x01C4, 0x0658, 0x0690, 0x0208, 0x3FA8, 0x0000, 0x3FA0, 0x01B4, 0x064C, 0x06A0, 0x021C, 0x3FA8, 0x3FFC, 0x3FA0, 0x01A4, 0x063C, 0x06AC, 0x022C, 0x3FAC, 0x3FFC, 0x3FA0, 0x0194, 0x0630, 0x06B4, 0x0240, 0x3FAC, 0x3FFC, 0x3FA0, 0x0184, 0x0620, 0x06C4, 0x0250, 0x3FB0, 0x3FF8, 0x3FA0, 0x0174, 0x0614, 0x06CC, 0x0264, 0x3FB0, 0x3FF8, 0x3FA0, 0x0164, 0x0604, 0x06D8, 0x0278, 0x3FB4, 0x3FF4, 0x3FA0, 0x0154, 0x05F4, 0x06E4, 0x0288, 0x3FB8, 0x3FF4, 0x3FA0, 0x0148, 0x05E4, 0x06EC, 0x029C, 0x3FBC, 0x3FF0, 0x3FA0, 0x0138, 0x05D4, 0x06F4, 0x02B0, 0x3FC0, 0x3FF0, 0x3FA0, 0x0128, 0x05C4, 0x0704, 0x02C4, 0x3FC0, 0x3FEC, 0x3FA0, 0x011C, 0x05B4, 0x0708, 0x02D8, 0x3FC4, 0x3FEC, 0x3FA4, 0x010C, 0x05A4, 0x0714, 0x02E8, 0x3FC8, 0x3FE8, 0x3FA4, 0x0100, 0x0590, 0x0718, 0x02FC, 0x3FD0, 0x3FE8, 0x3FA4, 0x00F0, 0x0580, 0x0724, 0x0310, 0x3FD4, 0x3FE4, 0x3FA4, 0x00E4, 0x056C, 0x072C, 0x0324, 0x3FD8, 0x3FE4, 0x3FA8, 0x00D8, 0x055C, 0x0730, 0x0338, 0x3FDC, 0x3FE0, 0x3FA8, 0x00CC, 0x0548, 0x0738, 0x034C, 0x3FE4, 0x3FDC, 0x3FA8, 0x00BC, 0x0538, 0x0740, 0x0360, 0x3FE8, 0x3FDC, 
0x3FAC, 0x00B0, 0x0528, 0x0744, 0x0374, 0x3FEC, 0x3FD8, 0x3FAC, 0x00A4, 0x0514, 0x0748, 0x0388, 0x3FF4, 0x3FD8, 0x3FB0, 0x0098, 0x0500, 0x074C, 0x039C, 0x3FFC, 0x3FD4, 0x3FB0, 0x0090, 0x04EC, 0x0750, 0x03B0, 0x0000, 0x3FD4, 0x3FB0, 0x0084, 0x04DC, 0x0758, 0x03C4, 0x0004, 0x3FD0, 0x3FB4, 0x0078, 0x04CC, 0x0758, 0x03D8, 0x000C, 0x3FCC, 0x3FB4, 0x006C, 0x04B8, 0x075C, 0x03EC, 0x0014, 0x3FCC, 0x3FB8, 0x0064, 0x04A0, 0x0760, 0x0400, 0x001C, 0x3FC8, 0x3FB8, 0x0058, 0x0490, 0x0760, 0x0414, 0x0024, 0x3FC8, 0x3FBC, 0x0050, 0x047C, 0x0760, 0x0428, 0x002C, 0x3FC4, 0x3FBC, 0x0048, 0x0464, 0x0764, 0x043C, 0x0034, 0x3FC4, 0x3FC0, 0x003C, 0x0454, 0x0764, 0x0450, 0x003C, 0x3FC0 }; //========================================= // <num_taps> = 8 // <num_phases> = 64 // <scale_ratio> = 0.83333 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_8tap_64p_upscale[264] = { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3FFC, 0x0014, 0x3FC8, 0x1000, 0x0038, 0x3FEC, 0x0004, 0x0000, 0x3FF4, 0x0024, 0x3F94, 0x0FFC, 0x0074, 0x3FD8, 0x000C, 0x0000, 0x3FF0, 0x0038, 0x3F60, 0x0FEC, 0x00B4, 0x3FC4, 0x0014, 0x0000, 0x3FEC, 0x004C, 0x3F2C, 0x0FE4, 0x00F4, 0x3FAC, 0x0018, 0x0000, 0x3FE4, 0x005C, 0x3F00, 0x0FD4, 0x0138, 0x3F94, 0x0020, 0x0000, 0x3FE0, 0x006C, 0x3ED0, 0x0FC4, 0x017C, 0x3F7C, 0x0028, 0x0000, 0x3FDC, 0x007C, 0x3EA8, 0x0FA4, 0x01C4, 0x3F68, 0x0030, 0x0000, 0x3FD8, 0x0088, 0x3E80, 0x0F90, 0x020C, 0x3F50, 0x0038, 0x3FFC, 0x3FD4, 0x0098, 0x3E58, 0x0F70, 0x0258, 0x3F38, 0x0040, 0x3FFC, 0x3FD0, 0x00A4, 0x3E34, 0x0F54, 0x02A0, 0x3F1C, 0x004C, 0x3FFC, 0x3FD0, 0x00B0, 0x3E14, 0x0F28, 0x02F0, 0x3F04, 0x0054, 0x3FFC, 0x3FCC, 0x00BC, 0x3DF4, 0x0F08, 0x033C, 0x3EEC, 0x005C, 0x3FF8, 0x3FC8, 0x00C8, 0x3DD8, 0x0EDC, 0x038C, 0x3ED4, 0x0064, 0x3FF8, 0x3FC8, 0x00D0, 0x3DC0, 0x0EAC, 0x03E0, 0x3EBC, 0x006C, 0x3FF4, 0x3FC4, 0x00D8, 0x3DA8, 0x0E7C, 0x0430, 0x3EA4, 0x0078, 0x3FF4, 0x3FC4, 0x00E0, 0x3D94, 0x0E48, 0x0484, 0x3E8C, 0x0080, 0x3FF0, 0x3FC4, 0x00E8, 0x3D80, 0x0E10, 0x04D8, 0x3E74, 0x0088, 0x3FF0, 0x3FC4, 0x00F0, 0x3D70, 0x0DD8, 0x052C, 0x3E5C, 0x0090, 0x3FEC, 0x3FC0, 0x00F4, 0x3D60, 0x0DA0, 0x0584, 0x3E44, 0x0098, 0x3FEC, 0x3FC0, 0x00F8, 0x3D54, 0x0D68, 0x05D8, 0x3E2C, 0x00A0, 0x3FE8, 0x3FC0, 0x00FC, 0x3D48, 0x0D20, 0x0630, 0x3E18, 0x00AC, 0x3FE8, 0x3FC0, 0x0100, 0x3D40, 0x0CE0, 0x0688, 0x3E00, 0x00B4, 0x3FE4, 0x3FC4, 0x0100, 0x3D3C, 0x0C98, 0x06DC, 0x3DEC, 0x00BC, 0x3FE4, 0x3FC4, 0x0100, 0x3D38, 0x0C58, 0x0734, 0x3DD8, 0x00C0, 0x3FE0, 0x3FC4, 0x0104, 0x3D38, 0x0C0C, 0x078C, 0x3DC4, 0x00C8, 0x3FDC, 0x3FC4, 0x0100, 0x3D38, 0x0BC4, 0x07E4, 0x3DB0, 0x00D0, 0x3FDC, 0x3FC4, 0x0100, 0x3D38, 0x0B78, 0x083C, 0x3DA0, 0x00D8, 0x3FD8, 0x3FC8, 0x0100, 0x3D3C, 0x0B28, 0x0890, 0x3D90, 0x00DC, 0x3FD8, 0x3FC8, 0x00FC, 0x3D40, 0x0ADC, 0x08E8, 0x3D80, 0x00E4, 0x3FD4, 0x3FCC, 0x00FC, 0x3D48, 0x0A84, 0x093C, 0x3D74, 0x00E8, 0x3FD4, 0x3FCC, 0x00F8, 0x3D50, 0x0A38, 0x0990, 0x3D64, 0x00F0, 0x3FD0, 0x3FD0, 0x00F4, 0x3D58, 0x09E0, 0x09E4, 0x3D5C, 0x00F4, 0x3FD0 }; //========================================= // <num_taps> = 8 // <num_phases> = 64 // <scale_ratio> = 1.16666 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_8tap_64p_116[264] = { 0x0080, 0x3E90, 0x0268, 0x0D14, 0x0264, 0x3E90, 0x0080, 0x0000, 0x007C, 0x3E9C, 0x0238, 0x0D14, 0x0298, 0x3E84, 0x0080, 0x0000, 0x0078, 
0x3EAC, 0x0200, 0x0D10, 0x02D0, 0x3E78, 0x0084, 0x0000, 0x0078, 0x3EB8, 0x01D0, 0x0D0C, 0x0304, 0x3E6C, 0x0084, 0x0000, 0x0074, 0x3EC8, 0x01A0, 0x0D00, 0x033C, 0x3E60, 0x0088, 0x0000, 0x0070, 0x3ED4, 0x0170, 0x0D00, 0x0374, 0x3E54, 0x0088, 0x3FFC, 0x006C, 0x3EE4, 0x0140, 0x0CF8, 0x03AC, 0x3E48, 0x0088, 0x3FFC, 0x006C, 0x3EF0, 0x0114, 0x0CE8, 0x03E4, 0x3E3C, 0x008C, 0x3FFC, 0x0068, 0x3F00, 0x00E8, 0x0CD8, 0x041C, 0x3E34, 0x008C, 0x3FFC, 0x0064, 0x3F10, 0x00BC, 0x0CCC, 0x0454, 0x3E28, 0x008C, 0x3FFC, 0x0060, 0x3F1C, 0x0090, 0x0CBC, 0x0490, 0x3E20, 0x008C, 0x3FFC, 0x005C, 0x3F2C, 0x0068, 0x0CA4, 0x04CC, 0x3E18, 0x008C, 0x3FFC, 0x0058, 0x3F38, 0x0040, 0x0C94, 0x0504, 0x3E10, 0x008C, 0x3FFC, 0x0054, 0x3F48, 0x001C, 0x0C7C, 0x0540, 0x3E08, 0x0088, 0x3FFC, 0x0050, 0x3F54, 0x3FF8, 0x0C60, 0x057C, 0x3E04, 0x0088, 0x3FFC, 0x004C, 0x3F64, 0x3FD4, 0x0C44, 0x05B8, 0x3DFC, 0x0088, 0x3FFC, 0x0048, 0x3F70, 0x3FB4, 0x0C28, 0x05F4, 0x3DF8, 0x0084, 0x3FFC, 0x0044, 0x3F80, 0x3F90, 0x0C0C, 0x0630, 0x3DF4, 0x0080, 0x3FFC, 0x0040, 0x3F8C, 0x3F70, 0x0BE8, 0x066C, 0x3DF4, 0x0080, 0x3FFC, 0x003C, 0x3F9C, 0x3F50, 0x0BC8, 0x06A8, 0x3DF0, 0x007C, 0x3FFC, 0x0038, 0x3FA8, 0x3F34, 0x0BA0, 0x06E4, 0x3DF0, 0x0078, 0x0000, 0x0034, 0x3FB4, 0x3F18, 0x0B80, 0x071C, 0x3DF0, 0x0074, 0x0000, 0x0030, 0x3FC0, 0x3EFC, 0x0B5C, 0x0758, 0x3DF0, 0x0070, 0x0000, 0x002C, 0x3FCC, 0x3EE4, 0x0B34, 0x0794, 0x3DF4, 0x0068, 0x0000, 0x002C, 0x3FDC, 0x3ECC, 0x0B08, 0x07CC, 0x3DF4, 0x0064, 0x0000, 0x0028, 0x3FE4, 0x3EB4, 0x0AE0, 0x0808, 0x3DF8, 0x0060, 0x0000, 0x0024, 0x3FF0, 0x3EA0, 0x0AB0, 0x0840, 0x3E00, 0x0058, 0x0004, 0x0020, 0x3FFC, 0x3E90, 0x0A84, 0x0878, 0x3E04, 0x0050, 0x0004, 0x001C, 0x0004, 0x3E7C, 0x0A54, 0x08B0, 0x3E0C, 0x004C, 0x0008, 0x0018, 0x000C, 0x3E68, 0x0A28, 0x08E8, 0x3E18, 0x0044, 0x0008, 0x0018, 0x0018, 0x3E54, 0x09F4, 0x0920, 0x3E20, 0x003C, 0x000C, 0x0014, 0x0020, 0x3E48, 0x09C0, 0x0954, 0x3E2C, 0x0034, 0x0010, 0x0010, 0x002C, 0x3E3C, 0x098C, 0x0988, 0x3E38, 0x002C, 0x0010 }; //========================================= // <num_taps> = 8 // <num_phases> = 64 // <scale_ratio> = 1.49999 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_8tap_64p_149[264] = { 0x0008, 0x3E8C, 0x03F8, 0x0AE8, 0x03F8, 0x3E8C, 0x0008, 0x0000, 0x000C, 0x3E8C, 0x03D0, 0x0AE8, 0x0420, 0x3E90, 0x0000, 0x0000, 0x000C, 0x3E8C, 0x03AC, 0x0AE8, 0x0444, 0x3E90, 0x0000, 0x0000, 0x0010, 0x3E90, 0x0384, 0x0AE0, 0x046C, 0x3E94, 0x3FFC, 0x0000, 0x0014, 0x3E90, 0x035C, 0x0ADC, 0x0494, 0x3E94, 0x3FF8, 0x0004, 0x0018, 0x3E90, 0x0334, 0x0AD8, 0x04BC, 0x3E98, 0x3FF4, 0x0004, 0x001C, 0x3E94, 0x0310, 0x0AD0, 0x04E4, 0x3E9C, 0x3FEC, 0x0004, 0x0020, 0x3E98, 0x02E8, 0x0AC4, 0x050C, 0x3EA0, 0x3FE8, 0x0008, 0x0020, 0x3E98, 0x02C4, 0x0AC0, 0x0534, 0x3EA4, 0x3FE4, 0x0008, 0x0024, 0x3E9C, 0x02A0, 0x0AB4, 0x055C, 0x3EAC, 0x3FDC, 0x0008, 0x0024, 0x3EA0, 0x027C, 0x0AA8, 0x0584, 0x3EB0, 0x3FD8, 0x000C, 0x0028, 0x3EA4, 0x0258, 0x0A9C, 0x05AC, 0x3EB8, 0x3FD0, 0x000C, 0x0028, 0x3EA8, 0x0234, 0x0A90, 0x05D4, 0x3EC0, 0x3FC8, 0x0010, 0x002C, 0x3EAC, 0x0210, 0x0A80, 0x05FC, 0x3EC8, 0x3FC4, 0x0010, 0x002C, 0x3EB4, 0x01F0, 0x0A70, 0x0624, 0x3ED0, 0x3FBC, 0x0010, 0x002C, 0x3EB8, 0x01CC, 0x0A60, 0x064C, 0x3EDC, 0x3FB4, 0x0014, 0x0030, 0x3EBC, 0x01A8, 0x0A50, 0x0674, 0x3EE4, 0x3FB0, 0x0014, 0x0030, 0x3EC4, 0x0188, 0x0A38, 0x069C, 0x3EF0, 0x3FA8, 0x0018, 0x0030, 0x3ECC, 0x0168, 0x0A28, 0x06C0, 0x3EFC, 0x3FA0, 0x0018, 0x0030, 0x3ED0, 0x0148, 0x0A14, 0x06E8, 0x3F08, 
0x3F98, 0x001C, 0x0030, 0x3ED8, 0x012C, 0x0A00, 0x070C, 0x3F14, 0x3F90, 0x001C, 0x0034, 0x3EE0, 0x0108, 0x09E4, 0x0734, 0x3F24, 0x3F8C, 0x001C, 0x0034, 0x3EE4, 0x00EC, 0x09CC, 0x0758, 0x3F34, 0x3F84, 0x0020, 0x0034, 0x3EEC, 0x00D0, 0x09B8, 0x077C, 0x3F40, 0x3F7C, 0x0020, 0x0034, 0x3EF4, 0x00B4, 0x0998, 0x07A4, 0x3F50, 0x3F74, 0x0024, 0x0030, 0x3EFC, 0x0098, 0x0980, 0x07C8, 0x3F64, 0x3F6C, 0x0024, 0x0030, 0x3F04, 0x0080, 0x0968, 0x07E8, 0x3F74, 0x3F64, 0x0024, 0x0030, 0x3F0C, 0x0060, 0x094C, 0x080C, 0x3F88, 0x3F5C, 0x0028, 0x0030, 0x3F14, 0x0048, 0x0930, 0x0830, 0x3F98, 0x3F54, 0x0028, 0x0030, 0x3F1C, 0x0030, 0x0914, 0x0850, 0x3FAC, 0x3F4C, 0x0028, 0x0030, 0x3F24, 0x0018, 0x08F0, 0x0874, 0x3FC0, 0x3F44, 0x002C, 0x002C, 0x3F2C, 0x0000, 0x08D4, 0x0894, 0x3FD8, 0x3F3C, 0x002C, 0x002C, 0x3F34, 0x3FEC, 0x08B4, 0x08B4, 0x3FEC, 0x3F34, 0x002C }; //========================================= // <num_taps> = 8 // <num_phases> = 64 // <scale_ratio> = 1.83332 (input/output) // <sharpness> = 0 // <CoefType> = ModifiedLanczos // <CoefQuant> = 1.10 // <CoefOut> = 1.12 //========================================= static const uint16_t filter_8tap_64p_183[264] = { 0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000, 0x3F88, 0x003C, 0x0468, 0x076C, 0x0490, 0x0054, 0x3F84, 0x0000, 0x3F8C, 0x0034, 0x0454, 0x0768, 0x04A4, 0x005C, 0x3F84, 0x0000, 0x3F8C, 0x0028, 0x0444, 0x076C, 0x04B4, 0x0068, 0x3F80, 0x0000, 0x3F90, 0x0020, 0x042C, 0x0768, 0x04C8, 0x0074, 0x3F80, 0x0000, 0x3F90, 0x0018, 0x041C, 0x0764, 0x04DC, 0x0080, 0x3F7C, 0x0000, 0x3F94, 0x0010, 0x0408, 0x075C, 0x04F0, 0x008C, 0x3F7C, 0x0000, 0x3F94, 0x0004, 0x03F8, 0x0760, 0x0500, 0x0098, 0x3F7C, 0x3FFC, 0x3F98, 0x0000, 0x03E0, 0x075C, 0x0514, 0x00A4, 0x3F78, 0x3FFC, 0x3F9C, 0x3FF8, 0x03CC, 0x0754, 0x0528, 0x00B0, 0x3F78, 0x3FFC, 0x3F9C, 0x3FF0, 0x03B8, 0x0754, 0x0538, 0x00BC, 0x3F78, 0x3FFC, 0x3FA0, 0x3FE8, 0x03A4, 0x0750, 0x054C, 0x00CC, 0x3F74, 0x3FF8, 0x3FA4, 0x3FE0, 0x0390, 0x074C, 0x055C, 0x00D8, 0x3F74, 0x3FF8, 0x3FA4, 0x3FDC, 0x037C, 0x0744, 0x0570, 0x00E4, 0x3F74, 0x3FF8, 0x3FA8, 0x3FD4, 0x0368, 0x0740, 0x0580, 0x00F4, 0x3F74, 0x3FF4, 0x3FA8, 0x3FCC, 0x0354, 0x073C, 0x0590, 0x0104, 0x3F74, 0x3FF4, 0x3FAC, 0x3FC8, 0x0340, 0x0730, 0x05A4, 0x0110, 0x3F74, 0x3FF4, 0x3FB0, 0x3FC0, 0x0330, 0x0728, 0x05B4, 0x0120, 0x3F74, 0x3FF0, 0x3FB0, 0x3FBC, 0x031C, 0x0724, 0x05C4, 0x0130, 0x3F70, 0x3FF0, 0x3FB4, 0x3FB4, 0x0308, 0x0720, 0x05D4, 0x013C, 0x3F70, 0x3FF0, 0x3FB8, 0x3FB0, 0x02F4, 0x0714, 0x05E4, 0x014C, 0x3F74, 0x3FEC, 0x3FB8, 0x3FAC, 0x02E0, 0x0708, 0x05F8, 0x015C, 0x3F74, 0x3FEC, 0x3FBC, 0x3FA8, 0x02CC, 0x0704, 0x0604, 0x016C, 0x3F74, 0x3FE8, 0x3FC0, 0x3FA0, 0x02BC, 0x06F8, 0x0614, 0x017C, 0x3F74, 0x3FE8, 0x3FC0, 0x3F9C, 0x02A8, 0x06F4, 0x0624, 0x018C, 0x3F74, 0x3FE4, 0x3FC4, 0x3F98, 0x0294, 0x06E8, 0x0634, 0x019C, 0x3F74, 0x3FE4, 0x3FC8, 0x3F94, 0x0284, 0x06D8, 0x0644, 0x01AC, 0x3F78, 0x3FE0, 0x3FC8, 0x3F90, 0x0270, 0x06D4, 0x0650, 0x01BC, 0x3F78, 0x3FE0, 0x3FCC, 0x3F8C, 0x025C, 0x06C8, 0x0660, 0x01D0, 0x3F78, 0x3FDC, 0x3FCC, 0x3F8C, 0x024C, 0x06B8, 0x066C, 0x01E0, 0x3F7C, 0x3FDC, 0x3FD0, 0x3F88, 0x0238, 0x06B0, 0x067C, 0x01F0, 0x3F7C, 0x3FD8, 0x3FD4, 0x3F84, 0x0228, 0x069C, 0x0688, 0x0204, 0x3F80, 0x3FD8, 0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4 }; const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_3tap_16p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_3tap_16p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 
3).value) return filter_3tap_16p_149; else return filter_3tap_16p_183; } const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_3tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_3tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_3tap_64p_149; else return filter_3tap_64p_183; } const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_4tap_16p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_4tap_16p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_4tap_16p_149; else return filter_4tap_16p_183; } const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_4tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_4tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_4tap_64p_149; else return filter_4tap_64p_183; } const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_5tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_5tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_5tap_64p_149; else return filter_5tap_64p_183; } const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_6tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_6tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_6tap_64p_149; else return filter_6tap_64p_183; } const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_7tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_7tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_7tap_64p_149; else return filter_7tap_64p_183; } const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_8tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_8tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_8tap_64p_149; else return filter_8tap_64p_183; } const uint16_t *get_filter_2tap_16p(void) { return filter_2tap_16p; } const uint16_t *get_filter_2tap_64p(void) { return filter_2tap_64p; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
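The headers embedded in dce_scl_filters.c above describe the packed coefficients as s1.10 quantized, s1.12 output values. Consistent with that, each uint16_t appears to hold a 14-bit two's-complement number with 12 fractional bits: 0x1000 is 1.0, values near 0x3FFF are small negatives, and the taps of any one phase sum to 1.0. Each N-tap, P-phase table also stores N * (P/2 + 1) entries, presumably keeping only half the phases plus the centre phase because the kernel is symmetric. The following standalone sketch decodes one phase of filter_3tap_16p_upscale under those assumptions; it is illustrative only, not driver code, and the helper names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Decode one packed coefficient, assuming s1.12: sign-extend 14 bits, scale by 2^-12. */
static double decode_s1_12(uint16_t raw)
{
	int32_t v = raw & 0x3FFF;

	if (v & 0x2000)
		v -= 0x4000;
	return (double)v / 4096.0;
}

int main(void)
{
	/* One phase of filter_3tap_16p_upscale: 0x06AC, 0x0978, 0x3FDC. */
	const uint16_t phase[3] = { 0x06AC, 0x0978, 0x3FDC };
	double sum = 0.0;
	int i;

	for (i = 0; i < 3; i++)
		sum += decode_s1_12(phase[i]);

	/* Expect roughly 0.417 + 0.592 - 0.009 = 1.0 (0x1000 in s1.12). */
	printf("phase sum = %f\n", sum);
	return 0;
}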
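The get_filter_*() helpers at the end of the same file compare the fixed31_32 scale ratio against 1, 4/3 and 5/3. Those cut points fall midway between the design ratios of the tables (0.8333, 1.1666, 1.4999 and 1.8333), so in effect each helper returns the table whose design ratio is nearest to the requested ratio. A minimal floating-point stand-in for that selection, with a hypothetical name, is sketched below; the real code compares fixed-point dc_fixpt values.

/* Illustrative only: mirrors the ratio cut points used by the get_filter_*() helpers. */
static int select_scl_filter_index(double ratio)
{
	if (ratio < 1.0)
		return 0;	/* *_upscale, designed for ratio 0.8333 */
	else if (ratio < 4.0 / 3.0)
		return 1;	/* *_116, designed for ratio 1.1666 */
	else if (ratio < 5.0 / 3.0)
		return 2;	/* *_149, designed for ratio 1.4999 */
	else
		return 3;	/* *_183, designed for ratio 1.8333 */
}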
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce_hwseq.h" #include "reg_helper.h" #include "hw_sequencer_private.h" #include "core_types.h" #define CTX \ hws->ctx #define REG(reg)\ hws->regs->reg #undef FN #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name void dce_enable_fe_clock(struct dce_hwseq *hws, unsigned int fe_inst, bool enable) { REG_UPDATE(DCFE_CLOCK_CONTROL[fe_inst], DCFE_CLOCK_ENABLE, enable); } void dce_pipe_control_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) { uint32_t lock_val = lock ? 1 : 0; uint32_t dcp_grph, scl, blnd, update_lock_mode, val; struct dce_hwseq *hws = dc->hwseq; /* Not lock pipe when blank */ if (lock && pipe->stream_res.tg->funcs->is_blanked && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg)) return; val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph, BLND_SCL_V_UPDATE_LOCK, &scl, BLND_BLND_V_UPDATE_LOCK, &blnd, BLND_V_UPDATE_LOCK_MODE, &update_lock_mode); dcp_grph = lock_val; scl = lock_val; blnd = lock_val; update_lock_mode = lock_val; REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val, BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph, BLND_SCL_V_UPDATE_LOCK, scl); if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0) REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val, BLND_BLND_V_UPDATE_LOCK, blnd, BLND_V_UPDATE_LOCK_MODE, update_lock_mode); if (hws->wa.blnd_crtc_trigger) { if (!lock) { uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst]); REG_WRITE(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst], value); } } } #if defined(CONFIG_DRM_AMD_DC_SI) void dce60_pipe_control_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) { /* DCE6 has no BLND_V_UPDATE_LOCK register */ } #endif void dce_set_blender_mode(struct dce_hwseq *hws, unsigned int blnd_inst, enum blnd_mode mode) { uint32_t feedthrough = 1; uint32_t blnd_mode = 0; uint32_t multiplied_mode = 0; uint32_t alpha_mode = 2; switch (mode) { case BLND_MODE_OTHER_PIPE: feedthrough = 0; blnd_mode = 1; alpha_mode = 0; break; case BLND_MODE_BLENDING: feedthrough = 0; blnd_mode = 2; alpha_mode = 0; multiplied_mode = 1; break; case BLND_MODE_CURRENT_PIPE: default: if (REG(BLND_CONTROL[blnd_inst]) == REG(BLNDV_CONTROL) || blnd_inst == 0) feedthrough = 0; break; } REG_UPDATE(BLND_CONTROL[blnd_inst], BLND_MODE, blnd_mode); if (hws->masks->BLND_ALPHA_MODE != 0) { REG_UPDATE_3(BLND_CONTROL[blnd_inst], 
BLND_FEEDTHROUGH_EN, feedthrough, BLND_ALPHA_MODE, alpha_mode, BLND_MULTIPLIED_MODE, multiplied_mode); } } static void dce_disable_sram_shut_down(struct dce_hwseq *hws) { if (REG(DC_MEM_GLOBAL_PWR_REQ_CNTL)) REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1); } static void dce_underlay_clock_enable(struct dce_hwseq *hws) { /* todo: why do we need this at boot? is dce_enable_fe_clock enough? */ if (REG(DCFEV_CLOCK_CONTROL)) REG_UPDATE(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, 1); } static void enable_hw_base_light_sleep(void) { /* TODO: implement */ } static void disable_sw_manual_control_light_sleep(void) { /* TODO: implement */ } void dce_clock_gating_power_up(struct dce_hwseq *hws, bool enable) { if (enable) { enable_hw_base_light_sleep(); disable_sw_manual_control_light_sleep(); } else { dce_disable_sram_shut_down(hws); dce_underlay_clock_enable(hws); } } void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws, struct clock_source *clk_src, unsigned int tg_inst) { if (clk_src->id == CLOCK_SOURCE_ID_DP_DTO || clk_src->dp_clk_src) { REG_UPDATE(PIXEL_RATE_CNTL[tg_inst], DP_DTO0_ENABLE, 1); } else if (clk_src->id >= CLOCK_SOURCE_COMBO_PHY_PLL0) { uint32_t rate_source = clk_src->id - CLOCK_SOURCE_COMBO_PHY_PLL0; REG_UPDATE_2(PHYPLL_PIXEL_RATE_CNTL[tg_inst], PHYPLL_PIXEL_RATE_SOURCE, rate_source, PIXEL_RATE_PLL_SOURCE, 0); REG_UPDATE(PIXEL_RATE_CNTL[tg_inst], DP_DTO0_ENABLE, 0); } else if (clk_src->id <= CLOCK_SOURCE_ID_PLL2) { uint32_t rate_source = clk_src->id - CLOCK_SOURCE_ID_PLL0; REG_UPDATE_2(PIXEL_RATE_CNTL[tg_inst], PIXEL_RATE_SOURCE, rate_source, DP_DTO0_ENABLE, 0); if (REG(PHYPLL_PIXEL_RATE_CNTL[tg_inst])) REG_UPDATE(PHYPLL_PIXEL_RATE_CNTL[tg_inst], PIXEL_RATE_PLL_SOURCE, 1); } else { DC_ERR("Unknown clock source. clk_src id: %d, TG_inst: %d", clk_src->id, tg_inst); } } /* Only use LUT for 8 bit formats */ bool dce_use_lut(enum surface_pixel_format format) { switch (format) { case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: return true; default: return false; } }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
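dce_set_blender_mode() in dce_hwseq.c above reduces each blend mode to four register fields: BLND_MODE is always written, while BLND_FEEDTHROUGH_EN, BLND_ALPHA_MODE and BLND_MULTIPLIED_MODE are written only on ASICs whose masks expose BLND_ALPHA_MODE, with feedthrough additionally forced off for blender instance 0 or the BLNDV instance. The lookup below is a hedged restatement of that switch for readability only; the struct and table are hypothetical and are not indexed by the real enum values.

#include <stdint.h>

struct blnd_mode_fields {
	uint32_t blnd_mode;		/* always programmed */
	uint32_t feedthrough_en;	/* next three only if BLND_ALPHA_MODE exists */
	uint32_t alpha_mode;
	uint32_t multiplied_mode;
};

static const struct blnd_mode_fields blnd_mode_table[] = {
	/* BLND_MODE_CURRENT_PIPE (default); feedthrough cleared for blender 0 / BLNDV */
	{ .blnd_mode = 0, .feedthrough_en = 1, .alpha_mode = 2, .multiplied_mode = 0 },
	/* BLND_MODE_OTHER_PIPE */
	{ .blnd_mode = 1, .feedthrough_en = 0, .alpha_mode = 0, .multiplied_mode = 0 },
	/* BLND_MODE_BLENDING */
	{ .blnd_mode = 2, .feedthrough_en = 0, .alpha_mode = 0, .multiplied_mode = 1 },
};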
/* * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dmub_psr.h" #include "dc.h" #include "dc_dmub_srv.h" #include "dmub/dmub_srv.h" #include "core_types.h" #define DC_TRACE_LEVEL_MESSAGE(...) do {} while (0) /* do nothing */ #define MAX_PIPES 6 static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3}; static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5}; /* * Convert dmcub psr state to dmcu psr state. */ static enum dc_psr_state convert_psr_state(uint32_t raw_state) { enum dc_psr_state state = PSR_STATE0; if (raw_state == 0) state = PSR_STATE0; else if (raw_state == 0x10) state = PSR_STATE1; else if (raw_state == 0x11) state = PSR_STATE1a; else if (raw_state == 0x20) state = PSR_STATE2; else if (raw_state == 0x21) state = PSR_STATE2a; else if (raw_state == 0x22) state = PSR_STATE2b; else if (raw_state == 0x30) state = PSR_STATE3; else if (raw_state == 0x31) state = PSR_STATE3Init; else if (raw_state == 0x40) state = PSR_STATE4; else if (raw_state == 0x41) state = PSR_STATE4a; else if (raw_state == 0x42) state = PSR_STATE4b; else if (raw_state == 0x43) state = PSR_STATE4c; else if (raw_state == 0x44) state = PSR_STATE4d; else if (raw_state == 0x50) state = PSR_STATE5; else if (raw_state == 0x51) state = PSR_STATE5a; else if (raw_state == 0x52) state = PSR_STATE5b; else if (raw_state == 0x53) state = PSR_STATE5c; else if (raw_state == 0x4A) state = PSR_STATE4_FULL_FRAME; else if (raw_state == 0x4B) state = PSR_STATE4a_FULL_FRAME; else if (raw_state == 0x4C) state = PSR_STATE4b_FULL_FRAME; else if (raw_state == 0x4D) state = PSR_STATE4c_FULL_FRAME; else if (raw_state == 0x4E) state = PSR_STATE4_FULL_FRAME_POWERUP; else if (raw_state == 0x4F) state = PSR_STATE4_FULL_FRAME_HW_LOCK; else if (raw_state == 0x60) state = PSR_STATE_HWLOCK_MGR; else if (raw_state == 0x61) state = PSR_STATE_POLLVUPDATE; else state = PSR_STATE_INVALID; return state; } /* * Get PSR state from firmware. 
*/ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state, uint8_t panel_inst) { struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; uint32_t raw_state = 0; uint32_t retry_count = 0; enum dmub_status status; do { // Send gpint command and wait for ack status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, panel_inst, 30); if (status == DMUB_STATUS_OK) { // GPINT was executed, get response dmub_srv_get_gpint_response(srv, &raw_state); *state = convert_psr_state(raw_state); } else // Return invalid state when GPINT times out *state = PSR_STATE_INVALID; } while (++retry_count <= 1000 && *state == PSR_STATE_INVALID); // Assert if max retry hit if (retry_count >= 1000 && *state == PSR_STATE_INVALID) { ASSERT(0); DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, WPP_BIT_FLAG_Firmware_PsrState, "Unable to get PSR state from FW."); } else DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE, WPP_BIT_FLAG_Firmware_PsrState, "Got PSR state from FW. PSR state: %d, Retry count: %d", *state, retry_count); } /* * Set PSR version. */ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) return false; memset(&cmd, 0, sizeof(cmd)); cmd.psr_set_version.header.type = DMUB_CMD__PSR; cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION; switch (stream->link->psr_settings.psr_version) { case DC_PSR_VERSION_1: cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1; break; case DC_PSR_VERSION_SU_1: cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_SU_1; break; case DC_PSR_VERSION_UNSUPPORTED: default: cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED; break; } if (cmd.psr_set_version.psr_set_version_data.version == PSR_VERSION_UNSUPPORTED) return false; cmd.psr_set_version.psr_set_version_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } /* * Enable/Disable PSR. */ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; uint32_t retry_count; enum dc_psr_state state = PSR_STATE0; memset(&cmd, 0, sizeof(cmd)); cmd.psr_enable.header.type = DMUB_CMD__PSR; cmd.psr_enable.data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_enable.data.panel_inst = panel_inst; if (enable) cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE; else cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE; cmd.psr_enable.header.payload_bytes = 0; // Send header only dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Below loops 1000 x 500us = 500 ms. * Exit PSR may need to wait 1-2 frames to power up. Timeout after at * least a few frames. Should never hit the max retry assert below. */ if (wait) { for (retry_count = 0; retry_count <= 1000; retry_count++) { dmub_psr_get_state(dmub, &state, panel_inst); if (enable) { if (state != PSR_STATE0) break; } else { if (state == PSR_STATE0) break; } fsleep(500); } /* assert if max retry hit */ if (retry_count >= 1000) ASSERT(0); } } /* * Set PSR level. 
*/ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_t panel_inst) { union dmub_rb_cmd cmd; enum dc_psr_state state = PSR_STATE0; struct dc_context *dc = dmub->ctx; dmub_psr_get_state(dmub, &state, panel_inst); if (state == PSR_STATE0) return; memset(&cmd, 0, sizeof(cmd)); cmd.psr_set_level.header.type = DMUB_CMD__PSR; cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL; cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data); cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* * Set PSR vtotal requirement for FreeSync PSR. */ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; memset(&cmd, 0, sizeof(cmd)); cmd.psr_set_vtotal.header.type = DMUB_CMD__PSR; cmd.psr_set_vtotal.header.sub_type = DMUB_CMD__SET_SINK_VTOTAL_IN_PSR_ACTIVE; cmd.psr_set_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_vtotal_data); cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle; cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* * Set PSR power optimization flags. */ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; memset(&cmd, 0, sizeof(cmd)); cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR; cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT; cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data); cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* * Setup PSR by programming phy registers and sending psr hw context values to firmware. 
*/ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; struct dmub_cmd_psr_copy_settings_data *copy_settings_data = &cmd.psr_copy_settings.psr_copy_settings_data; struct pipe_ctx *pipe_ctx = NULL; struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; int i = 0; for (i = 0; i < MAX_PIPES; i++) { if (res_ctx->pipe_ctx[i].stream && res_ctx->pipe_ctx[i].stream->link == link && res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { pipe_ctx = &res_ctx->pipe_ctx[i]; //TODO: refactor for multi edp support break; } } if (!pipe_ctx) return false; // First, set the psr version if (!dmub_psr_set_version(dmub, pipe_ctx->stream, panel_inst)) return false; // Program DP DPHY fast training registers link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc, psr_context->psrExitLinkTrainingRequired); // Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high link->link_enc->funcs->psr_program_secondary_packet(link->link_enc, psr_context->sdpTransmitLineNumDeadline); memset(&cmd, 0, sizeof(cmd)); cmd.psr_copy_settings.header.type = DMUB_CMD__PSR; cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS; cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data); // Hw insts copy_settings_data->dpphy_inst = psr_context->transmitterId; copy_settings_data->aux_inst = psr_context->channel; copy_settings_data->digfe_inst = psr_context->engineId; copy_settings_data->digbe_inst = psr_context->transmitterId; copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst; if (pipe_ctx->plane_res.dpp) copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; else copy_settings_data->dpp_inst = 0; if (pipe_ctx->stream_res.opp) copy_settings_data->opp_inst = pipe_ctx->stream_res.opp->inst; else copy_settings_data->opp_inst = 0; if (pipe_ctx->stream_res.tg) copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst; else copy_settings_data->otg_inst = 0; // Misc copy_settings_data->use_phy_fsm = link->ctx->dc->debug.psr_power_use_phy_fsm; copy_settings_data->psr_level = psr_context->psr_level.u32all; copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; copy_settings_data->multi_disp_optimizations_en = psr_context->allow_multi_disp_optimizations; copy_settings_data->frame_delay = psr_context->frame_delay; copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline; copy_settings_data->debug.u32All = 0; copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR; copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1; copy_settings_data->debug.bitfields.force_full_frame_update = 0; if (psr_context->su_granularity_required == 0) copy_settings_data->su_y_granularity = 0; else copy_settings_data->su_y_granularity = psr_context->su_y_granularity; copy_settings_data->line_capture_indication = 0; copy_settings_data->line_time_in_us = psr_context->line_time_in_us; copy_settings_data->rate_control_caps = psr_context->rate_control_caps; copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled); copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us; copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 
copy_settings_data->panel_inst = panel_inst; copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); /** * WA for PSRSU+DSC on specific TCON, if DSC is enabled, force PSRSU as ffu mode(full frame update) * Note that PSRSU+DSC is still under development. */ if (copy_settings_data->dsc_enable_status && link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 && !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, sizeof(DP_SINK_DEVICE_STR_ID_1))) link->psr_settings.force_ffu_mode = 1; else link->psr_settings.force_ffu_mode = 0; copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode; if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && !link->dc->debug.disable_fec) && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && !link->panel_config.dsc.disable_dsc_edp && link->dc->caps.edp_dsc_support)) && link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 && (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, sizeof(DP_SINK_DEVICE_STR_ID_1)) || !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, sizeof(DP_SINK_DEVICE_STR_ID_2)))) copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1; else copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0; //WA for PSR1 on specific TCON, require frame delay for frame re-lock copy_settings_data->relock_delay_frame_cnt = 0; if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) copy_settings_data->relock_delay_frame_cnt = 2; copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } /* * Send command to PSR to force static ENTER and ignore all state changes until exit */ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; memset(&cmd, 0, sizeof(cmd)); cmd.psr_force_static.psr_force_static_data.panel_inst = panel_inst; cmd.psr_force_static.psr_force_static_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_force_static.header.type = DMUB_CMD__PSR; cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC; cmd.psr_enable.header.payload_bytes = 0; dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* * Get PSR residency from firmware. */ static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst) { struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; uint16_t param = (uint16_t)(panel_inst << 8); /* Send gpint command and wait for ack */ dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, param, 30); dmub_srv_get_gpint_response(srv, residency); } static const struct dmub_psr_funcs psr_funcs = { .psr_copy_settings = dmub_psr_copy_settings, .psr_enable = dmub_psr_enable, .psr_get_state = dmub_psr_get_state, .psr_set_level = dmub_psr_set_level, .psr_force_static = dmub_psr_force_static, .psr_get_residency = dmub_psr_get_residency, .psr_set_sink_vtotal_in_psr_active = dmub_psr_set_sink_vtotal_in_psr_active, .psr_set_power_opt = dmub_psr_set_power_opt, }; /* * Construct PSR object. */ static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx) { psr->ctx = ctx; psr->funcs = &psr_funcs; } /* * Allocate and initialize PSR object. */ struct dmub_psr *dmub_psr_create(struct dc_context *ctx) { struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL); if (psr == NULL) { BREAK_TO_DEBUGGER(); return NULL; } dmub_psr_construct(psr, ctx); return psr; } /* * Deallocate PSR object. 
*/ void dmub_psr_destroy(struct dmub_psr **dmub) { kfree(*dmub); *dmub = NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
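The convert_psr_state() ladder in dmub_psr.c above maps each raw DMCUB status code to a dc_psr_state value one comparison at a time. Below is an illustrative, table-driven rework of the same mapping — a sketch, not the driver's code — using only the raw-code/state pairs already listed in that ladder; anything unlisted still falls back to PSR_STATE_INVALID.

/* Illustrative sketch only: a table-driven form of convert_psr_state().
 * The pairs below are copied from the if/else ladder above; any raw code
 * not in the table maps to PSR_STATE_INVALID, as in the original.
 */
struct psr_raw_state_map {
	uint32_t raw;
	enum dc_psr_state state;
};

static const struct psr_raw_state_map psr_raw_state_table[] = {
	{ 0x00, PSR_STATE0 },  { 0x10, PSR_STATE1 },   { 0x11, PSR_STATE1a },
	{ 0x20, PSR_STATE2 },  { 0x21, PSR_STATE2a },  { 0x22, PSR_STATE2b },
	{ 0x30, PSR_STATE3 },  { 0x31, PSR_STATE3Init },
	{ 0x40, PSR_STATE4 },  { 0x41, PSR_STATE4a },  { 0x42, PSR_STATE4b },
	{ 0x43, PSR_STATE4c }, { 0x44, PSR_STATE4d },
	{ 0x4A, PSR_STATE4_FULL_FRAME },
	{ 0x4B, PSR_STATE4a_FULL_FRAME },
	{ 0x4C, PSR_STATE4b_FULL_FRAME },
	{ 0x4D, PSR_STATE4c_FULL_FRAME },
	{ 0x4E, PSR_STATE4_FULL_FRAME_POWERUP },
	{ 0x4F, PSR_STATE4_FULL_FRAME_HW_LOCK },
	{ 0x50, PSR_STATE5 },  { 0x51, PSR_STATE5a },  { 0x52, PSR_STATE5b },
	{ 0x53, PSR_STATE5c },
	{ 0x60, PSR_STATE_HWLOCK_MGR },
	{ 0x61, PSR_STATE_POLLVUPDATE },
};

static enum dc_psr_state convert_psr_state_by_table(uint32_t raw_state)
{
	size_t i;
	size_t n = sizeof(psr_raw_state_table) / sizeof(psr_raw_state_table[0]);

	for (i = 0; i < n; i++)
		if (psr_raw_state_table[i].raw == raw_state)
			return psr_raw_state_table[i].state;

	return PSR_STATE_INVALID;
}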
/* * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dce_clk_mgr.h" #include "reg_helper.h" #include "dmcu.h" #include "core_types.h" #include "dal_asic_id.h" #define TO_DCE_CLK_MGR(clocks)\ container_of(clocks, struct dce_clk_mgr, base) #define REG(reg) \ (clk_mgr_dce->regs->reg) #undef FN #define FN(reg_name, field_name) \ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name #define CTX \ clk_mgr_dce->base.ctx #define DC_LOGGER \ clk_mgr->ctx->logger /* Max clock values for each state indexed by "enum clocks_state": */ static const struct state_dependent_clocks dce80_max_clks_by_state[] = { /* ClocksStateInvalid - should not be used */ { .display_clk_khz = 0, .pixel_clk_khz = 0 }, /* ClocksStateUltraLow - not expected to be used for DCE 8.0 */ { .display_clk_khz = 0, .pixel_clk_khz = 0 }, /* ClocksStateLow */ { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, /* ClocksStateNominal */ { .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, /* ClocksStatePerformance */ { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; static const struct state_dependent_clocks dce110_max_clks_by_state[] = { /*ClocksStateInvalid - should not be used*/ { .display_clk_khz = 0, .pixel_clk_khz = 0 }, /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, /*ClocksStateLow*/ { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, /*ClocksStateNominal*/ { .display_clk_khz = 467000, .pixel_clk_khz = 400000 }, /*ClocksStatePerformance*/ { .display_clk_khz = 643000, .pixel_clk_khz = 400000 } }; static const struct state_dependent_clocks dce112_max_clks_by_state[] = { /*ClocksStateInvalid - should not be used*/ { .display_clk_khz = 0, .pixel_clk_khz = 0 }, /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ { .display_clk_khz = 389189, .pixel_clk_khz = 346672 }, /*ClocksStateLow*/ { .display_clk_khz = 459000, .pixel_clk_khz = 400000 }, /*ClocksStateNominal*/ { .display_clk_khz = 667000, .pixel_clk_khz = 600000 }, /*ClocksStatePerformance*/ { .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } }; static const struct state_dependent_clocks dce120_max_clks_by_state[] = { /*ClocksStateInvalid - should not be used*/ { .display_clk_khz = 0, .pixel_clk_khz = 0 }, /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ { .display_clk_khz = 0, .pixel_clk_khz = 0 }, 
/*ClocksStateLow*/ { .display_clk_khz = 460000, .pixel_clk_khz = 400000 }, /*ClocksStateNominal*/ { .display_clk_khz = 670000, .pixel_clk_khz = 600000 }, /*ClocksStatePerformance*/ { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; int dentist_get_divider_from_did(int did) { if (did < DENTIST_BASE_DID_1) did = DENTIST_BASE_DID_1; if (did > DENTIST_MAX_DID) did = DENTIST_MAX_DID; if (did < DENTIST_BASE_DID_2) { return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP * (did - DENTIST_BASE_DID_1); } else if (did < DENTIST_BASE_DID_3) { return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP * (did - DENTIST_BASE_DID_2); } else if (did < DENTIST_BASE_DID_4) { return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP * (did - DENTIST_BASE_DID_3); } else { return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP * (did - DENTIST_BASE_DID_4); } } /* SW will adjust DP REF Clock average value for all purposes * (DP DTO / DP Audio DTO and DP GTC) if clock is spread for all cases: -if SS enabled on DP Ref clock and HW de-spreading enabled with SW calculations for DS_INCR/DS_MODULO (this is planned to be default case) -if SS enabled on DP Ref clock and HW de-spreading enabled with HW calculations (not planned to be used, but average clock should still be valid) -if SS enabled on DP Ref clock and HW de-spreading disabled (should not be case with CIK) then SW should program all rates generated according to average value (case as with previous ASICs) */ static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz) { if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) { struct fixed31_32 ss_percentage = dc_fixpt_div_int( dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage, clk_mgr_dce->dprefclk_ss_divider), 200); struct fixed31_32 adj_dp_ref_clk_khz; ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz); dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); } return dp_ref_clk_khz; } static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); int dprefclk_wdivider; int dprefclk_src_sel; int dp_ref_clk_khz = 600000; int target_div; /* ASSERT DP Reference Clock source is from DFS*/ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); ASSERT(dprefclk_src_sel == 0); /* Read the mmDENTIST_DISPCLK_CNTL to get the currently * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ target_div = dentist_get_divider_from_did(dprefclk_wdivider); /* Calculate the current DFS clock, in kHz.*/ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr_dce->dentist_vco_freq_khz) / target_div; return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz); } int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz); } /* unit: in_khz before mode set, get pixel clock from context. 
ASIC register * may not be programmed yet */ static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context) { uint32_t max_pix_clk = 0; int i; for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream == NULL) continue; /* do not check under lay */ if (pipe_ctx->top_pipe) continue; if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk) max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10; /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS * logic for HBR3 still needs Nominal (0.8V) on VDDC rail */ if (dc_is_dp_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk) max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk; } return max_pix_clk; } static enum dm_pp_clocks_state dce_get_required_clocks_state( struct clk_mgr *clk_mgr, struct dc_state *context) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); int i; enum dm_pp_clocks_state low_req_clk; int max_pix_clk = get_max_pixel_clock_for_all_paths(context); /* Iterate from highest supported to lowest valid state, and update * lowest RequiredState with the lowest state that satisfies * all required clocks */ for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) if (context->bw_ctx.bw.dce.dispclk_khz > clk_mgr_dce->max_clks_by_state[i].display_clk_khz || max_pix_clk > clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz) break; low_req_clk = i + 1; if (low_req_clk > clk_mgr_dce->max_clks_state) { /* set max clock state for high phyclock, invalid on exceeding display clock */ if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz < context->bw_ctx.bw.dce.dispclk_khz) low_req_clk = DM_PP_CLOCKS_STATE_INVALID; else low_req_clk = clk_mgr_dce->max_clks_state; } return low_req_clk; } static int dce_set_clock( struct clk_mgr *clk_mgr, int requested_clk_khz) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; struct dc_bios *bp = clk_mgr->ctx->dc_bios; int actual_clock = requested_clk_khz; struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu; /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, clk_mgr_dce->dentist_vco_freq_khz / 64); /* Prepare to program display clock*/ pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; if (clk_mgr_dce->dfs_bypass_active) pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); if (clk_mgr_dce->dfs_bypass_active) { /* Cache the fixed display clock*/ clk_mgr_dce->dfs_bypass_disp_clk = pxl_clk_params.dfs_bypass_display_clock; actual_clock = pxl_clk_params.dfs_bypass_display_clock; } /* from power down, we need mark the clock state as ClocksStateNominal * from HWReset, so when resume we will call pplib voltage regulator.*/ if (requested_clk_khz == 0) clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); return actual_clock; } int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct bp_set_dce_clock_parameters dce_clk_params; struct dc_bios *bp = clk_mgr->ctx->dc_bios; struct dc *core_dc = 
clk_mgr->ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; int actual_clock = requested_clk_khz; /* Prepare to program display clock*/ memset(&dce_clk_params, 0, sizeof(dce_clk_params)); /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, clk_mgr_dce->dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; bp->funcs->set_dce_clock(bp, &dce_clk_params); actual_clock = dce_clk_params.target_clock_frequency; /* from power down, we need mark the clock state as ClocksStateNominal * from HWReset, so when resume we will call pplib voltage regulator.*/ if (requested_clk_khz == 0) clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; /*Program DP ref Clock*/ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ dce_clk_params.target_clock_frequency = 0; dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; if (!((clk_mgr->ctx->asic_id.chip_family == FAMILY_AI) && ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))) dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = (dce_clk_params.pll_id == CLOCK_SOURCE_COMBO_DISPLAY_PLL0); else dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; bp->funcs->set_dce_clock(bp, &dce_clk_params); if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); } } clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; return actual_clock; } static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce) { struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug; struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; struct integrated_info info = { { { 0 } } }; struct dc_firmware_info fw_info = { { 0 } }; int i; if (bp->integrated_info) info = *bp->integrated_info; clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq; if (clk_mgr_dce->dentist_vco_freq_khz == 0) { bp->funcs->get_firmware_info(bp, &fw_info); clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; if (clk_mgr_dce->dentist_vco_freq_khz == 0) clk_mgr_dce->dentist_vco_freq_khz = 3600000; } /*update the maximum display clock for each power state*/ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID; switch (i) { case 0: clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW; break; case 1: clk_state = DM_PP_CLOCKS_STATE_LOW; break; case 2: clk_state = DM_PP_CLOCKS_STATE_NOMINAL; break; case 3: clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE; break; default: clk_state = DM_PP_CLOCKS_STATE_INVALID; break; } /*Do not allow bad VBIOS/SBIOS to override with invalid values, * check for > 100MHz*/ if (info.disp_clk_voltage[i].max_supported_clk >= 100000) clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz = info.disp_clk_voltage[i].max_supported_clk; } if (!debug->disable_dfs_bypass && bp->integrated_info) if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) clk_mgr_dce->dfs_bypass_enabled = true; } void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce) { struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; int ss_info_num = bp->funcs->get_ss_entry_number( bp, AS_SIGNAL_TYPE_GPU_PLL); if (ss_info_num) { struct spread_spectrum_info info = { { 0 } }; enum bp_result result = 
bp->funcs->get_spread_spectrum_info( bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS * even if SS not enabled and in that case * SSInfo.spreadSpectrumPercentage !=0 would be sign * that SS is enabled */ if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) { clk_mgr_dce->ss_on_dprefclk = true; clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; if (info.type.CENTER_MODE == 0) { /* TODO: Currently for DP Reference clock we * need only SS percentage for * downspread */ clk_mgr_dce->dprefclk_ss_percentage = info.spread_spectrum_percentage; } return; } result = bp->funcs->get_spread_spectrum_info( bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info); /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS * even if SS not enabled and in that case * SSInfo.spreadSpectrumPercentage !=0 would be sign * that SS is enabled */ if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) { clk_mgr_dce->ss_on_dprefclk = true; clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; if (info.type.CENTER_MODE == 0) { /* Currently for DP Reference clock we * need only SS percentage for * downspread */ clk_mgr_dce->dprefclk_ss_percentage = info.spread_spectrum_percentage; } if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss) clk_mgr_dce->dprefclk_ss_percentage = 0; } } } /** * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info * @clk_mgr: clock manager base structure * * Reads from VBIOS the XGMI spread spectrum info and saves it within * the dce clock manager. This operation will overwrite the existing dprefclk * SS values if the vBIOS query succeeds. Otherwise, it does nothing. It also * sets the ->xgmi_enabled flag. */ void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); enum bp_result result; struct spread_spectrum_info info = { { 0 } }; struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; clk_mgr_dce->xgmi_enabled = false; result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI, 0, &info); if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) { clk_mgr_dce->xgmi_enabled = true; clk_mgr_dce->ss_on_dprefclk = true; clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; if (info.type.CENTER_MODE == 0) { /* Currently for DP Reference clock we * need only SS percentage for * downspread */ clk_mgr_dce->dprefclk_ss_percentage = info.spread_spectrum_percentage; } } } void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg) { int j; int num_cfgs = 0; for (j = 0; j < context->stream_count; j++) { int k; const struct dc_stream_state *stream = context->streams[j]; struct dm_pp_single_disp_config *cfg = &pp_display_cfg->disp_configs[num_cfgs]; const struct pipe_ctx *pipe_ctx = NULL; for (k = 0; k < MAX_PIPES; k++) if (stream == context->res_ctx.pipe_ctx[k].stream) { pipe_ctx = &context->res_ctx.pipe_ctx[k]; break; } ASSERT(pipe_ctx != NULL); /* only notify active stream */ if (stream->dpms_off) continue; num_cfgs++; cfg->signal = pipe_ctx->stream->signal; cfg->pipe_idx = pipe_ctx->stream_res.tg->inst; cfg->src_height = stream->src.height; cfg->src_width = stream->src.width; cfg->ddi_channel_mapping = stream->link->ddi_channel_mapping.raw; cfg->transmitter = stream->link->link_enc->transmitter; cfg->link_settings.lane_count = stream->link->cur_link_settings.lane_count; cfg->link_settings.link_rate = stream->link->cur_link_settings.link_rate; 
cfg->link_settings.link_spread = stream->link->cur_link_settings.link_spread; cfg->sym_clock = stream->phy_pix_clk; /* Round v_refresh*/ cfg->v_refresh = stream->timing.pix_clk_100hz * 100; cfg->v_refresh /= stream->timing.h_total; cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) / stream->timing.v_total; } pp_display_cfg->display_count = num_cfgs; } static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) { uint8_t j; uint32_t min_vertical_blank_time = -1; for (j = 0; j < context->stream_count; j++) { struct dc_stream_state *stream = context->streams[j]; uint32_t vertical_blank_in_pixels = 0; uint32_t vertical_blank_time = 0; vertical_blank_in_pixels = stream->timing.h_total * (stream->timing.v_total - stream->timing.v_addressable); vertical_blank_time = vertical_blank_in_pixels * 10000 / stream->timing.pix_clk_100hz; if (min_vertical_blank_time > vertical_blank_time) min_vertical_blank_time = vertical_blank_time; } return min_vertical_blank_time; } static int determine_sclk_from_bounding_box( const struct dc *dc, int required_sclk) { int i; /* * Some asics do not give us sclk levels, so we just report the actual * required sclk */ if (dc->sclk_lvls.num_levels == 0) return required_sclk; for (i = 0; i < dc->sclk_lvls.num_levels; i++) { if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) return dc->sclk_lvls.clocks_in_khz[i]; } /* * even maximum level could not satisfy requirement, this * is unexpected at this stage, should have been caught at * validation time */ ASSERT(0); return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; } static void dce_pplib_apply_display_requirements( struct dc *dc, struct dc_state *context) { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); } static void dce11_pplib_apply_display_requirements( struct dc *dc, struct dc_state *context) { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; pp_display_cfg->all_displays_in_sync = context->bw_ctx.bw.dce.all_displays_in_sync; pp_display_cfg->nb_pstate_switch_disable = context->bw_ctx.bw.dce.nbp_state_change_enable == false; pp_display_cfg->cpu_cc6_disable = context->bw_ctx.bw.dce.cpuc_state_change_enable == false; pp_display_cfg->cpu_pstate_disable = context->bw_ctx.bw.dce.cpup_state_change_enable == false; pp_display_cfg->cpu_pstate_separation_time = context->bw_ctx.bw.dce.blackout_recovery_time_us; pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz / MEMORY_TYPE_MULTIPLIER_CZ; pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( dc, context->bw_ctx.bw.dce.sclk_khz); /* * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. * This is not required for less than 5 displays, * thus don't request decfclk in dc to avoid impact * on power saving. * */ pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ? 
pp_display_cfg->min_engine_clock_khz : 0; pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw_ctx.bw.dce.sclk_deep_sleep_khz; pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); /* TODO: dce11.2*/ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz; dce110_fill_display_configs(context, pp_display_cfg); /* TODO: is this still applicable?*/ if (pp_display_cfg->display_count == 1) { const struct dc_crtc_timing *timing = &context->streams[0]->timing; pp_display_cfg->crtc_index = pp_display_cfg->disp_configs[0].pipe_idx; pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; } if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); } static void dce_update_clocks(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) patched_disp_clk = patched_disp_clk * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk); clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); } static void dce11_update_clocks(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) patched_disp_clk = patched_disp_clk * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); } static void dce112_update_clocks(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) patched_disp_clk = patched_disp_clk * 115 / 100; 
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { patched_disp_clk = dce112_set_clock(clk_mgr, patched_disp_clk); clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); } static void dce12_update_clocks(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; int max_pix_clk = get_max_pixel_clock_for_all_paths(context); int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) patched_disp_clk = patched_disp_clk * 115 / 100; if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; /* * When xGMI is enabled, the display clk needs to be adjusted * with the WAFL link's SS percentage. */ if (clk_mgr_dce->xgmi_enabled) patched_disp_clk = clk_mgr_adjust_dp_ref_freq_for_ss( clk_mgr_dce, patched_disp_clk); clock_voltage_req.clocks_in_khz = patched_disp_clk; clk_mgr->clks.dispclk_khz = dce112_set_clock(clk_mgr, patched_disp_clk); dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); } if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) { clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; clock_voltage_req.clocks_in_khz = max_pix_clk; clk_mgr->clks.phyclk_khz = max_pix_clk; dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); } static const struct clk_mgr_funcs dce120_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = dce12_update_clocks }; static const struct clk_mgr_funcs dce112_funcs = { .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, .update_clocks = dce112_update_clocks }; static const struct clk_mgr_funcs dce110_funcs = { .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, .update_clocks = dce11_update_clocks, }; static const struct clk_mgr_funcs dce_funcs = { .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, .update_clocks = dce_update_clocks }; static void dce_clk_mgr_construct( struct dce_clk_mgr *clk_mgr_dce, struct dc_context *ctx, const struct clk_mgr_registers *regs, const struct clk_mgr_shift *clk_shift, const struct clk_mgr_mask *clk_mask) { struct clk_mgr *base = &clk_mgr_dce->base; struct dm_pp_static_clock_info static_clk_info = {0}; base->ctx = ctx; base->funcs = &dce_funcs; clk_mgr_dce->regs = regs; clk_mgr_dce->clk_mgr_shift = clk_shift; clk_mgr_dce->clk_mgr_mask = clk_mask; clk_mgr_dce->dfs_bypass_disp_clk = 0; clk_mgr_dce->dprefclk_ss_percentage = 0; clk_mgr_dce->dprefclk_ss_divider = 1000; clk_mgr_dce->ss_on_dprefclk = false; if (dm_pp_get_static_clocks(ctx, &static_clk_info)) clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state; else clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; 
dce_clock_read_integrated_info(clk_mgr_dce); dce_clock_read_ss_info(clk_mgr_dce); } struct clk_mgr *dce_clk_mgr_create( struct dc_context *ctx, const struct clk_mgr_registers *regs, const struct clk_mgr_shift *clk_shift, const struct clk_mgr_mask *clk_mask) { struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); if (clk_mgr_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } memcpy(clk_mgr_dce->max_clks_by_state, dce80_max_clks_by_state, sizeof(dce80_max_clks_by_state)); dce_clk_mgr_construct( clk_mgr_dce, ctx, regs, clk_shift, clk_mask); return &clk_mgr_dce->base; } struct clk_mgr *dce110_clk_mgr_create( struct dc_context *ctx, const struct clk_mgr_registers *regs, const struct clk_mgr_shift *clk_shift, const struct clk_mgr_mask *clk_mask) { struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); if (clk_mgr_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } memcpy(clk_mgr_dce->max_clks_by_state, dce110_max_clks_by_state, sizeof(dce110_max_clks_by_state)); dce_clk_mgr_construct( clk_mgr_dce, ctx, regs, clk_shift, clk_mask); clk_mgr_dce->base.funcs = &dce110_funcs; return &clk_mgr_dce->base; } struct clk_mgr *dce112_clk_mgr_create( struct dc_context *ctx, const struct clk_mgr_registers *regs, const struct clk_mgr_shift *clk_shift, const struct clk_mgr_mask *clk_mask) { struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); if (clk_mgr_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } memcpy(clk_mgr_dce->max_clks_by_state, dce112_max_clks_by_state, sizeof(dce112_max_clks_by_state)); dce_clk_mgr_construct( clk_mgr_dce, ctx, regs, clk_shift, clk_mask); clk_mgr_dce->base.funcs = &dce112_funcs; return &clk_mgr_dce->base; } struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx) { struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); if (clk_mgr_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state, sizeof(dce120_max_clks_by_state)); dce_clk_mgr_construct( clk_mgr_dce, ctx, NULL, NULL, NULL); clk_mgr_dce->dprefclk_khz = 600000; clk_mgr_dce->base.funcs = &dce120_funcs; return &clk_mgr_dce->base; } struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx) { struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); if (clk_mgr_dce == NULL) { BREAK_TO_DEBUGGER(); return NULL; } memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state, sizeof(dce120_max_clks_by_state)); dce_clk_mgr_construct(clk_mgr_dce, ctx, NULL, NULL, NULL); clk_mgr_dce->dprefclk_khz = 625000; clk_mgr_dce->base.funcs = &dce120_funcs; return &clk_mgr_dce->base; } void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr); kfree(clk_mgr_dce); *clk_mgr = NULL; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
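clk_mgr_adjust_dp_ref_freq_for_ss() in dce_clk_mgr.c above derives the average DP reference clock when spread spectrum is enabled, using the dc fixed-point helpers. The sketch below restates that arithmetic with 64-bit integer math; the divide-by-200 scale factor is carried over from the original code, div64_u64() from linux/math64.h is assumed, and the result may differ from the fixed-point path by up to the truncation of dc_fixpt_floor().

/* Illustrative sketch (not driver code): the average DPREFCLK value computed
 * by clk_mgr_adjust_dp_ref_freq_for_ss(), using 64-bit integer math instead
 * of the dc fixed-point helpers. Assumes linux/math64.h for div64_u64().
 *
 *   adjusted_khz = floor(ref_khz * (1 - pct / (divider * 200)))
 */
static int adjust_dp_ref_for_ss_sketch(int dp_ref_clk_khz,
				       uint32_t dprefclk_ss_percentage,
				       uint32_t dprefclk_ss_divider)
{
	uint64_t scale = (uint64_t)dprefclk_ss_divider * 200;
	uint64_t adjusted;

	if (scale == 0 || dprefclk_ss_percentage == 0)
		return dp_ref_clk_khz;

	/* ref * (scale - pct) / scale, rounded down like dc_fixpt_floor() */
	adjusted = (uint64_t)dp_ref_clk_khz * (scale - dprefclk_ss_percentage);
	adjusted = div64_u64(adjusted, scale);

	return (int)adjusted;
}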
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dmub_abm.h" #include "dmub_abm_lcd.h" #include "dce_abm.h" #include "dc.h" #include "dc_dmub_srv.h" #include "dmub/dmub_srv.h" #include "core_types.h" #include "dm_services.h" #include "reg_helper.h" #include "fixed31_32.h" #ifdef _WIN32 #include "atombios.h" #else #include "atom.h" #endif #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) #define REG(reg) \ (dce_abm->regs->reg) #undef FN #define FN(reg_name, field_name) \ dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name #define CTX \ dce_abm->base.ctx #define DISABLE_ABM_IMMEDIATELY 255 static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) { union dmub_rb_cmd cmd; uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 
1 : 0; uint32_t edp_id_count = dc->dc_edp_id_count; int i; uint8_t panel_mask = 0; for (i = 0; i < edp_id_count; i++) panel_mask |= 0x01 << i; memset(&cmd, 0, sizeof(cmd)); cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM; cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC; cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm; cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_abm_init(struct abm *abm, uint32_t backlight) { struct dce_abm *dce_abm = TO_DMUB_ABM(abm); REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3); REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1); REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3); REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1); REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1); REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, ABM1_HG_NUM_OF_BINS_SEL, 0, ABM1_HG_VMAX_SEL, 1, ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0); REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0, ABM1_IPCSC_COEFF_SEL_R, 2, ABM1_IPCSC_COEFF_SEL_G, 4, ABM1_IPCSC_COEFF_SEL_B, 2); REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL, BL1_PWM_CURRENT_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL, BL1_PWM_TARGET_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight); REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000); REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0, ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1, ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1, ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); dmub_abm_enable_fractional_pwm(abm->ctx); } unsigned int dmub_abm_get_current_backlight(struct abm *abm) { struct dce_abm *dce_abm = TO_DMUB_ABM(abm); unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); /* return backlight in hardware format which is unsigned 17 bits, with * 1 bit integer and 16 bit fractional */ return backlight; } unsigned int dmub_abm_get_target_backlight(struct abm *abm) { struct dce_abm *dce_abm = TO_DMUB_ABM(abm); unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); /* return backlight in hardware format which is unsigned 17 bits, with * 1 bit integer and 16 bit fractional */ return backlight; } bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; memset(&cmd, 0, sizeof(cmd)); cmd.abm_set_level.header.type = DMUB_CMD__ABM; cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; cmd.abm_set_level.abm_set_level_data.level = level; cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } void dmub_abm_init_config(struct abm *abm, const char *src, unsigned int bytes, unsigned int inst) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; uint8_t panel_mask = 0x01 << inst; // TODO: Optimize by only reading back final 4 bytes dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); // Copy iramtable into cw7 memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes); memset(&cmd, 0, sizeof(cmd)); // Fw will copy from cw7 to fw_state cmd.abm_init_config.header.type = DMUB_CMD__ABM; cmd.abm_init_config.header.sub_type = 
DMUB_CMD__ABM_INIT_CONFIG; cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; cmd.abm_init_config.abm_init_config_data.bytes = bytes; cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask; cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; uint8_t panel_mask = 0x01 << panel_inst; memset(&cmd, 0, sizeof(cmd)); cmd.abm_pause.header.type = DMUB_CMD__ABM; cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE; cmd.abm_pause.abm_pause_data.enable = pause; cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } /***************************************************************************** * dmub_abm_save_restore() - dmub interface for abm save+pause and restore+ * un-pause * @dc: dc context * @panel_inst: panel instance index * @pData: contains command to pause/un-pause abm and exchange abm parameters * * When called Pause will get abm data and store in pData, and un-pause will * set/apply abm data stored in pData. * *****************************************************************************/ bool dmub_abm_save_restore( struct dc_context *dc, unsigned int panel_inst, struct abm_save_restore *pData) { union dmub_rb_cmd cmd; uint8_t panel_mask = 0x01 << panel_inst; unsigned int bytes = sizeof(struct abm_save_restore); // TODO: Optimize by only reading back final 4 bytes dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); // Copy iramtable into cw7 memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes); memset(&cmd, 0, sizeof(cmd)); cmd.abm_save_restore.header.type = DMUB_CMD__ABM; cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE; cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; cmd.abm_save_restore.abm_init_config_data.bytes = bytes; cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask; cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); // Copy iramtable data into local structure memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); return true; } bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; uint32_t ramping_boundary = 0xFFFF; memset(&cmd, 0, sizeof(cmd)); cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option; cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst; cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } bool dmub_abm_set_backlight_level(struct abm *abm, unsigned int backlight_pwm_u16_16, 
unsigned int frame_ramp, unsigned int panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; memset(&cmd, 0, sizeof(cmd)); cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
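dmub_abm_set_backlight_level() above passes the user level in the same unsigned 1.16 fixed-point format that dmub_abm_get_current_backlight() and dmub_abm_get_target_backlight() read back, and it selects the panel with a one-hot mask built from the panel index. The helpers below are hypothetical conveniences for producing those two arguments; in particular, the 100% == 0x10000 scaling is an assumption inferred from the "1 bit integer and 16 bit fractional" comment above, not something programmed in this file.

/* Hypothetical helpers, for illustration only: build the one-hot panel mask
 * and an unsigned 1.16 fixed-point backlight value for the ABM DMUB commands.
 * The 100% == 0x10000 scaling is an assumption based on the
 * "1 bit integer and 16 bit fractional" comment above.
 */
static inline uint8_t abm_panel_mask_sketch(unsigned int panel_inst)
{
	return (uint8_t)(0x01 << panel_inst);
}

static inline unsigned int backlight_percent_to_u16_16_sketch(unsigned int percent)
{
	if (percent > 100)
		percent = 100;

	return (percent * 0x10000) / 100;
}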
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD */ #include "dc.h" #include "dc_dmub_srv.h" #include "dmub_outbox.h" #include "dmub/inc/dmub_cmd.h" /* * Function: dmub_enable_outbox_notification * * @brief * Sends inbox cmd to dmub for enabling outbox notifications to x86. * * @param * [in] dmub_srv: dmub_srv structure */ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv) { union dmub_rb_cmd cmd; memset(&cmd, 0x0, sizeof(cmd)); cmd.outbox1_enable.header.type = DMUB_CMD__OUTBOX1_ENABLE; cmd.outbox1_enable.header.sub_type = 0; cmd.outbox1_enable.header.payload_bytes = sizeof(cmd.outbox1_enable) - sizeof(cmd.outbox1_enable.header); cmd.outbox1_enable.enable = true; dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "core_types.h" #include "dc_dmub_srv.h" #include "panel_cntl.h" #include "dce_panel_cntl.h" #include "atom.h" #define TO_DCE_PANEL_CNTL(panel_cntl)\ container_of(panel_cntl, struct dce_panel_cntl, base) #define CTX \ dce_panel_cntl->base.ctx #define DC_LOGGER \ dce_panel_cntl->base.ctx->logger #define REG(reg)\ dce_panel_cntl->regs->reg #undef FN #define FN(reg_name, field_name) \ dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) { uint64_t current_backlight; uint32_t bl_period, bl_int_count; uint32_t bl_pwm, fractional_duty_cycle_en; uint32_t bl_period_mask, bl_pwm_mask; struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); REG_READ(BL_PWM_PERIOD_CNTL); REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period); REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count); REG_READ(BL_PWM_CNTL); REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm)); REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en); if (bl_int_count == 0) bl_int_count = 16; bl_period_mask = (1 << bl_int_count) - 1; bl_period &= bl_period_mask; bl_pwm_mask = bl_period_mask << (16 - bl_int_count); if (fractional_duty_cycle_en == 0) bl_pwm &= bl_pwm_mask; else bl_pwm &= 0xFFFF; current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count); if (bl_period == 0) bl_period = 0xFFFF; current_backlight = div_u64(current_backlight, bl_period); current_backlight = (current_backlight + 1) >> 1; return (uint32_t)(current_backlight); } static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl) { struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); uint32_t value; uint32_t current_backlight; /* It must not be 0, so we have to restore them * Bios bug w/a - period resets to zero, * restoring to cache values which is always correct */ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value); if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) { REG_WRITE(BL_PWM_CNTL, panel_cntl->stored_backlight_registers.BL_PWM_CNTL); REG_WRITE(BL_PWM_CNTL2, panel_cntl->stored_backlight_registers.BL_PWM_CNTL2); REG_WRITE(BL_PWM_PERIOD_CNTL, panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL); REG_UPDATE(PWRSEQ_REF_DIV, BL_PWM_REF_DIV, panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); } else if 
((value != 0) && (value != 1)) { panel_cntl->stored_backlight_registers.BL_PWM_CNTL = REG_READ(BL_PWM_CNTL); panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = REG_READ(BL_PWM_CNTL2); panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = REG_READ(BL_PWM_PERIOD_CNTL); REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV, &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); } else { /* TODO: Note: This should not really happen since VBIOS * should have initialized PWM registers on boot. */ REG_WRITE(BL_PWM_CNTL, 0x8000FA00); REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0); } // Have driver take backlight control // TakeBacklightControl(true) value = REG_READ(BIOS_SCRATCH_2); value |= ATOM_S2_VRI_BRIGHT_ENABLE; REG_WRITE(BIOS_SCRATCH_2, value); // Enable the backlight output REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1); // Unlock group 2 backlight registers REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); current_backlight = dce_get_16_bit_backlight_from_pwm(panel_cntl); return current_backlight; } static bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl) { struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); uint32_t blon, blon_ovrd, pwrseq_target_state; REG_GET_2(PWRSEQ_CNTL, LVTMA_BLON, &blon, LVTMA_BLON_OVRD, &blon_ovrd); REG_GET(PWRSEQ_CNTL, LVTMA_PWRSEQ_TARGET_STATE, &pwrseq_target_state); if (blon_ovrd) return blon; else return pwrseq_target_state; } static bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl) { struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); uint32_t pwr_seq_state, dig_on, dig_on_ovrd; REG_GET(PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state); REG_GET_2(PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd); return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1); } static void dce_store_backlight_level(struct panel_cntl *panel_cntl) { struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); panel_cntl->stored_backlight_registers.BL_PWM_CNTL = REG_READ(BL_PWM_CNTL); panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = REG_READ(BL_PWM_CNTL2); panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = REG_READ(BL_PWM_PERIOD_CNTL); REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV, &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV); } static void dce_driver_set_backlight(struct panel_cntl *panel_cntl, uint32_t backlight_pwm_u16_16) { uint32_t backlight_16bit; uint32_t masked_pwm_period; uint8_t bit_count; uint64_t active_duty_cycle; uint32_t pwm_period_bitcnt; struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); /* * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight * active duty cycle <= backlight period */ /* 1.1 Apply bitmask for backlight period value based on value of BITCNT */ REG_GET_2(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt, BL_PWM_PERIOD, &masked_pwm_period); if (pwm_period_bitcnt == 0) bit_count = 16; else bit_count = pwm_period_bitcnt; /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1); /* 1.2 Calculate integer active duty cycle required upper 16 bits * contain integer component, lower 16 bits contain fractional component * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24 */ active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period; /* 1.3 Calculate 16 bit active duty cycle from integer and fractional * components shift by bitCount then mask 16 bits and add rounding bit * from MSB of fraction e.g. 
0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0 */ backlight_16bit = active_duty_cycle >> bit_count; backlight_16bit &= 0xFFFF; backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1; /* * 2. Program register with updated value */ /* 2.1 Lock group 2 backlight registers */ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1, BL_PWM_GRP1_REG_LOCK, 1); // 2.2 Write new active duty cycle REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit); /* 2.3 Unlock group 2 backlight registers */ REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); /* 3 Wait for pending bit to be cleared */ REG_WAIT(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, 0, 1, 10000); } static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl) { struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl); kfree(dce_panel_cntl); *panel_cntl = NULL; } static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = { .destroy = dce_panel_cntl_destroy, .hw_init = dce_panel_cntl_hw_init, .is_panel_backlight_on = dce_is_panel_backlight_on, .is_panel_powered_on = dce_is_panel_powered_on, .store_backlight_level = dce_store_backlight_level, .driver_set_backlight = dce_driver_set_backlight, .get_current_backlight = dce_get_16_bit_backlight_from_pwm, }; void dce_panel_cntl_construct( struct dce_panel_cntl *dce_panel_cntl, const struct panel_cntl_init_data *init_data, const struct dce_panel_cntl_registers *regs, const struct dce_panel_cntl_shift *shift, const struct dce_panel_cntl_mask *mask) { struct panel_cntl *base = &dce_panel_cntl->base; base->stored_backlight_registers.BL_PWM_CNTL = 0; base->stored_backlight_registers.BL_PWM_CNTL2 = 0; base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0; base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0; dce_panel_cntl->regs = regs; dce_panel_cntl->shift = shift; dce_panel_cntl->mask = mask; dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs; dce_panel_cntl->base.ctx = init_data->ctx; dce_panel_cntl->base.inst = init_data->inst; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
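The 16-bit backlight read-back above (dce_get_16_bit_backlight_from_pwm) packs an integer duty cycle, an optional fractional part and a bit-count-dependent mask into a few shifts, which is easy to misread. The snippet below is a minimal user-space sketch of just that arithmetic; the helper name and the sample register values are illustrative assumptions, not part of the driver, and no register-access macros are involved.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the 16-bit backlight conversion used above (arithmetic only).
 * Assumed inputs: bl_pwm = BL_ACTIVE_INT_FRAC_CNT, bl_period = BL_PWM_PERIOD,
 * bl_int_count = BL_PWM_PERIOD_BITCNT, fractional_en = BL_PWM_FRACTIONAL_EN. */
static uint32_t backlight_16bit_from_pwm(uint32_t bl_pwm, uint32_t bl_period,
					 uint32_t bl_int_count, int fractional_en)
{
	uint64_t current_backlight;
	uint32_t period_mask, pwm_mask;

	if (bl_int_count == 0)
		bl_int_count = 16;            /* BITCNT of 0 means all 16 bits are integer */

	period_mask = (1u << bl_int_count) - 1;
	bl_period &= period_mask;             /* keep only the integer period bits */

	pwm_mask = period_mask << (16 - bl_int_count);
	bl_pwm &= fractional_en ? 0xFFFF : pwm_mask;

	/* scale duty cycle to a 16.16-style value, then round back to 16 bits */
	current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count);
	if (bl_period == 0)
		bl_period = 0xFFFF;
	current_backlight /= bl_period;
	return (uint32_t)((current_backlight + 1) >> 1);
}

int main(void)
{
	/* illustrative values: 8-bit period of 0xFA, ~50% duty cycle -> ~0x8000 */
	printf("0x%04x\n", (unsigned)backlight_16bit_from_pwm(0x7D00, 0xFA, 8, 0));
	return 0;
}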
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/delay.h> #include "resource.h" #include "dce_i2c.h" #include "dce_i2c_hw.h" #include "reg_helper.h" #include "include/gpio_service_interface.h" #define CTX \ dce_i2c_hw->ctx #define REG(reg)\ dce_i2c_hw->regs->reg #undef FN #define FN(reg_name, field_name) \ dce_i2c_hw->shifts->field_name, dce_i2c_hw->masks->field_name static void execute_transaction( struct dce_i2c_hw *dce_i2c_hw) { REG_UPDATE_N(SETUP, 5, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0); REG_UPDATE_5(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, 0, DC_I2C_SW_STATUS_RESET, 0, DC_I2C_SEND_RESET, 0, DC_I2C_GO, 0, DC_I2C_TRANSACTION_COUNT, dce_i2c_hw->transaction_count - 1); /* start I2C transfer */ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1); /* all transactions were executed and HW buffer became empty * (even though it actually happens when status becomes DONE) */ dce_i2c_hw->transaction_count = 0; dce_i2c_hw->buffer_used_bytes = 0; } static enum i2c_channel_operation_result get_channel_status( struct dce_i2c_hw *dce_i2c_hw, uint8_t *returned_bytes) { uint32_t i2c_sw_status = 0; uint32_t value = REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) return I2C_CHANNEL_OPERATION_ENGINE_BUSY; else if (value & dce_i2c_hw->masks->DC_I2C_SW_STOPPED_ON_NACK) return I2C_CHANNEL_OPERATION_NO_RESPONSE; else if (value & dce_i2c_hw->masks->DC_I2C_SW_TIMEOUT) return I2C_CHANNEL_OPERATION_TIMEOUT; else if (value & dce_i2c_hw->masks->DC_I2C_SW_ABORTED) return I2C_CHANNEL_OPERATION_FAILED; else if (value & dce_i2c_hw->masks->DC_I2C_SW_DONE) return I2C_CHANNEL_OPERATION_SUCCEEDED; /* * this is the case when HW used for communication, I2C_SW_STATUS * could be zero */ return I2C_CHANNEL_OPERATION_SUCCEEDED; } static uint32_t get_hw_buffer_available_size( const struct dce_i2c_hw *dce_i2c_hw) { return dce_i2c_hw->buffer_size - dce_i2c_hw->buffer_used_bytes; } static void process_channel_reply( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *reply) { uint32_t length = reply->length; uint8_t *buffer = reply->data; REG_SET_3(DC_I2C_DATA, 0, DC_I2C_INDEX, dce_i2c_hw->buffer_used_write, DC_I2C_DATA_RW, 1, DC_I2C_INDEX_WRITE, 1); while 
(length) { /* after reading the status, * if the I2C operation executed successfully * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller * should read data bytes from I2C circular data buffer */ uint32_t i2c_data; REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data); *buffer++ = i2c_data; --length; } } static bool is_engine_available(struct dce_i2c_hw *dce_i2c_hw) { unsigned int arbitrate; unsigned int i2c_hw_status; REG_GET(HW_STATUS, DC_I2C_DDC1_HW_STATUS, &i2c_hw_status); if (i2c_hw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW) return false; REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); if (arbitrate == DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY) return false; return true; } static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw) { uint32_t i2c_sw_status = 0; REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE) return false; if (is_engine_available(dce_i2c_hw)) return false; return true; } static bool process_transaction( struct dce_i2c_hw *dce_i2c_hw, struct i2c_request_transaction_data *request) { uint32_t length = request->length; uint8_t *buffer = request->data; bool last_transaction = false; uint32_t value = 0; if (is_hw_busy(dce_i2c_hw)) { request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY; return false; } last_transaction = ((dce_i2c_hw->transaction_count == 3) || (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)); switch (dce_i2c_hw->transaction_count) { case 0: REG_UPDATE_5(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, 1, DC_I2C_START0, 1, DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), DC_I2C_COUNT0, length, DC_I2C_STOP0, last_transaction ? 1 : 0); break; case 1: REG_UPDATE_5(DC_I2C_TRANSACTION1, DC_I2C_STOP_ON_NACK0, 1, DC_I2C_START0, 1, DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), DC_I2C_COUNT0, length, DC_I2C_STOP0, last_transaction ? 1 : 0); break; case 2: REG_UPDATE_5(DC_I2C_TRANSACTION2, DC_I2C_STOP_ON_NACK0, 1, DC_I2C_START0, 1, DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), DC_I2C_COUNT0, length, DC_I2C_STOP0, last_transaction ? 1 : 0); break; case 3: REG_UPDATE_5(DC_I2C_TRANSACTION3, DC_I2C_STOP_ON_NACK0, 1, DC_I2C_START0, 1, DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), DC_I2C_COUNT0, length, DC_I2C_STOP0, last_transaction ? 1 : 0); break; default: /* TODO Warning ? */ break; } /* Write the I2C address and I2C data * into the hardware circular buffer, one byte per entry. * As an example, the 7-bit I2C slave address for CRT monitor * for reading DDC/EDID information is 0b1010001. * For an I2C send operation, the LSB must be programmed to 0; * for I2C receive operation, the LSB must be programmed to 1. 
*/ if (dce_i2c_hw->transaction_count == 0) { value = REG_SET_4(DC_I2C_DATA, 0, DC_I2C_DATA_RW, false, DC_I2C_DATA, request->address, DC_I2C_INDEX, 0, DC_I2C_INDEX_WRITE, 1); dce_i2c_hw->buffer_used_write = 0; } else value = REG_SET_2(DC_I2C_DATA, 0, DC_I2C_DATA_RW, false, DC_I2C_DATA, request->address); dce_i2c_hw->buffer_used_write++; if (!(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) { while (length) { REG_SET_2(DC_I2C_DATA, value, DC_I2C_INDEX_WRITE, 0, DC_I2C_DATA, *buffer++); dce_i2c_hw->buffer_used_write++; --length; } } ++dce_i2c_hw->transaction_count; dce_i2c_hw->buffer_used_bytes += length + 1; return last_transaction; } static inline void reset_hw_engine(struct dce_i2c_hw *dce_i2c_hw) { REG_UPDATE_2(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1, DC_I2C_SW_STATUS_RESET, 1); } static void set_speed( struct dce_i2c_hw *dce_i2c_hw, uint32_t speed) { uint32_t xtal_ref_div = 0, ref_base_div = 0; uint32_t prescale = 0; uint32_t i2c_ref_clock = 0; if (speed == 0) return; REG_GET_2(MICROSECOND_TIME_BASE_DIV, MICROSECOND_TIME_BASE_DIV, &ref_base_div, XTAL_REF_DIV, &xtal_ref_div); if (xtal_ref_div == 0) xtal_ref_div = 2; if (ref_base_div == 0) i2c_ref_clock = (dce_i2c_hw->reference_frequency * 2); else i2c_ref_clock = ref_base_div * 1000; prescale = (i2c_ref_clock / xtal_ref_div) / speed; if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) REG_UPDATE_N(SPEED, 3, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); else REG_UPDATE_N(SPEED, 2, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; uint32_t reset_length = 0; if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) { if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL) { REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0); REG_WAIT(DIO_MEM_PWR_STATUS, I2C_MEM_PWR_STATE, 0, 0, 5); } } /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); if (dce_i2c_hw->setup_limit != 0) i2c_setup_limit = dce_i2c_hw->setup_limit; /* Program pin select */ REG_UPDATE_6(DC_I2C_CONTROL, DC_I2C_GO, 0, DC_I2C_SOFT_RESET, 0, DC_I2C_SEND_RESET, 0, DC_I2C_SW_STATUS_RESET, 1, DC_I2C_TRANSACTION_COUNT, 0, DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id); /* Program time limit */ if (dce_i2c_hw->send_reset_length == 0) { /*pre-dcn*/ REG_UPDATE_N(SETUP, 2, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); } else { reset_length = dce_i2c_hw->send_reset_length; REG_UPDATE_N(SETUP, 3, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); } /* Program HW priority * set to High - interrupt software I2C at any time * Enable restart of SW I2C that was interrupted by HW * disable queuing of software while I2C is in use by HW */ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, 0); return true; 
} static void release_engine( struct dce_i2c_hw *dce_i2c_hw) { bool safe_to_reset; /* Reset HW engine */ { uint32_t i2c_sw_status = 0; REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); /* if used by SW, safe to reset */ safe_to_reset = (i2c_sw_status == 1); } if (safe_to_reset) REG_UPDATE_2(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, 1, DC_I2C_SW_STATUS_RESET, 1); else REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1); /* HW I2c engine - clock gating feature */ if (!dce_i2c_hw->engine_keep_power_up_count) REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0); /*for HW HDCP Ri polling failure w/a test*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp); /* Release I2C after reset, so HW or DMCU could use it */ REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1, DC_I2C_SW_USE_I2C_REG_REQ, 0); if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) { if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL) REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); } } struct dce_i2c_hw *acquire_i2c_hw_engine( struct resource_pool *pool, struct ddc *ddc) { uint32_t counter = 0; enum gpio_result result; struct dce_i2c_hw *dce_i2c_hw = NULL; if (!ddc) return NULL; if (ddc->hw_info.hw_supported) { enum gpio_ddc_line line = dal_ddc_get_line(ddc); if (line < pool->res_cap->num_ddc) dce_i2c_hw = pool->hw_i2cs[line]; } if (!dce_i2c_hw) return NULL; if (pool->i2c_hw_buffer_in_use || !is_engine_available(dce_i2c_hw)) return NULL; do { result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, GPIO_DDC_CONFIG_TYPE_MODE_I2C); if (result == GPIO_RESULT_OK) break; /* i2c_engine is busy by VBios, lets wait and retry */ udelay(10); ++counter; } while (counter < 2); if (result != GPIO_RESULT_OK) return NULL; dce_i2c_hw->ddc = ddc; if (!setup_engine(dce_i2c_hw)) { release_engine(dce_i2c_hw); return NULL; } pool->i2c_hw_buffer_in_use = true; return dce_i2c_hw; } static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw, uint32_t timeout, enum i2c_channel_operation_result expected_result) { enum i2c_channel_operation_result result; uint32_t i = 0; if (!timeout) return I2C_CHANNEL_OPERATION_SUCCEEDED; do { result = get_channel_status( dce_i2c_hw, NULL); if (result != expected_result) break; udelay(1); ++i; } while (i < timeout); return result; } static void submit_channel_request_hw( struct dce_i2c_hw *dce_i2c_hw, struct i2c_request_transaction_data *request) { request->status = I2C_CHANNEL_OPERATION_SUCCEEDED; if (!process_transaction(dce_i2c_hw, request)) return; if (is_hw_busy(dce_i2c_hw)) { request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY; return; } reset_hw_engine(dce_i2c_hw); execute_transaction(dce_i2c_hw); } static uint32_t get_transaction_timeout_hw( const struct dce_i2c_hw *dce_i2c_hw, uint32_t length, uint32_t speed) { uint32_t period_timeout; uint32_t num_of_clock_stretches; if (!speed) return 0; period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed; num_of_clock_stretches = 1 + (length << 3) + 1; num_of_clock_stretches += (dce_i2c_hw->buffer_used_bytes << 3) + (dce_i2c_hw->transaction_count << 1); return period_timeout * num_of_clock_stretches; } static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *payload, bool middle_of_transaction, uint32_t speed) { struct i2c_request_transaction_data request; uint32_t transaction_timeout; enum i2c_channel_operation_result operation_result; bool result = false; /* We need following: * transaction length will not exceed * the number 
of free bytes in HW buffer (minus one for address) */ if (payload->length >= get_hw_buffer_available_size(dce_i2c_hw)) { return false; } if (!payload->write) request.action = middle_of_transaction ? DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_READ; else request.action = middle_of_transaction ? DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; request.address = (uint8_t) ((payload->address << 1) | !payload->write); request.length = payload->length; request.data = payload->data; /* obtain timeout value before submitting request */ transaction_timeout = get_transaction_timeout_hw( dce_i2c_hw, payload->length + 1, speed); submit_channel_request_hw( dce_i2c_hw, &request); if ((request.status == I2C_CHANNEL_OPERATION_FAILED) || (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) return false; /* wait until transaction proceed */ operation_result = dce_i2c_hw_engine_wait_on_operation_result( dce_i2c_hw, transaction_timeout, I2C_CHANNEL_OPERATION_ENGINE_BUSY); /* update transaction status */ if (operation_result == I2C_CHANNEL_OPERATION_SUCCEEDED) result = true; if (result && (!payload->write)) process_channel_reply(dce_i2c_hw, payload); return result; } bool dce_i2c_submit_command_hw( struct resource_pool *pool, struct ddc *ddc, struct i2c_command *cmd, struct dce_i2c_hw *dce_i2c_hw) { uint8_t index_of_payload = 0; bool result; set_speed(dce_i2c_hw, cmd->speed); result = true; while (index_of_payload < cmd->number_of_payloads) { bool mot = (index_of_payload != cmd->number_of_payloads - 1); struct i2c_payload *payload = cmd->payloads + index_of_payload; if (!dce_i2c_hw_engine_submit_payload( dce_i2c_hw, payload, mot, cmd->speed)) { result = false; break; } ++index_of_payload; } pool->i2c_hw_buffer_in_use = false; release_engine(dce_i2c_hw); dal_ddc_close(dce_i2c_hw->ddc); dce_i2c_hw->ddc = NULL; return result; } void dce_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, uint32_t engine_id, const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { dce_i2c_hw->ctx = ctx; dce_i2c_hw->engine_id = engine_id; dce_i2c_hw->reference_frequency = (ctx->dc_bios->fw_info.pll_info.crystal_frequency) >> 1; dce_i2c_hw->regs = regs; dce_i2c_hw->shifts = shifts; dce_i2c_hw->masks = masks; dce_i2c_hw->buffer_used_bytes = 0; dce_i2c_hw->transaction_count = 0; dce_i2c_hw->engine_keep_power_up_count = 1; dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->send_reset_length = 0; dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE; } void dce100_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, uint32_t engine_id, const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { dce_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, regs, shifts, masks); dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; } void dce112_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, uint32_t engine_id, const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { dce100_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, regs, shifts, masks); dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED_100KHZ; } void dcn1_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, uint32_t engine_id, const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { 
dce112_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, regs, shifts, masks); dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN; } void dcn2_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, uint32_t engine_id, const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { dcn1_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, regs, shifts, masks); dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_9; if (ctx->dc->debug.scl_reset_length10) dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
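set_speed() above derives the SW I2C prescale from the microsecond time base and the crystal reference. A minimal sketch of that division chain follows; it assumes the clock values are expressed in kHz (consistent with i2c_speed_in_khz in the driver), and the function name and sample numbers are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the SW I2C prescale computation performed by set_speed() above.
 * Assumption: all clock values are in kHz, matching i2c_speed_in_khz. */
static uint32_t i2c_prescale(uint32_t ref_base_div, uint32_t xtal_ref_div,
			     uint32_t crystal_khz, uint32_t speed_khz)
{
	uint32_t i2c_ref_clock;

	if (speed_khz == 0)
		return 0;
	if (xtal_ref_div == 0)
		xtal_ref_div = 2;            /* same fallback as the driver */
	if (ref_base_div == 0)
		i2c_ref_clock = crystal_khz; /* reference_frequency * 2 recovers the crystal */
	else
		i2c_ref_clock = ref_base_div * 1000;

	return (i2c_ref_clock / xtal_ref_div) / speed_khz;
}

int main(void)
{
	/* illustrative: 100 MHz crystal, divider of 2, 100 kHz target speed -> 500 */
	printf("prescale = %u\n", (unsigned)i2c_prescale(0, 2, 100000, 100));
	return 0;
}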
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce_mem_input.h" #include "reg_helper.h" #include "basics/conversion.h" #define CTX \ dce_mi->base.ctx #define REG(reg)\ dce_mi->regs->reg #undef FN #define FN(reg_name, field_name) \ dce_mi->shifts->field_name, dce_mi->masks->field_name struct pte_setting { unsigned int bpp; unsigned int page_width; unsigned int page_height; unsigned char min_pte_before_flip_horiz_scan; unsigned char min_pte_before_flip_vert_scan; unsigned char pte_req_per_chunk; unsigned char param_6; unsigned char param_7; unsigned char param_8; }; enum mi_bits_per_pixel { mi_bpp_8 = 0, mi_bpp_16, mi_bpp_32, mi_bpp_64, mi_bpp_count, }; enum mi_tiling_format { mi_tiling_linear = 0, mi_tiling_1D, mi_tiling_2D, mi_tiling_count, }; static const struct pte_setting pte_settings[mi_tiling_count][mi_bpp_count] = { [mi_tiling_linear] = { { 8, 4096, 1, 8, 0, 1, 0, 0, 0}, { 16, 2048, 1, 8, 0, 1, 0, 0, 0}, { 32, 1024, 1, 8, 0, 1, 0, 0, 0}, { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */ }, [mi_tiling_1D] = { { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */ { 16, 256, 8, 2, 0, 1, 0, 0, 0}, { 32, 128, 8, 4, 0, 1, 0, 0, 0}, { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */ }, [mi_tiling_2D] = { { 8, 64, 64, 8, 8, 1, 4, 0, 0}, { 16, 64, 32, 8, 16, 1, 8, 0, 0}, { 32, 32, 32, 16, 16, 1, 8, 0, 0}, { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */ }, }; static enum mi_bits_per_pixel get_mi_bpp( enum surface_pixel_format format) { if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616) return mi_bpp_64; else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) return mi_bpp_32; else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB1555) return mi_bpp_16; else return mi_bpp_8; } static enum mi_tiling_format get_mi_tiling( union dc_tiling_info *tiling_info) { switch (tiling_info->gfx8.array_mode) { case DC_ARRAY_1D_TILED_THIN1: case DC_ARRAY_1D_TILED_THICK: case DC_ARRAY_PRT_TILED_THIN1: return mi_tiling_1D; case DC_ARRAY_2D_TILED_THIN1: case DC_ARRAY_2D_TILED_THICK: case DC_ARRAY_2D_TILED_X_THICK: case DC_ARRAY_PRT_2D_TILED_THIN1: case DC_ARRAY_PRT_2D_TILED_THICK: return mi_tiling_2D; case DC_ARRAY_LINEAR_GENERAL: case DC_ARRAY_LINEAR_ALLIGNED: return mi_tiling_linear; default: return mi_tiling_2D; } } static bool is_vert_scan(enum dc_rotation_angle rotation) { switch (rotation) { case ROTATION_ANGLE_90: case ROTATION_ANGLE_270: return true; default: return false; } } static void dce_mi_program_pte_vm( struct mem_input *mi, 
enum surface_pixel_format format, union dc_tiling_info *tiling_info, enum dc_rotation_angle rotation) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); enum mi_bits_per_pixel mi_bpp = get_mi_bpp(format); enum mi_tiling_format mi_tiling = get_mi_tiling(tiling_info); const struct pte_setting *pte = &pte_settings[mi_tiling][mi_bpp]; unsigned int page_width = log_2(pte->page_width); unsigned int page_height = log_2(pte->page_height); unsigned int min_pte_before_flip = is_vert_scan(rotation) ? pte->min_pte_before_flip_vert_scan : pte->min_pte_before_flip_horiz_scan; REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0x7f); REG_UPDATE_3(DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH, page_width, DVMM_PAGE_HEIGHT, page_height, DVMM_MIN_PTE_BEFORE_FLIP, min_pte_before_flip); REG_UPDATE_2(DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk, DVMM_MAX_PTE_REQ_OUTSTANDING, 0x7f); } static void program_urgency_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t urgency_low_wm, uint32_t urgency_high_wm) { REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, wm_select); REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0, URGENCY_LOW_WATERMARK, urgency_low_wm, URGENCY_HIGH_WATERMARK, urgency_high_wm); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_program_urgency_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t urgency_low_wm, uint32_t urgency_high_wm) { REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL3, URGENCY_WATERMARK_MASK, wm_select); REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0, URGENCY_LOW_WATERMARK, urgency_low_wm, URGENCY_HIGH_WATERMARK, urgency_high_wm); } #endif static void dce120_program_urgency_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t urgency_low_wm, uint32_t urgency_high_wm) { REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, wm_select); REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0, URGENCY_LOW_WATERMARK, urgency_low_wm, URGENCY_HIGH_WATERMARK, urgency_high_wm); REG_SET_2(DPG_PIPE_URGENT_LEVEL_CONTROL, 0, URGENT_LEVEL_LOW_WATERMARK, urgency_low_wm, URGENT_LEVEL_HIGH_WATERMARK, urgency_high_wm); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_program_nbp_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t nbp_wm) { REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select); REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, 1, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1); REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, nbp_wm); } #endif static void program_nbp_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t nbp_wm) { if (REG(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL)) { REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select); REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, 1, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1); REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, nbp_wm); } if (REG(DPG_PIPE_LOW_POWER_CONTROL)) { REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, wm_select); REG_UPDATE_3(DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, 1, PSTATE_CHANGE_URGENT_DURING_REQUEST, 1, PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1); REG_UPDATE(DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_WATERMARK, nbp_wm); } } #if defined(CONFIG_DRM_AMD_DC_SI) static void 
dce60_program_stutter_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t stutter_mark) { REG_UPDATE(DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select); REG_UPDATE(DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); } #endif static void dce120_program_stutter_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t stutter_mark, uint32_t stutter_entry) { REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select); if (REG(DPG_PIPE_STUTTER_CONTROL2)) REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark, STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry); else REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark, STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry); } static void program_stutter_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t stutter_mark) { REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select); if (REG(DPG_PIPE_STUTTER_CONTROL2)) REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); else REG_UPDATE(DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); } static void dce_mi_program_display_marks( struct mem_input *mi, struct dce_watermarks nbp, struct dce_watermarks stutter_exit, struct dce_watermarks stutter_enter, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1; program_urgency_watermark(dce_mi, 2, /* set a */ urgent.a_mark, total_dest_line_time_ns); program_urgency_watermark(dce_mi, 1, /* set d */ urgent.d_mark, total_dest_line_time_ns); REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, stutter_en, STUTTER_IGNORE_FBC, 1); program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */ program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */ program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */ program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */ } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_mi_program_display_marks( struct mem_input *mi, struct dce_watermarks nbp, struct dce_watermarks stutter_exit, struct dce_watermarks stutter_enter, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1; dce60_program_urgency_watermark(dce_mi, 2, /* set a */ urgent.a_mark, total_dest_line_time_ns); dce60_program_urgency_watermark(dce_mi, 1, /* set d */ urgent.d_mark, total_dest_line_time_ns); REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, stutter_en, STUTTER_IGNORE_FBC, 1); dce60_program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */ dce60_program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */ dce60_program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */ dce60_program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */ } #endif static void dce112_mi_program_display_marks(struct mem_input *mi, struct dce_watermarks nbp, struct dce_watermarks stutter_exit, struct dce_watermarks stutter_entry, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 
0 : 1; program_urgency_watermark(dce_mi, 0, /* set a */ urgent.a_mark, total_dest_line_time_ns); program_urgency_watermark(dce_mi, 1, /* set b */ urgent.b_mark, total_dest_line_time_ns); program_urgency_watermark(dce_mi, 2, /* set c */ urgent.c_mark, total_dest_line_time_ns); program_urgency_watermark(dce_mi, 3, /* set d */ urgent.d_mark, total_dest_line_time_ns); REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, stutter_en, STUTTER_IGNORE_FBC, 1); program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */ program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */ program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */ program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark); /* set a */ program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark); /* set b */ program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark); /* set c */ program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark); /* set d */ } static void dce120_mi_program_display_marks(struct mem_input *mi, struct dce_watermarks nbp, struct dce_watermarks stutter_exit, struct dce_watermarks stutter_entry, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1; dce120_program_urgency_watermark(dce_mi, 0, /* set a */ urgent.a_mark, total_dest_line_time_ns); dce120_program_urgency_watermark(dce_mi, 1, /* set b */ urgent.b_mark, total_dest_line_time_ns); dce120_program_urgency_watermark(dce_mi, 2, /* set c */ urgent.c_mark, total_dest_line_time_ns); dce120_program_urgency_watermark(dce_mi, 3, /* set d */ urgent.d_mark, total_dest_line_time_ns); REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, stutter_en, STUTTER_IGNORE_FBC, 1); program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */ program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */ program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */ dce120_program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */ dce120_program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */ dce120_program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */ dce120_program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */ } static void program_tiling( struct dce_mem_input *dce_mi, const union dc_tiling_info *info) { if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */ REG_UPDATE_6(GRPH_CONTROL, GRPH_SW_MODE, info->gfx9.swizzle, GRPH_NUM_BANKS, log_2(info->gfx9.num_banks), GRPH_NUM_SHADER_ENGINES, log_2(info->gfx9.num_shader_engines), GRPH_NUM_PIPES, log_2(info->gfx9.num_pipes), GRPH_COLOR_EXPANSION_MODE, 1, GRPH_SE_ENABLE, info->gfx9.shaderEnable); /* TODO: DCP0_GRPH_CONTROL__GRPH_SE_ENABLE where to get info GRPH_SE_ENABLE, 1, GRPH_Z, 0); */ } if (dce_mi->masks->GRPH_MICRO_TILE_MODE) { /* GFX8 */ REG_UPDATE_9(GRPH_CONTROL, GRPH_NUM_BANKS, info->gfx8.num_banks, GRPH_BANK_WIDTH, info->gfx8.bank_width, GRPH_BANK_HEIGHT, info->gfx8.bank_height, GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect, GRPH_TILE_SPLIT, info->gfx8.tile_split, GRPH_MICRO_TILE_MODE, info->gfx8.tile_mode, GRPH_PIPE_CONFIG, info->gfx8.pipe_config, GRPH_ARRAY_MODE, info->gfx8.array_mode, GRPH_COLOR_EXPANSION_MODE, 1); /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */ /* GRPH_Z, 0); */ } if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX6 but 
reuses gfx8 struct */ REG_UPDATE_8(GRPH_CONTROL, GRPH_NUM_BANKS, info->gfx8.num_banks, GRPH_BANK_WIDTH, info->gfx8.bank_width, GRPH_BANK_HEIGHT, info->gfx8.bank_height, GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect, GRPH_TILE_SPLIT, info->gfx8.tile_split, /* DCE6 has no GRPH_MICRO_TILE_MODE mask */ GRPH_PIPE_CONFIG, info->gfx8.pipe_config, GRPH_ARRAY_MODE, info->gfx8.array_mode, GRPH_COLOR_EXPANSION_MODE, 1); /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */ /* GRPH_Z, 0); */ } } static void program_size_and_rotation( struct dce_mem_input *dce_mi, enum dc_rotation_angle rotation, const struct plane_size *plane_size) { const struct rect *in_rect = &plane_size->surface_size; struct rect hw_rect = plane_size->surface_size; const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = { [ROTATION_ANGLE_0] = 0, [ROTATION_ANGLE_90] = 1, [ROTATION_ANGLE_180] = 2, [ROTATION_ANGLE_270] = 3, }; if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) { hw_rect.x = in_rect->y; hw_rect.y = in_rect->x; hw_rect.height = in_rect->width; hw_rect.width = in_rect->height; } REG_SET(GRPH_X_START, 0, GRPH_X_START, hw_rect.x); REG_SET(GRPH_Y_START, 0, GRPH_Y_START, hw_rect.y); REG_SET(GRPH_X_END, 0, GRPH_X_END, hw_rect.width); REG_SET(GRPH_Y_END, 0, GRPH_Y_END, hw_rect.height); REG_SET(GRPH_PITCH, 0, GRPH_PITCH, plane_size->surface_pitch); REG_SET(HW_ROTATION, 0, GRPH_ROTATION_ANGLE, rotation_angles[rotation]); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_program_size( struct dce_mem_input *dce_mi, enum dc_rotation_angle rotation, /* not used in DCE6 */ const struct plane_size *plane_size) { struct rect hw_rect = plane_size->surface_size; /* DCE6 has no HW rotation, skip rotation_angles declaration */ /* DCE6 has no HW rotation, skip ROTATION_ANGLE_* processing */ REG_SET(GRPH_X_START, 0, GRPH_X_START, hw_rect.x); REG_SET(GRPH_Y_START, 0, GRPH_Y_START, hw_rect.y); REG_SET(GRPH_X_END, 0, GRPH_X_END, hw_rect.width); REG_SET(GRPH_Y_END, 0, GRPH_Y_END, hw_rect.height); REG_SET(GRPH_PITCH, 0, GRPH_PITCH, plane_size->surface_pitch); /* DCE6 has no HW_ROTATION register, skip setting rotation_angles */ } #endif static void program_grph_pixel_format( struct dce_mem_input *dce_mi, enum surface_pixel_format format) { uint32_t red_xbar = 0, blue_xbar = 0; /* no swap */ uint32_t grph_depth = 0, grph_format = 0; uint32_t sign = 0, floating = 0; if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 || /*todo: doesn't look like we handle BGRA here, * should problem swap endian*/ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) { /* ABGR formats */ red_xbar = 2; blue_xbar = 2; } REG_SET_2(GRPH_SWAP_CNTL, 0, GRPH_RED_CROSSBAR, red_xbar, GRPH_BLUE_CROSSBAR, blue_xbar); switch (format) { case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: grph_depth = 0; grph_format = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: grph_depth = 1; grph_format = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB565: grph_depth = 1; grph_format = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: grph_depth = 2; grph_format = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: grph_depth = 2; grph_format = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: sign = 1; floating = 1; fallthrough; case 
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: grph_depth = 3; grph_format = 0; break; default: DC_ERR("unsupported grph pixel format"); break; } REG_UPDATE_2(GRPH_CONTROL, GRPH_DEPTH, grph_depth, GRPH_FORMAT, grph_format); REG_UPDATE_4(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, floating, GRPH_PRESCALE_R_SIGN, sign, GRPH_PRESCALE_G_SIGN, sign, GRPH_PRESCALE_B_SIGN, sign); } static void dce_mi_program_surface_config( struct mem_input *mi, enum surface_pixel_format format, union dc_tiling_info *tiling_info, struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1); program_tiling(dce_mi, tiling_info); program_size_and_rotation(dce_mi, rotation, plane_size); if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN && format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) program_grph_pixel_format(dce_mi, format); } #if defined(CONFIG_DRM_AMD_DC_SI) static void dce60_mi_program_surface_config( struct mem_input *mi, enum surface_pixel_format format, union dc_tiling_info *tiling_info, struct plane_size *plane_size, enum dc_rotation_angle rotation, /* not used in DCE6 */ struct dc_plane_dcc_param *dcc, bool horizontal_mirror) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1); program_tiling(dce_mi, tiling_info); dce60_program_size(dce_mi, rotation, plane_size); if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN && format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) program_grph_pixel_format(dce_mi, format); } #endif static uint32_t get_dmif_switch_time_us( uint32_t h_total, uint32_t v_total, uint32_t pix_clk_khz) { uint32_t frame_time; uint32_t pixels_per_second; uint32_t pixels_per_frame; uint32_t refresh_rate; const uint32_t us_in_sec = 1000000; const uint32_t min_single_frame_time_us = 30000; /*return double of frame time*/ const uint32_t single_frame_time_multiplier = 2; if (!h_total || v_total || !pix_clk_khz) return single_frame_time_multiplier * min_single_frame_time_us; /*TODO: should we use pixel format normalized pixel clock here?*/ pixels_per_second = pix_clk_khz * 1000; pixels_per_frame = h_total * v_total; if (!pixels_per_second || !pixels_per_frame) { /* avoid division by zero */ ASSERT(pixels_per_frame); ASSERT(pixels_per_second); return single_frame_time_multiplier * min_single_frame_time_us; } refresh_rate = pixels_per_second / pixels_per_frame; if (!refresh_rate) { /* avoid division by zero*/ ASSERT(refresh_rate); return single_frame_time_multiplier * min_single_frame_time_us; } frame_time = us_in_sec / refresh_rate; if (frame_time < min_single_frame_time_us) frame_time = min_single_frame_time_us; frame_time *= single_frame_time_multiplier; return frame_time; } static void dce_mi_allocate_dmif( struct mem_input *mi, uint32_t h_total, uint32_t v_total, uint32_t pix_clk_khz, uint32_t total_stream_num) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); const uint32_t retry_delay = 10; uint32_t retry_count = get_dmif_switch_time_us( h_total, v_total, pix_clk_khz) / retry_delay; uint32_t pix_dur; uint32_t buffers_allocated; uint32_t dmif_buffer_control; dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, &buffers_allocated); if (buffers_allocated == 2) return; REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control, DMIF_BUFFERS_ALLOCATED, 2); REG_WAIT(DMIF_BUFFER_CONTROL, 
DMIF_BUFFERS_ALLOCATION_COMPLETED, 1, retry_delay, retry_count); if (pix_clk_khz != 0) { pix_dur = 1000000000ULL / pix_clk_khz; REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, pix_dur); } if (dce_mi->wa.single_head_rdreq_dmif_limit) { uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, ENABLE, enable); } } static void dce_mi_free_dmif( struct mem_input *mi, uint32_t total_stream_num) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); uint32_t buffers_allocated; uint32_t dmif_buffer_control; dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, &buffers_allocated); if (buffers_allocated == 0) return; REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control, DMIF_BUFFERS_ALLOCATED, 0); REG_WAIT(DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, 1, 10, 3500); if (dce_mi->wa.single_head_rdreq_dmif_limit) { uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, ENABLE, enable); } } static void program_sec_addr( struct dce_mem_input *dce_mi, PHYSICAL_ADDRESS_LOC address) { /*high register MUST be programmed first*/ REG_SET(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, address.high_part); REG_SET_2(GRPH_SECONDARY_SURFACE_ADDRESS, 0, GRPH_SECONDARY_SURFACE_ADDRESS, address.low_part >> 8, GRPH_SECONDARY_DFQ_ENABLE, 0); } static void program_pri_addr( struct dce_mem_input *dce_mi, PHYSICAL_ADDRESS_LOC address) { /*high register MUST be programmed first*/ REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, address.high_part); REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS, 0, GRPH_PRIMARY_SURFACE_ADDRESS, address.low_part >> 8); } static bool dce_mi_is_flip_pending(struct mem_input *mem_input) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input); uint32_t update_pending; REG_GET(GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, &update_pending); if (update_pending) return true; mem_input->current_address = mem_input->request_address; return false; } static bool dce_mi_program_surface_flip_and_addr( struct mem_input *mem_input, const struct dc_plane_address *address, bool flip_immediate) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input); REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); REG_UPDATE( GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, flip_immediate ? 
1 : 0); switch (address->type) { case PLN_ADDR_TYPE_GRAPHICS: if (address->grph.addr.quad_part == 0) break; program_pri_addr(dce_mi, address->grph.addr); break; case PLN_ADDR_TYPE_GRPH_STEREO: if (address->grph_stereo.left_addr.quad_part == 0 || address->grph_stereo.right_addr.quad_part == 0) break; program_pri_addr(dce_mi, address->grph_stereo.left_addr); program_sec_addr(dce_mi, address->grph_stereo.right_addr); break; default: /* not supported */ BREAK_TO_DEBUGGER(); break; } mem_input->request_address = *address; if (flip_immediate) mem_input->current_address = *address; REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); return true; } static const struct mem_input_funcs dce_mi_funcs = { .mem_input_program_display_marks = dce_mi_program_display_marks, .allocate_mem_input = dce_mi_allocate_dmif, .free_mem_input = dce_mi_free_dmif, .mem_input_program_surface_flip_and_addr = dce_mi_program_surface_flip_and_addr, .mem_input_program_pte_vm = dce_mi_program_pte_vm, .mem_input_program_surface_config = dce_mi_program_surface_config, .mem_input_is_flip_pending = dce_mi_is_flip_pending }; #if defined(CONFIG_DRM_AMD_DC_SI) static const struct mem_input_funcs dce60_mi_funcs = { .mem_input_program_display_marks = dce60_mi_program_display_marks, .allocate_mem_input = dce_mi_allocate_dmif, .free_mem_input = dce_mi_free_dmif, .mem_input_program_surface_flip_and_addr = dce_mi_program_surface_flip_and_addr, .mem_input_program_pte_vm = dce_mi_program_pte_vm, .mem_input_program_surface_config = dce60_mi_program_surface_config, .mem_input_is_flip_pending = dce_mi_is_flip_pending }; #endif static const struct mem_input_funcs dce112_mi_funcs = { .mem_input_program_display_marks = dce112_mi_program_display_marks, .allocate_mem_input = dce_mi_allocate_dmif, .free_mem_input = dce_mi_free_dmif, .mem_input_program_surface_flip_and_addr = dce_mi_program_surface_flip_and_addr, .mem_input_program_pte_vm = dce_mi_program_pte_vm, .mem_input_program_surface_config = dce_mi_program_surface_config, .mem_input_is_flip_pending = dce_mi_is_flip_pending }; static const struct mem_input_funcs dce120_mi_funcs = { .mem_input_program_display_marks = dce120_mi_program_display_marks, .allocate_mem_input = dce_mi_allocate_dmif, .free_mem_input = dce_mi_free_dmif, .mem_input_program_surface_flip_and_addr = dce_mi_program_surface_flip_and_addr, .mem_input_program_pte_vm = dce_mi_program_pte_vm, .mem_input_program_surface_config = dce_mi_program_surface_config, .mem_input_is_flip_pending = dce_mi_is_flip_pending }; void dce_mem_input_construct( struct dce_mem_input *dce_mi, struct dc_context *ctx, int inst, const struct dce_mem_input_registers *regs, const struct dce_mem_input_shift *mi_shift, const struct dce_mem_input_mask *mi_mask) { dce_mi->base.ctx = ctx; dce_mi->base.inst = inst; dce_mi->base.funcs = &dce_mi_funcs; dce_mi->regs = regs; dce_mi->shifts = mi_shift; dce_mi->masks = mi_mask; } #if defined(CONFIG_DRM_AMD_DC_SI) void dce60_mem_input_construct( struct dce_mem_input *dce_mi, struct dc_context *ctx, int inst, const struct dce_mem_input_registers *regs, const struct dce_mem_input_shift *mi_shift, const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); dce_mi->base.funcs = &dce60_mi_funcs; } #endif void dce112_mem_input_construct( struct dce_mem_input *dce_mi, struct dc_context *ctx, int inst, const struct dce_mem_input_registers *regs, const struct dce_mem_input_shift *mi_shift, const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, 
mi_shift, mi_mask); dce_mi->base.funcs = &dce112_mi_funcs; } void dce120_mem_input_construct( struct dce_mem_input *dce_mi, struct dc_context *ctx, int inst, const struct dce_mem_input_registers *regs, const struct dce_mem_input_shift *mi_shift, const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); dce_mi->base.funcs = &dce120_mi_funcs; }
linux-master
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
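dce_mi_allocate_dmif() above sizes its REG_WAIT retry count from get_dmif_switch_time_us(): roughly two frame times, each clamped to at least 30 ms, split into 10 us polls. The sketch below reproduces only that happy-path arithmetic with nonzero inputs assumed; the timing values are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the DMIF buffer-switch wait budget used by dce_mi_allocate_dmif()
 * above: double the frame time (clamped to at least 30 ms per frame), later
 * divided into 10 us REG_WAIT retries. Inputs are assumed nonzero. */
static uint32_t dmif_switch_time_us(uint32_t h_total, uint32_t v_total,
				    uint32_t pix_clk_khz)
{
	const uint32_t us_in_sec = 1000000;
	const uint32_t min_single_frame_time_us = 30000;
	uint32_t refresh_rate = (pix_clk_khz * 1000u) / (h_total * v_total);
	uint32_t frame_time = us_in_sec / refresh_rate;

	if (frame_time < min_single_frame_time_us)
		frame_time = min_single_frame_time_us;
	return 2 * frame_time;               /* return double of frame time, as in the driver */
}

int main(void)
{
	/* illustrative: 1080p total timing (2200x1125), 148.5 MHz pixel clock */
	uint32_t budget = dmif_switch_time_us(2200, 1125, 148500);
	printf("wait budget %u us -> %u retries of 10 us\n",
	       (unsigned)budget, (unsigned)(budget / 10));
	return 0;
}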
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "dcn30_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" #include "dc.h" #include "core_types.h" #include <linux/delay.h> #define DC_LOGGER \ enc1->base.ctx->logger #define REG(reg)\ (enc1->regs->reg) #undef FN #define FN(reg_name, field_name) \ enc1->se_shift->field_name, enc1->se_mask->field_name #define VBI_LINE_0 0 #define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000 #define CTX \ enc1->base.ctx static void enc3_update_hdmi_info_packet( struct dcn10_stream_encoder *enc1, uint32_t packet_index, const struct dc_info_packet *info_packet) { uint32_t cont, send, line; if (info_packet->valid) { enc1->base.vpg->funcs->update_generic_info_packet( enc1->base.vpg, packet_index, info_packet, true); /* enable transmission of packet(s) - * packet transmission begins on the next frame */ cont = 1; /* send packet(s) every frame */ send = 1; /* select line number to send packets on */ line = 2; } else { cont = 0; send = 0; line = 0; } /* DP_SEC_GSP[x]_LINE_REFERENCE - keep default value REFER_TO_DP_SOF */ /* choose which generic packet control to use */ switch (packet_index) { case 0: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, cont, HDMI_GENERIC0_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_LINE, line); break; case 1: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, cont, HDMI_GENERIC1_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_LINE, line); break; case 2: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_CONT, cont, HDMI_GENERIC2_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC2_LINE, line); break; case 3: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_CONT, cont, HDMI_GENERIC3_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC3_LINE, line); break; case 4: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_CONT, cont, HDMI_GENERIC4_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC4_LINE, line); break; case 5: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_CONT, cont, HDMI_GENERIC5_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC5_LINE, line); break; case 6: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_CONT, cont, HDMI_GENERIC6_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC6_LINE, line); break; case 7: 
REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_CONT, cont, HDMI_GENERIC7_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC7_LINE, line); break; case 8: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_CONT, cont, HDMI_GENERIC8_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC8_LINE, line); break; case 9: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_CONT, cont, HDMI_GENERIC9_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC9_LINE, line); break; case 10: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_CONT, cont, HDMI_GENERIC10_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC10_LINE, line); break; case 11: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_CONT, cont, HDMI_GENERIC11_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC11_LINE, line); break; case 12: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_CONT, cont, HDMI_GENERIC12_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC12_LINE, line); break; case 13: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_CONT, cont, HDMI_GENERIC13_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC13_LINE, line); break; case 14: REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_CONT, cont, HDMI_GENERIC14_SEND, send); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL10, HDMI_GENERIC14_LINE, line); break; default: /* invalid HW packet index */ DC_LOG_WARNING( "Invalid HW packet index: %s()\n", __func__); return; } } void enc3_stream_encoder_update_hdmi_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); /* for bring up, disable dp double TODO */ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1); REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); /*Always add mandatory packets first followed by optional ones*/ enc3_update_hdmi_info_packet(enc1, 0, &info_frame->avi); enc3_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif); enc3_update_hdmi_info_packet(enc1, 2, &info_frame->gamut); enc3_update_hdmi_info_packet(enc1, 1, &info_frame->vendor); enc3_update_hdmi_info_packet(enc1, 3, &info_frame->spd); enc3_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd); enc3_update_hdmi_info_packet(enc1, 6, &info_frame->vtem); } void enc3_stream_encoder_stop_hdmi_info_packets( struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); /* stop generic packets 0,1 on HDMI */ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0, HDMI_GENERIC0_CONT, 0, HDMI_GENERIC0_SEND, 0, HDMI_GENERIC1_CONT, 0, HDMI_GENERIC1_SEND, 0); REG_SET_2(HDMI_GENERIC_PACKET_CONTROL1, 0, HDMI_GENERIC0_LINE, 0, HDMI_GENERIC1_LINE, 0); /* stop generic packets 2,3 on HDMI */ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0, HDMI_GENERIC2_CONT, 0, HDMI_GENERIC2_SEND, 0, HDMI_GENERIC3_CONT, 0, HDMI_GENERIC3_SEND, 0); REG_SET_2(HDMI_GENERIC_PACKET_CONTROL2, 0, HDMI_GENERIC2_LINE, 0, HDMI_GENERIC3_LINE, 0); /* stop generic packets 4,5 on HDMI */ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0, HDMI_GENERIC4_CONT, 0, HDMI_GENERIC4_SEND, 0, HDMI_GENERIC5_CONT, 0, HDMI_GENERIC5_SEND, 0); REG_SET_2(HDMI_GENERIC_PACKET_CONTROL3, 0, HDMI_GENERIC4_LINE, 0, HDMI_GENERIC5_LINE, 0); /* stop generic packets 6,7 on HDMI */ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0, HDMI_GENERIC6_CONT, 0, HDMI_GENERIC6_SEND, 0, HDMI_GENERIC7_CONT, 0, HDMI_GENERIC7_SEND, 0); REG_SET_2(HDMI_GENERIC_PACKET_CONTROL4, 0, HDMI_GENERIC6_LINE, 0, 
HDMI_GENERIC7_LINE, 0); /* stop generic packets 8,9 on HDMI */ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0, HDMI_GENERIC8_CONT, 0, HDMI_GENERIC8_SEND, 0, HDMI_GENERIC9_CONT, 0, HDMI_GENERIC9_SEND, 0); REG_SET_2(HDMI_GENERIC_PACKET_CONTROL7, 0, HDMI_GENERIC8_LINE, 0, HDMI_GENERIC9_LINE, 0); /* stop generic packets 10,11 on HDMI */ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0, HDMI_GENERIC10_CONT, 0, HDMI_GENERIC10_SEND, 0, HDMI_GENERIC11_CONT, 0, HDMI_GENERIC11_SEND, 0); REG_SET_2(HDMI_GENERIC_PACKET_CONTROL8, 0, HDMI_GENERIC10_LINE, 0, HDMI_GENERIC11_LINE, 0); /* stop generic packets 12,13 on HDMI */ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0, HDMI_GENERIC12_CONT, 0, HDMI_GENERIC12_SEND, 0, HDMI_GENERIC13_CONT, 0, HDMI_GENERIC13_SEND, 0); REG_SET_2(HDMI_GENERIC_PACKET_CONTROL9, 0, HDMI_GENERIC12_LINE, 0, HDMI_GENERIC13_LINE, 0); /* stop generic packet 14 on HDMI */ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL6, 0, HDMI_GENERIC14_CONT, 0, HDMI_GENERIC14_SEND, 0); REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL10, HDMI_GENERIC14_LINE, 0); } /* Set DSC-related configuration. * dsc_mode: 0 disables DSC, other values enable DSC in specified format * sc_bytes_per_pixel: Bytes per pixel in u3.28 format * dsc_slice_width: Slice width in pixels */ static void enc3_dp_set_dsc_config(struct stream_encoder *enc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); REG_UPDATE_2(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode, DP_DSC_SLICE_WIDTH, dsc_slice_width); REG_SET(DP_DSC_BYTES_PER_PIXEL, 0, DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel); } void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps, bool immediate_update) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); if (enable) { struct dc_info_packet pps_sdp; int i; /* Configure for PPS packet size (128 bytes) */ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP11_PPS, 1); /* We need turn on clock before programming AFMT block * * TODO: We may not need this here anymore since update_generic_info_packet * no longer touches AFMT */ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); /* Load PPS into infoframe (SDP) registers */ pps_sdp.valid = true; pps_sdp.hb0 = 0; pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS; pps_sdp.hb2 = 127; pps_sdp.hb3 = 0; for (i = 0; i < 4; i++) { memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32); enc1->base.vpg->funcs->update_generic_info_packet( enc1->base.vpg, 11 + i, &pps_sdp, immediate_update); } /* SW should make sure VBID[6] update line number is bigger * than PPS transmit line number */ REG_UPDATE(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, 2); REG_UPDATE_2(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, 0, DP_VBID6_LINE_NUM, 3); /* Send PPS data at the line number specified above. * DP spec requires PPS to be sent only when it changes, however since * decoder has to be able to handle its change on every frame, we're * sending it always (i.e. on every frame) to reduce the chance it'd be * missed by decoder. If it turns out required to send PPS only when it * changes, we can use DP_SEC_GSP11_SEND register. */ REG_UPDATE(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, 1); REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } else { /* Disable Generic Stream Packet 11 (GSP) transmission */ REG_UPDATE(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, 0); REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP11_PPS, 0); } } /* this function read dsc related register fields to be logged later in dcn10_log_hw_state * into a dcn_dsc_state struct. 
*/ static void enc3_read_state(struct stream_encoder *enc, struct enc_state *s) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); //if dsc is enabled, continue to read REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode); if (s->dsc_mode) { REG_GET(DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, &s->dsc_slice_width); REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num); REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference); REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num); REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable); REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable); } } void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( struct stream_encoder *enc, struct encoder_info_frame *info_frame) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); if (info_frame->adaptive_sync.valid == true && info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1); REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, info_frame->sdp_line_num.adaptive_sync_line_num); } } void enc3_stream_encoder_update_dp_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); uint32_t value = 0; uint32_t dmdata_packet_enabled = 0; if (info_frame->vsc.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 0, /* packetIndex */ &info_frame->vsc, true); } /* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU on. * There should have another Infopacket type (e.g. vsc_psrsu) for PSR_SU. * In addition, currently the driver check the valid bit then update and * send the corresponding Infopacket. For PSR-SU, the SDP only be sent * while entering PSR-SU mode. So we need another parameter(e.g. send) * in dc_info_packet to indicate which infopacket should be enabled by * default here. */ if (info_frame->vsc.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 1, /* packetIndex */ &info_frame->vsc, true); } if (info_frame->spd.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 2, /* packetIndex */ &info_frame->spd, true); } if (info_frame->hdrsmd.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 3, /* packetIndex */ &info_frame->hdrsmd, true); } /* packetIndex 4 is used for send immediate sdp message, and please * use other packetIndex (such as 5,6) for other info packet */ if (info_frame->adaptive_sync.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, 5, /* packetIndex */ &info_frame->adaptive_sync, true); /* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame */ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid); /* This bit is the master enable bit. * When enabling secondary stream engine, * this master bit must also be set. * This register shared with audio info frame. * Therefore we need to enable master bit * if at least on of the fields is not 0 */ value = REG_READ(DP_SEC_CNTL); if (value) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); /* check if dynamic metadata packet transmission is enabled */ REG_GET(DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_ENABLE, &dmdata_packet_enabled); if (dmdata_packet_enabled) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } static void enc3_dp_set_odm_combine( struct stream_encoder *enc, bool odm_combine) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, odm_combine); } /* setup stream encoder in dvi mode */ static void enc3_stream_encoder_dvi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, bool is_dual_link) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { struct bp_encoder_control cntl = {0}; cntl.action = ENCODER_CONTROL_SETUP; cntl.engine_id = enc1->base.id; cntl.signal = is_dual_link ? SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; cntl.enable_dp_audio = false; cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR; if (enc1->base.bp->funcs->encoder_control( enc1->base.bp, &cntl) != BP_RESULT_OK) return; } else { //Set pattern for clock channel, default vlue 0x63 does not work REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F); //DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup //DIG_SOURCE_SELECT is already set in dig_connect_to_otg /* set DIG_START to 0x1 to reset FIFO */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); udelay(1); /* write 0 to take the FIFO out of reset */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); udelay(1); } ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888); enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); } /* setup stream encoder in hdmi mode */ static void enc3_stream_encoder_hdmi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, int actual_pix_clk_khz, bool enable_audio) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { struct bp_encoder_control cntl = {0}; cntl.action = ENCODER_CONTROL_SETUP; cntl.engine_id = enc1->base.id; cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; cntl.enable_dp_audio = enable_audio; cntl.pixel_clock = actual_pix_clk_khz; cntl.lanes_number = LANE_COUNT_FOUR; if (enc1->base.bp->funcs->encoder_control( enc1->base.bp, &cntl) != BP_RESULT_OK) return; } else { //Set pattern for clock channel, default vlue 0x63 does not work REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F); //DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup //DIG_SOURCE_SELECT is already set in dig_connect_to_otg /* set DIG_START to 0x1 to reset FIFO */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); udelay(1); /* write 0 to 
take the FIFO out of reset */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); udelay(1); } /* Configure pixel encoding */ enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); /* setup HDMI engine */ REG_UPDATE_6(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, 1, HDMI_KEEPOUT_MODE, 1, HDMI_DEEP_COLOR_ENABLE, 0, HDMI_DATA_SCRAMBLE_EN, 0, HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1, HDMI_CLOCK_CHANNEL_RATE, 0); /* Configure color depth */ switch (crtc_timing->display_color_depth) { case COLOR_DEPTH_888: REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); break; case COLOR_DEPTH_101010: if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1, HDMI_DEEP_COLOR_ENABLE, 0); } else { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1, HDMI_DEEP_COLOR_ENABLE, 1); } break; case COLOR_DEPTH_121212: if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2, HDMI_DEEP_COLOR_ENABLE, 0); } else { REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2, HDMI_DEEP_COLOR_ENABLE, 1); } break; case COLOR_DEPTH_161616: REG_UPDATE_2(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 3, HDMI_DEEP_COLOR_ENABLE, 1); break; default: break; } if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) { /* enable HDMI data scrambler * HDMI_CLOCK_CHANNEL_RATE_MORE_340M * Clock channel frequency is 1/4 of character rate. */ REG_UPDATE_2(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, 1, HDMI_CLOCK_CHANNEL_RATE, 1); } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) { /* TODO: New feature for DCE11, still need to implement */ /* enable HDMI data scrambler * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE * Clock channel frequency is the same * as character rate */ REG_UPDATE_2(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, 1, HDMI_CLOCK_CHANNEL_RATE, 0); } /* Enable transmission of General Control packet on every frame */ REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1, HDMI_GC_SEND, 1, HDMI_NULL_SEND, 1); /* Disable Audio Content Protection packet transmission */ REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0); /* following belongs to audio */ /* Enable Audio InfoFrame packet transmission. 
*/ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); /* update double-buffered AUDIO_INFO registers immediately */ ASSERT (enc->afmt); enc->afmt->funcs->audio_info_immediate_update(enc->afmt); /* Select line number on which to send Audio InfoFrame packets */ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, VBI_LINE_0 + 2); /* set HDMI GC AVMUTE */ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0); } void enc3_audio_mute_control( struct stream_encoder *enc, bool mute) { ASSERT (enc->afmt); enc->afmt->funcs->audio_mute_control(enc->afmt, mute); } void enc3_se_dp_audio_setup( struct stream_encoder *enc, unsigned int az_inst, struct audio_info *info) { ASSERT (enc->afmt); enc->afmt->funcs->se_audio_setup(enc->afmt, az_inst, info); } #define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000 #define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1 static void enc3_se_setup_dp_audio( struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); /* --- DP Audio packet configurations --- */ /* ATP Configuration */ REG_SET(DP_SEC_AUD_N, 0, DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT); /* Async/auto-calc timestamp mode */ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE, DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC); ASSERT (enc->afmt); enc->afmt->funcs->setup_dp_audio(enc->afmt); } void enc3_se_dp_audio_enable( struct stream_encoder *enc) { enc1_se_enable_audio_clock(enc, true); enc3_se_setup_dp_audio(enc); enc1_se_enable_dp_audio(enc); } static void enc3_se_setup_hdmi_audio( struct stream_encoder *enc, const struct audio_crtc_info *crtc_info) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); struct audio_clock_info audio_clock_info = {0}; /* Setup audio in AFMT - program AFMT block associated with DIO */ ASSERT (enc->afmt); enc->afmt->funcs->setup_hdmi_audio(enc->afmt); /* HDMI_AUDIO_PACKET_CONTROL */ REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); /* HDMI_ACR_PACKET_CONTROL */ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1, HDMI_ACR_SOURCE, 0, HDMI_ACR_AUDIO_PRIORITY, 0); /* Program audio clock sample/regeneration parameters */ get_audio_clock_info(crtc_info->color_depth, crtc_info->requested_pixel_clock_100Hz, crtc_info->calculated_pixel_clock_100Hz, &audio_clock_info); DC_LOG_HW_AUDIO( "\n%s:Input::requested_pixel_clock_100Hz = %d" \ "calculated_pixel_clock_100Hz = %d \n", __func__, \ crtc_info->requested_pixel_clock_100Hz, \ crtc_info->calculated_pixel_clock_100Hz); /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz); /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz); /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz); /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz); /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz); /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz); /* Video driver cannot know in advance which sample rate will * be used by HD Audio driver * HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is * programmed below in interruppt callback */ } void enc3_se_hdmi_audio_setup( struct stream_encoder *enc, unsigned int az_inst, struct audio_info *info, struct audio_crtc_info *audio_crtc_info) { enc1_se_enable_audio_clock(enc, true); 
enc3_se_setup_hdmi_audio(enc, audio_crtc_info); ASSERT (enc->afmt); enc->afmt->funcs->se_audio_setup(enc->afmt, az_inst, info); } static const struct stream_encoder_funcs dcn30_str_enc_funcs = { .dp_set_odm_combine = enc3_dp_set_odm_combine, .dp_set_stream_attribute = enc2_stream_encoder_dp_set_stream_attribute, .hdmi_set_stream_attribute = enc3_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = enc3_stream_encoder_dvi_set_stream_attribute, .set_throttled_vcp_size = enc1_stream_encoder_set_throttled_vcp_size, .update_hdmi_info_packets = enc3_stream_encoder_update_hdmi_info_packets, .stop_hdmi_info_packets = enc3_stream_encoder_stop_hdmi_info_packets, .update_dp_info_packets_sdp_line_num = enc3_stream_encoder_update_dp_info_packets_sdp_line_num, .update_dp_info_packets = enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = enc2_stream_encoder_dp_unblank, .audio_mute_control = enc3_audio_mute_control, .dp_audio_setup = enc3_se_dp_audio_setup, .dp_audio_enable = enc3_se_dp_audio_enable, .dp_audio_disable = enc1_se_dp_audio_disable, .hdmi_audio_setup = enc3_se_hdmi_audio_setup, .hdmi_audio_disable = enc1_se_hdmi_audio_disable, .setup_stereo_sync = enc1_setup_stereo_sync, .set_avmute = enc1_stream_encoder_set_avmute, .dig_connect_to_otg = enc1_dig_connect_to_otg, .dig_source_otg = enc1_dig_source_otg, .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, .enc_read_state = enc3_read_state, .dp_set_dsc_config = enc3_dp_set_dsc_config, .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet, .set_dynamic_metadata = enc2_set_dynamic_metadata, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level, }; void dcn30_dio_stream_encoder_construct( struct dcn10_stream_encoder *enc1, struct dc_context *ctx, struct dc_bios *bp, enum engine_id eng_id, struct vpg *vpg, struct afmt *afmt, const struct dcn10_stream_enc_registers *regs, const struct dcn10_stream_encoder_shift *se_shift, const struct dcn10_stream_encoder_mask *se_mask) { enc1->base.funcs = &dcn30_str_enc_funcs; enc1->base.ctx = ctx; enc1->base.id = eng_id; enc1->base.bp = bp; enc1->base.vpg = vpg; enc1->base.afmt = afmt; enc1->regs = regs; enc1->se_shift = se_shift; enc1->se_mask = se_mask; enc1->base.stream_enc_inst = vpg->inst; }
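/*
 * Illustrative sketch, not part of the driver file above: how a 128-byte
 * packed DSC PPS can be split into four 32-byte SDP payloads, mirroring the
 * loop in enc3_dp_set_dsc_pps_info_packet() that loads the chunks into
 * generic info packets 11..14. All names here (pps_sdp_chunk,
 * split_pps_into_sdps, PPS_INFOFRAME_TYPE) are hypothetical, and the value
 * 0x10 for the PPS infoframe type is an assumption; the driver uses the
 * DC_DP_INFOFRAME_TYPE_PPS constant instead.
 */
#include <stdint.h>
#include <string.h>

#define PPS_SIZE		128
#define SDP_PAYLOAD_SIZE	32
#define PPS_INFOFRAME_TYPE	0x10	/* assumed; driver uses DC_DP_INFOFRAME_TYPE_PPS */

struct pps_sdp_chunk {
	uint8_t hb0, hb1, hb2, hb3;	/* SDP header bytes */
	uint8_t sb[SDP_PAYLOAD_SIZE];	/* SDP payload bytes */
};

/* Fill PPS_SIZE / SDP_PAYLOAD_SIZE chunks from one packed PPS buffer. */
static void split_pps_into_sdps(const uint8_t packed_pps[PPS_SIZE],
				struct pps_sdp_chunk chunks[PPS_SIZE / SDP_PAYLOAD_SIZE])
{
	int i;

	for (i = 0; i < PPS_SIZE / SDP_PAYLOAD_SIZE; i++) {
		chunks[i].hb0 = 0;
		chunks[i].hb1 = PPS_INFOFRAME_TYPE;
		chunks[i].hb2 = PPS_SIZE - 1;	/* 127, as programmed into pps_sdp.hb2 above */
		chunks[i].hb3 = 0;
		memcpy(chunks[i].sb, &packed_pps[i * SDP_PAYLOAD_SIZE],
		       SDP_PAYLOAD_SIZE);
	}
}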
linux-master
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
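/*
 * Illustrative sketch, not part of the file above: the HDMI audio clock
 * regeneration (ACR) relationship behind the CTS/N values that
 * enc3_se_setup_hdmi_audio() programs into the HDMI_ACR_* registers. The
 * sink regenerates the audio clock as 128 * fs = f_TMDS * N / CTS, so the
 * source programs CTS = (f_TMDS * N) / (128 * fs). The driver obtains these
 * values from precomputed tables via get_audio_clock_info(); the helper
 * below only demonstrates the arithmetic and its name is hypothetical.
 */
#include <stdint.h>

/* Baseline N values recommended by the HDMI spec for common sample rates. */
#define ACR_N_32KHZ	4096
#define ACR_N_44_1KHZ	6272
#define ACR_N_48KHZ	6144

static uint32_t example_compute_cts(uint64_t tmds_clock_hz, uint32_t n,
				    uint32_t fs_hz)
{
	return (uint32_t)((tmds_clock_hz * n) / (128ull * fs_hz));
}

/*
 * Example: a 148.5 MHz TMDS clock with 48 kHz audio and N = 6144 gives
 * CTS = 148500000 * 6144 / (128 * 48000) = 148500, i.e. f_TMDS / 1000,
 * because N was chosen as 128 * fs / 1000.
 */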
/* * Copyright 2016-2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dce110/dce110_hw_sequencer.h" #include "dcn10/dcn10_hw_sequencer.h" #include "dcn20/dcn20_hwseq.h" #include "dcn21/dcn21_hwseq.h" #include "dcn30_hwseq.h" #include "dcn30_init.h" static const struct hw_sequencer_funcs dcn30_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn30_init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, .update_plane_addr = dcn20_update_plane_addr, .update_dchub = dcn10_update_dchub, .update_pending_status = dcn10_update_pending_status, .program_output_csc = dcn20_program_output_csc, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dcn10_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, .update_info_frame = dcn30_update_info_frame, .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, .enable_stream = dcn20_enable_stream, .disable_stream = dce110_disable_stream, .unblank_stream = dcn20_unblank_stream, .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn30_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, .set_static_screen_control = dcn30_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .edp_wait_for_T12 = dce110_edp_wait_for_T12, .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = 
dcn10_set_cursor_sdr_white_level, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, .program_triplebuffer = dcn20_program_triple_buffer, .enable_writeback = dcn30_enable_writeback, .disable_writeback = dcn30_disable_writeback, .update_writeback = dcn30_update_writeback, .mmhubbub_warmup = dcn30_mmhubbub_warmup, .dmdata_status_done = dcn20_dmdata_status_done, .program_dmdata_engine = dcn30_program_dmdata_engine, .set_dmdata_attributes = dcn20_set_dmdata_attributes, .init_sys_ctx = dcn20_init_sys_ctx, .init_vm_ctx = dcn20_init_vm_ctx, .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations, .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .hardware_release = dcn30_hardware_release, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .is_abm_supported = dcn21_is_abm_supported }; static const struct hwseq_private_funcs dcn30_private_funcs = { .init_pipes = dcn10_init_pipes, .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn30_set_input_transfer_func, .set_output_transfer_func = dcn30_set_output_transfer_func, .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, .enable_stream_timing = dcn20_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .disable_stream_gating = dcn20_disable_stream_gating, .enable_stream_gating = dcn20_enable_stream_gating, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn20_init_blank, .disable_vga = dcn20_disable_vga, .bios_golden_init = dcn10_bios_golden_init, .plane_atomic_disable = dcn20_plane_atomic_disable, .plane_atomic_power_down = dcn10_plane_atomic_power_down, .enable_power_gating_plane = dcn20_enable_power_gating_plane, .dpp_pg_control = dcn20_dpp_pg_control, .hubp_pg_control = dcn20_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn20_update_odm, .dsc_pg_control = dcn20_dsc_pg_control, .set_hdr_multiplier = dcn10_set_hdr_multiplier, .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, .wait_for_blank_complete = dcn20_wait_for_blank_complete, .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn30_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, }; void dcn30_hw_sequencer_construct(struct dc *dc) { dc->hwss = dcn30_funcs; dc->hwseq->funcs = dcn30_private_funcs; }
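/*
 * Illustrative sketch, not part of the file above: the pattern used by
 * dcn30_hw_sequencer_construct() — a per-generation, statically defined
 * table of function pointers attached to the device at construct time, so
 * common code can call through the table without knowing the DCN revision.
 * All names below (example_funcs, example_device, dcnxx_*) are hypothetical.
 */
struct example_funcs {
	void (*init_hw)(void);
	void (*enable_stream)(int stream_id);
};

struct example_device {
	const struct example_funcs *funcs;
};

static void dcnxx_init_hw(void)
{
	/* generation-specific hardware init would go here */
}

static void dcnxx_enable_stream(int stream_id)
{
	(void)stream_id;	/* generation-specific stream enable */
}

/* One table per hardware generation, shared by every device instance. */
static const struct example_funcs dcnxx_funcs = {
	.init_hw = dcnxx_init_hw,
	.enable_stream = dcnxx_enable_stream,
};

static void dcnxx_construct(struct example_device *dev)
{
	dev->funcs = &dcnxx_funcs;	/* callers then go through dev->funcs->init_hw() etc. */
}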
linux-master
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dc_bios_types.h" #include "hw_shared.h" #include "dcn30_afmt.h" #include "reg_helper.h" #define DC_LOGGER \ afmt3->base.ctx->logger #define REG(reg)\ (afmt3->regs->reg) #undef FN #define FN(reg_name, field_name) \ afmt3->afmt_shift->field_name, afmt3->afmt_mask->field_name #define CTX \ afmt3->base.ctx void afmt3_setup_hdmi_audio( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); if (afmt->funcs->afmt_poweron) afmt->funcs->afmt_poweron(afmt); /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); /* AFMT_AUDIO_PACKET_CONTROL2 */ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, 0, AFMT_60958_OSF_OVRD, 0); /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK & * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */ REG_UPDATE_2(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1, AFMT_60958_CS_CLOCK_ACCURACY, 0); /* AFMT_60958_1 AFMT_60958_CS_CHALNNEL_NUMBER_R */ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); /* AFMT_60958_2 now keep this settings until * Programming guide comes out */ REG_UPDATE_6(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3, AFMT_60958_CS_CHANNEL_NUMBER_3, 4, AFMT_60958_CS_CHANNEL_NUMBER_4, 5, AFMT_60958_CS_CHANNEL_NUMBER_5, 6, AFMT_60958_CS_CHANNEL_NUMBER_6, 7, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); } static union audio_cea_channels speakers_to_channels( struct audio_speaker_flags speaker_flags) { union audio_cea_channels cea_channels = {0}; /* these are one to one */ cea_channels.channels.FL = speaker_flags.FL_FR; cea_channels.channels.FR = speaker_flags.FL_FR; cea_channels.channels.LFE = speaker_flags.LFE; cea_channels.channels.FC = speaker_flags.FC; /* if Rear Left and Right exist move RC speaker to channel 7 * otherwise to channel 5 */ if (speaker_flags.RL_RR) { cea_channels.channels.RL_RC = speaker_flags.RL_RR; cea_channels.channels.RR = speaker_flags.RL_RR; cea_channels.channels.RC_RLC_FLC = speaker_flags.RC; } else { cea_channels.channels.RL_RC = speaker_flags.RC; } /* FRONT Left Right Center and REAR Left Right Center are exclusive */ if (speaker_flags.FLC_FRC) { cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC; cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC; } else { cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC; cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC; } return cea_channels; } void afmt3_se_audio_setup( struct afmt 
*afmt, unsigned int az_inst, struct audio_info *audio_info) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); uint32_t channels = 0; ASSERT(audio_info); /* This should not happen.it does so we don't get BSOD*/ if (audio_info == NULL) return; channels = speakers_to_channels(audio_info->flags.speaker_flags).all; /* setup the audio stream source select (audio -> dig mapping) */ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst); /* Channel allocation */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels); /* Disable forced mem power off */ if (afmt->funcs->afmt_poweron == NULL) REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0); } void afmt3_audio_mute_control( struct afmt *afmt, bool mute) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); if (mute && afmt->funcs->afmt_powerdown) afmt->funcs->afmt_powerdown(afmt); if (!mute && afmt->funcs->afmt_poweron) afmt->funcs->afmt_poweron(afmt); /* enable/disable transmission of audio packets */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute); } void afmt3_audio_info_immediate_update( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); /* update double-buffered AUDIO_INFO registers immediately */ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); } void afmt3_setup_dp_audio( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); if (afmt->funcs->afmt_poweron) afmt->funcs->afmt_poweron(afmt); /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); /* AFMT_AUDIO_PACKET_CONTROL2 */ /* Program the ATP and AIP next */ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, 0, AFMT_60958_OSF_OVRD, 0); /* AFMT_INFOFRAME_CONTROL0 */ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0); } static struct afmt_funcs dcn30_afmt_funcs = { .setup_hdmi_audio = afmt3_setup_hdmi_audio, .se_audio_setup = afmt3_se_audio_setup, .audio_mute_control = afmt3_audio_mute_control, .audio_info_immediate_update = afmt3_audio_info_immediate_update, .setup_dp_audio = afmt3_setup_dp_audio, }; void afmt3_construct(struct dcn30_afmt *afmt3, struct dc_context *ctx, uint32_t inst, const struct dcn30_afmt_registers *afmt_regs, const struct dcn30_afmt_shift *afmt_shift, const struct dcn30_afmt_mask *afmt_mask) { afmt3->base.ctx = ctx; afmt3->base.inst = inst; afmt3->base.funcs = &dcn30_afmt_funcs; afmt3->regs = afmt_regs; afmt3->afmt_shift = afmt_shift; afmt3->afmt_mask = afmt_mask; }
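/*
 * Illustrative sketch, not part of the file above: the rear-center placement
 * rule implemented by speakers_to_channels() — per its comment, a lone RC
 * speaker occupies the shared RL/RC slot (channel 5) but is moved to the
 * RC/RLC/FLC slot (channel 7) when a rear left/right pair is present. Struct
 * and function names are hypothetical, and the FLC/RLC handling of the real
 * function is omitted here.
 */
#include <stdbool.h>

struct example_cea_channels {
	bool fl, fr, lfe, fc;	/* map one-to-one from the speaker flags */
	bool rl_rc, rr;		/* channel 5 is shared between RL and a lone RC */
	bool rc_rlc_flc;	/* channel 7 is shared by RC, RLC and FLC */
};

static void example_place_rear_center(struct example_cea_channels *c,
				      bool have_rl_rr, bool have_rc)
{
	if (have_rl_rr) {
		c->rl_rc = true;		/* RL takes the shared slot */
		c->rr = true;
		c->rc_rlc_flc = have_rc;	/* RC is pushed up to channel 7 */
	} else {
		c->rl_rc = have_rc;		/* a lone RC stays on channel 5 */
	}
}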
linux-master
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" #include "dcn30_dpp.h" #include "basics/conversion.h" #include "dcn30_cm_common.h" #define REG(reg)\ dpp->tf_regs->reg #define CTX \ dpp->base.ctx #undef FN #define FN(reg_name, field_name) \ dpp->tf_shift->field_name, dpp->tf_mask->field_name void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(DPP_CONTROL, DPP_CLOCK_ENABLE, &s->is_enabled); // TODO: Implement for DCN3 } /*program post scaler scs block in dpp CM*/ void dpp3_program_post_csc( struct dpp *dpp_base, enum dc_color_space color_space, enum dcn10_input_csc_select input_select, const struct out_csc_color_matrix *tbl_entry) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); int i; int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); const uint16_t *regval = NULL; uint32_t cur_select = 0; enum dcn10_input_csc_select select; struct color_matrices_reg gam_regs; if (input_select == INPUT_CSC_SELECT_BYPASS) { REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, 0); return; } if (tbl_entry == NULL) { for (i = 0; i < arr_size; i++) if (dpp_input_csc_matrix[i].color_space == color_space) { regval = dpp_input_csc_matrix[i].regval; break; } if (regval == NULL) { BREAK_TO_DEBUGGER(); return; } } else { regval = tbl_entry->regval; } /* determine which CSC matrix (icsc or coma) we are using * currently. 
select the alternate set to double buffer * the CSC update so CSC is updated on frame boundary */ REG_GET(CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, &cur_select); if (cur_select != INPUT_CSC_SELECT_ICSC) select = INPUT_CSC_SELECT_ICSC; else select = INPUT_CSC_SELECT_COMA; gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_POST_CSC_C11; gam_regs.masks.csc_c11 = dpp->tf_mask->CM_POST_CSC_C11; gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_POST_CSC_C12; gam_regs.masks.csc_c12 = dpp->tf_mask->CM_POST_CSC_C12; if (select == INPUT_CSC_SELECT_ICSC) { gam_regs.csc_c11_c12 = REG(CM_POST_CSC_C11_C12); gam_regs.csc_c33_c34 = REG(CM_POST_CSC_C33_C34); } else { gam_regs.csc_c11_c12 = REG(CM_POST_CSC_B_C11_C12); gam_regs.csc_c33_c34 = REG(CM_POST_CSC_B_C33_C34); } cm_helper_program_color_matrices( dpp->base.ctx, regval, &gam_regs); REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, select); } /*CNVC degam unit has read only LUTs*/ void dpp3_set_pre_degam(struct dpp *dpp_base, enum dc_transfer_func_predefined tr) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); int pre_degam_en = 1; int degamma_lut_selection = 0; switch (tr) { case TRANSFER_FUNCTION_LINEAR: case TRANSFER_FUNCTION_UNITY: pre_degam_en = 0; //bypass break; case TRANSFER_FUNCTION_SRGB: degamma_lut_selection = 0; break; case TRANSFER_FUNCTION_BT709: degamma_lut_selection = 4; break; case TRANSFER_FUNCTION_PQ: degamma_lut_selection = 5; break; case TRANSFER_FUNCTION_HLG: degamma_lut_selection = 6; break; case TRANSFER_FUNCTION_GAMMA22: degamma_lut_selection = 1; break; case TRANSFER_FUNCTION_GAMMA24: degamma_lut_selection = 2; break; case TRANSFER_FUNCTION_GAMMA26: degamma_lut_selection = 3; break; default: pre_degam_en = 0; break; } REG_SET_2(PRE_DEGAM, 0, PRE_DEGAM_MODE, pre_degam_en, PRE_DEGAM_SELECT, degamma_lut_selection); } void dpp3_cnv_setup ( struct dpp *dpp_base, enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); uint32_t pixel_format = 0; uint32_t alpha_en = 1; enum dc_color_space color_space = COLOR_SPACE_SRGB; enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; bool force_disable_cursor = false; uint32_t is_2bit = 0; uint32_t alpha_plane_enable = 0; uint32_t dealpha_en = 0, dealpha_ablnd_en = 0; uint32_t realpha_en = 0, realpha_ablnd_en = 0; uint32_t program_prealpha_dealpha = 0; struct out_csc_color_matrix tbl_entry; int i; REG_SET_2(FORMAT_CONTROL, 0, CNVC_BYPASS, 0, FORMAT_EXPANSION_MODE, mode); REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0); REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1); REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2); switch (format) { case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: pixel_format = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB565: pixel_format = 3; alpha_en = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: pixel_format = 8; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: pixel_format = 10; is_2bit = 1; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: force_disable_cursor = false; pixel_format = 65; color_space = COLOR_SPACE_YCBCR709; select = INPUT_CSC_SELECT_ICSC; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: 
force_disable_cursor = true; pixel_format = 64; color_space = COLOR_SPACE_YCBCR709; select = INPUT_CSC_SELECT_ICSC; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: force_disable_cursor = true; pixel_format = 67; color_space = COLOR_SPACE_YCBCR709; select = INPUT_CSC_SELECT_ICSC; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: force_disable_cursor = true; pixel_format = 66; color_space = COLOR_SPACE_YCBCR709; select = INPUT_CSC_SELECT_ICSC; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: pixel_format = 26; /* ARGB16161616_UNORM */ break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: pixel_format = 24; break; case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: pixel_format = 25; break; case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: pixel_format = 12; color_space = COLOR_SPACE_YCBCR709; select = INPUT_CSC_SELECT_ICSC; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: pixel_format = 112; break; case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: pixel_format = 113; break; case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: pixel_format = 114; color_space = COLOR_SPACE_YCBCR709; select = INPUT_CSC_SELECT_ICSC; is_2bit = 1; break; case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: pixel_format = 115; color_space = COLOR_SPACE_YCBCR709; select = INPUT_CSC_SELECT_ICSC; is_2bit = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_RGBE: pixel_format = 116; alpha_plane_enable = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: pixel_format = 116; alpha_plane_enable = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: pixel_format = 118; break; case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: pixel_format = 119; break; default: break; } /* Set default color space based on format if none is given. */ color_space = input_color_space ? input_color_space : color_space; if (is_2bit == 1 && alpha_2bit_lut != NULL) { REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); } REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0, CNVC_SURFACE_PIXEL_FORMAT, pixel_format, CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable); REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); if (program_prealpha_dealpha) { dealpha_en = 1; realpha_en = 1; } REG_SET_2(PRE_DEALPHA, 0, PRE_DEALPHA_EN, dealpha_en, PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en); REG_SET_2(PRE_REALPHA, 0, PRE_REALPHA_EN, realpha_en, PRE_REALPHA_ABLND_EN, realpha_ablnd_en); /* If input adjustment exists, program the ICSC with those values. 
*/ if (input_csc_color_matrix.enable_adjustment == true) { for (i = 0; i < 12; i++) tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; tbl_entry.color_space = input_color_space; if (color_space >= COLOR_SPACE_YCBCR601) select = INPUT_CSC_SELECT_ICSC; else select = INPUT_CSC_SELECT_BYPASS; dpp3_program_post_csc(dpp_base, color_space, select, &tbl_entry); } else { dpp3_program_post_csc(dpp_base, color_space, select, NULL); } if (force_disable_cursor) { REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, 0); REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0); } } #define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) void dpp3_set_cursor_attributes( struct dpp *dpp_base, struct dc_cursor_attributes *cursor_attributes) { enum dc_cursor_color_format color_format = cursor_attributes->color_format; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); int cur_rom_en = 0; if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { cur_rom_en = 1; } } REG_UPDATE_3(CURSOR0_CONTROL, CUR0_MODE, color_format, CUR0_EXPANSION_MODE, 0, CUR0_ROM_EN, cur_rom_en); if (color_format == CURSOR_MODE_MONO) { /* todo: clarify what to program these to */ REG_UPDATE(CURSOR0_COLOR0, CUR0_COLOR0, 0x00000000); REG_UPDATE(CURSOR0_COLOR1, CUR0_COLOR1, 0xFFFFFFFF); } dpp_base->att.cur0_ctl.bits.expansion_mode = 0; dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en; dpp_base->att.cur0_ctl.bits.mode = color_format; } bool dpp3_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) { int num_part_y, num_part_c; int max_taps_y, max_taps_c; int min_taps_y, min_taps_c; enum lb_memory_config lb_config; if (scl_data->viewport.width > scl_data->h_active && dpp->ctx->dc->debug.max_downscale_src_width != 0 && scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) return false; /* * Set default taps if none are provided * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ if (in_taps->h_taps == 0) { if (dc_fixpt_ceil(scl_data->ratios.horz) > 1) scl_data->taps.h_taps = min(2 * dc_fixpt_ceil(scl_data->ratios.horz), 8); else scl_data->taps.h_taps = 4; } else scl_data->taps.h_taps = in_taps->h_taps; if (in_taps->v_taps == 0) { if (dc_fixpt_ceil(scl_data->ratios.vert) > 1) scl_data->taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert, 2)), 8); else scl_data->taps.v_taps = 4; } else scl_data->taps.v_taps = in_taps->v_taps; if (in_taps->v_taps_c == 0) { if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 1) scl_data->taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert_c, 2)), 8); else scl_data->taps.v_taps_c = 4; } else scl_data->taps.v_taps_c = in_taps->v_taps_c; if (in_taps->h_taps_c == 0) { if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 1) scl_data->taps.h_taps_c = min(2 * dc_fixpt_ceil(scl_data->ratios.horz_c), 8); else scl_data->taps.h_taps_c = 4; } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) /* Only 1 and even h_taps_c are supported by hw */ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; else scl_data->taps.h_taps_c = in_taps->h_taps_c; /*Ensure we can support the requested number of vtaps*/ min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert); min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c); /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */ if ((scl_data->format == PIXEL_FORMAT_420BPP8) || (scl_data->format == PIXEL_FORMAT_420BPP10)) lb_config 
= LB_MEMORY_CONFIG_3; else lb_config = LB_MEMORY_CONFIG_0; dpp->caps->dscl_calc_lb_num_partitions( scl_data, lb_config, &num_part_y, &num_part_c); /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ if (dc_fixpt_ceil(scl_data->ratios.vert) > 2) max_taps_y = num_part_y - (dc_fixpt_ceil(scl_data->ratios.vert) - 2); else max_taps_y = num_part_y; if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 2) max_taps_c = num_part_c - (dc_fixpt_ceil(scl_data->ratios.vert_c) - 2); else max_taps_c = num_part_c; if (max_taps_y < min_taps_y) return false; else if (max_taps_c < min_taps_c) return false; if (scl_data->taps.v_taps > max_taps_y) scl_data->taps.v_taps = max_taps_y; if (scl_data->taps.v_taps_c > max_taps_c) scl_data->taps.v_taps_c = max_taps_c; if (!dpp->ctx->dc->debug.always_scale) { if (IDENTITY_RATIO(scl_data->ratios.horz)) scl_data->taps.h_taps = 1; if (IDENTITY_RATIO(scl_data->ratios.vert)) scl_data->taps.v_taps = 1; if (IDENTITY_RATIO(scl_data->ratios.horz_c)) scl_data->taps.h_taps_c = 1; if (IDENTITY_RATIO(scl_data->ratios.vert_c)) scl_data->taps.v_taps_c = 1; } return true; } static void dpp3_deferred_update(struct dpp *dpp_base) { int bypass_state; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->deferred_reg_writes.bits.disable_dscl) { REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); dpp_base->deferred_reg_writes.bits.disable_dscl = false; } if (dpp_base->deferred_reg_writes.bits.disable_gamcor) { REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state); if (bypass_state == 0) { // only program if bypass was latched REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3); } else ASSERT(0); // LUT select was updated again before vupdate dpp_base->deferred_reg_writes.bits.disable_gamcor = false; } if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) { REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state); if (bypass_state == 0) { // only program if bypass was latched REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3); } else ASSERT(0); // LUT select was updated again before vupdate dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false; } if (dpp_base->deferred_reg_writes.bits.disable_3dlut) { REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state); if (bypass_state == 0) { // only program if bypass was latched REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3); } else ASSERT(0); // LUT select was updated again before vupdate dpp_base->deferred_reg_writes.bits.disable_3dlut = false; } if (dpp_base->deferred_reg_writes.bits.disable_shaper) { REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state); if (bypass_state == 0) { // only program if bypass was latched REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3); } else ASSERT(0); // LUT select was updated again before vupdate dpp_base->deferred_reg_writes.bits.disable_shaper = false; } } static void dpp3_power_on_blnd_lut( struct dpp *dpp_base, bool power_on) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { if (power_on) { REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5); } else { dpp_base->ctx->dc->optimized_required = true; dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; } } else { REG_SET(CM_MEM_PWR_CTRL, 0, BLNDGAM_MEM_PWR_FORCE, power_on == true ? 
0 : 1); } } static void dpp3_power_on_hdr3dlut( struct dpp *dpp_base, bool power_on) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { if (power_on) { REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5); } else { dpp_base->ctx->dc->optimized_required = true; dpp_base->deferred_reg_writes.bits.disable_3dlut = true; } } } static void dpp3_power_on_shaper( struct dpp *dpp_base, bool power_on) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { if (power_on) { REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5); } else { dpp_base->ctx->dc->optimized_required = true; dpp_base->deferred_reg_writes.bits.disable_shaper = true; } } } static void dpp3_configure_blnd_lut( struct dpp *dpp_base, bool is_ram_a) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_UPDATE_2(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 7, CM_BLNDGAM_LUT_HOST_SEL, is_ram_a == true ? 0 : 1); REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); } static void dpp3_program_blnd_pwl( struct dpp *dpp_base, const struct pwl_result_data *rgb, uint32_t num) { uint32_t i; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; if (is_rgb_equal(rgb, num)) { for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); } else { REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_blue); } } static void dcn3_dpp_cm_get_reg_field( struct dcn3_dpp *dpp, struct dcn3_xfer_func_reg *reg) { reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; 
reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; } /*program blnd lut RAM A*/ static void dpp3_program_blnd_luta_settings( struct dpp *dpp_base, const struct pwl_params *params) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); struct dcn3_xfer_func_reg gam_regs; dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B); gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G); gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R); gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G); gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); } /*program blnd lut RAM B*/ static void dpp3_program_blnd_lutb_settings( struct dpp *dpp_base, const struct pwl_params *params) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); struct dcn3_xfer_func_reg gam_regs; dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B); gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G); gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R); gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B); gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); } static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base) { enum dc_lut_mode mode; uint32_t mode_current = 0; uint32_t in_use = 0; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_BLNDGAM_CONTROL, 
CM_BLNDGAM_MODE_CURRENT, &mode_current); REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &in_use); switch (mode_current) { case 0: case 1: mode = LUT_BYPASS; break; case 2: if (in_use == 0) mode = LUT_RAM_A; else mode = LUT_RAM_B; break; default: mode = LUT_BYPASS; break; } return mode; } static bool dpp3_program_blnd_lut(struct dpp *dpp_base, const struct pwl_params *params) { enum dc_lut_mode current_mode; enum dc_lut_mode next_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (params == NULL) { REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) dpp3_power_on_blnd_lut(dpp_base, false); return false; } current_mode = dpp3_get_blndgam_current(dpp_base); if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B) next_mode = LUT_RAM_A; else next_mode = LUT_RAM_B; dpp3_power_on_blnd_lut(dpp_base, true); dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dpp3_program_blnd_luta_settings(dpp_base, params); else dpp3_program_blnd_lutb_settings(dpp_base, params); dpp3_program_blnd_pwl( dpp_base, params->rgb_resulted, params->hw_points_num); REG_UPDATE_2(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE, 2, CM_BLNDGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1); return true; } static void dpp3_program_shaper_lut( struct dpp *dpp_base, const struct pwl_result_data *rgb, uint32_t num) { uint32_t i, red, green, blue; uint32_t red_delta, green_delta, blue_delta; uint32_t red_value, green_value, blue_value; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); for (i = 0 ; i < num; i++) { red = rgb[i].red_reg; green = rgb[i].green_reg; blue = rgb[i].blue_reg; red_delta = rgb[i].delta_red_reg; green_delta = rgb[i].delta_green_reg; blue_delta = rgb[i].delta_blue_reg; red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); } } static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base) { enum dc_lut_mode mode; uint32_t state_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &state_mode); switch (state_mode) { case 0: mode = LUT_BYPASS; break; case 1: mode = LUT_RAM_A; break; case 2: mode = LUT_RAM_B; break; default: mode = LUT_BYPASS; break; } return mode; } static void dpp3_configure_shaper_lut( struct dpp *dpp_base, bool is_ram_a) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, 7); REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); } /*program shaper RAM A*/ static void dpp3_program_shaper_luta_settings( struct dpp *dpp_base, const struct pwl_params *params) { const struct gamma_curve *curve; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); curve = params->arr_curve_points; REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, 
CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); } /*program shaper RAM B*/ static void dpp3_program_shaper_lutb_settings( struct dpp *dpp_base, const struct pwl_params *params) { const struct gamma_curve *curve; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, 
CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); curve = params->arr_curve_points; REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, 
CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); curve += 2; REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); } static bool dpp3_program_shaper(struct dpp *dpp_base, const struct pwl_params *params) { enum dc_lut_mode current_mode; enum dc_lut_mode next_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (params == NULL) { REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) dpp3_power_on_shaper(dpp_base, false); return false; } if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) dpp3_power_on_shaper(dpp_base, true); current_mode = dpp3_get_shaper_current(dpp_base); if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) next_mode = LUT_RAM_B; else next_mode = LUT_RAM_A; dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dpp3_program_shaper_luta_settings(dpp_base, params); else dpp3_program_shaper_lutb_settings(dpp_base, params); dpp3_program_shaper_lut( dpp_base, params->rgb_resulted, params->hw_points_num); REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 
1:2); return true; } static enum dc_lut_mode get3dlut_config( struct dpp *dpp_base, bool *is_17x17x17, bool *is_12bits_color_channel) { uint32_t i_mode, i_enable_10bits, lut_size; enum dc_lut_mode mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, &i_enable_10bits); REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &i_mode); switch (i_mode) { case 0: mode = LUT_BYPASS; break; case 1: mode = LUT_RAM_A; break; case 2: mode = LUT_RAM_B; break; default: mode = LUT_BYPASS; break; } if (i_enable_10bits > 0) *is_12bits_color_channel = false; else *is_12bits_color_channel = true; REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); if (lut_size == 0) *is_17x17x17 = true; else *is_17x17x17 = false; return mode; } /* * select ramA or ramB, or bypass * select color channel size 10 or 12 bits * select 3dlut size 17x17x17 or 9x9x9 */ static void dpp3_set_3dlut_mode( struct dpp *dpp_base, enum dc_lut_mode mode, bool is_color_channel_12bits, bool is_lut_size17x17x17) { uint32_t lut_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (mode == LUT_BYPASS) lut_mode = 0; else if (mode == LUT_RAM_A) lut_mode = 1; else lut_mode = 2; REG_UPDATE_2(CM_3DLUT_MODE, CM_3DLUT_MODE, lut_mode, CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 0 : 1); } static void dpp3_select_3dlut_ram( struct dpp *dpp_base, enum dc_lut_mode mode, bool is_color_channel_12bits) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, CM_3DLUT_30BIT_EN, is_color_channel_12bits == true ? 0:1); } static void dpp3_set3dlut_ram12( struct dpp *dpp_base, const struct dc_rgb *lut, uint32_t entries) { uint32_t i, red, green, blue, red1, green1, blue1; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); for (i = 0 ; i < entries; i += 2) { red = lut[i].red<<4; green = lut[i].green<<4; blue = lut[i].blue<<4; red1 = lut[i+1].red<<4; green1 = lut[i+1].green<<4; blue1 = lut[i+1].blue<<4; REG_SET_2(CM_3DLUT_DATA, 0, CM_3DLUT_DATA0, red, CM_3DLUT_DATA1, red1); REG_SET_2(CM_3DLUT_DATA, 0, CM_3DLUT_DATA0, green, CM_3DLUT_DATA1, green1); REG_SET_2(CM_3DLUT_DATA, 0, CM_3DLUT_DATA0, blue, CM_3DLUT_DATA1, blue1); } } /* * load selected lut with 10 bits color channels */ static void dpp3_set3dlut_ram10( struct dpp *dpp_base, const struct dc_rgb *lut, uint32_t entries) { uint32_t i, red, green, blue, value; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); for (i = 0; i < entries; i++) { red = lut[i].red; green = lut[i].green; blue = lut[i].blue; value = (red<<20) | (green<<10) | blue; REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); } } static void dpp3_select_3dlut_ram_mask( struct dpp *dpp_base, uint32_t ram_selection_mask) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, ram_selection_mask); REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); } static bool dpp3_program_3dlut(struct dpp *dpp_base, struct tetrahedral_params *params) { enum dc_lut_mode mode; bool is_17x17x17; bool is_12bits_color_channel; struct dc_rgb *lut0; struct dc_rgb *lut1; struct dc_rgb *lut2; struct dc_rgb *lut3; int lut_size0; int lut_size; if (params == NULL) { dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) dpp3_power_on_hdr3dlut(dpp_base, false); return false; } if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) dpp3_power_on_hdr3dlut(dpp_base, true); mode = get3dlut_config(dpp_base, &is_17x17x17, 
&is_12bits_color_channel); if (mode == LUT_BYPASS || mode == LUT_RAM_B) mode = LUT_RAM_A; else mode = LUT_RAM_B; is_17x17x17 = !params->use_tetrahedral_9; is_12bits_color_channel = params->use_12bits; if (is_17x17x17) { lut0 = params->tetrahedral_17.lut0; lut1 = params->tetrahedral_17.lut1; lut2 = params->tetrahedral_17.lut2; lut3 = params->tetrahedral_17.lut3; lut_size0 = sizeof(params->tetrahedral_17.lut0)/ sizeof(params->tetrahedral_17.lut0[0]); lut_size = sizeof(params->tetrahedral_17.lut1)/ sizeof(params->tetrahedral_17.lut1[0]); } else { lut0 = params->tetrahedral_9.lut0; lut1 = params->tetrahedral_9.lut1; lut2 = params->tetrahedral_9.lut2; lut3 = params->tetrahedral_9.lut3; lut_size0 = sizeof(params->tetrahedral_9.lut0)/ sizeof(params->tetrahedral_9.lut0[0]); lut_size = sizeof(params->tetrahedral_9.lut1)/ sizeof(params->tetrahedral_9.lut1[0]); } dpp3_select_3dlut_ram(dpp_base, mode, is_12bits_color_channel); dpp3_select_3dlut_ram_mask(dpp_base, 0x1); if (is_12bits_color_channel) dpp3_set3dlut_ram12(dpp_base, lut0, lut_size0); else dpp3_set3dlut_ram10(dpp_base, lut0, lut_size0); dpp3_select_3dlut_ram_mask(dpp_base, 0x2); if (is_12bits_color_channel) dpp3_set3dlut_ram12(dpp_base, lut1, lut_size); else dpp3_set3dlut_ram10(dpp_base, lut1, lut_size); dpp3_select_3dlut_ram_mask(dpp_base, 0x4); if (is_12bits_color_channel) dpp3_set3dlut_ram12(dpp_base, lut2, lut_size); else dpp3_set3dlut_ram10(dpp_base, lut2, lut_size); dpp3_select_3dlut_ram_mask(dpp_base, 0x8); if (is_12bits_color_channel) dpp3_set3dlut_ram12(dpp_base, lut3, lut_size); else dpp3_set3dlut_ram10(dpp_base, lut3, lut_size); dpp3_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, is_17x17x17); return true; } static struct dpp_funcs dcn30_dpp_funcs = { .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, .dpp_read_state = dpp30_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, .dpp_set_csc_adjustment = NULL, .dpp_set_csc_default = NULL, .dpp_program_regamma_pwl = NULL, .dpp_set_pre_degam = dpp3_set_pre_degam, .dpp_program_input_lut = NULL, .dpp_full_bypass = dpp1_full_bypass, .dpp_setup = dpp3_cnv_setup, .dpp_program_degamma_pwl = NULL, .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, .dpp_program_cm_bias = dpp3_program_cm_bias, .dpp_program_blnd_lut = dpp3_program_blnd_lut, .dpp_program_shaper_lut = dpp3_program_shaper, .dpp_program_3dlut = dpp3_program_3dlut, .dpp_deferred_update = dpp3_deferred_update, .dpp_program_bias_and_scale = NULL, .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, .set_cursor_attributes = dpp3_set_cursor_attributes, .set_cursor_position = dpp1_set_cursor_position, .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, }; static struct dpp_caps dcn30_dpp_cap = { .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, }; bool dpp3_construct( struct dcn3_dpp *dpp, struct dc_context *ctx, uint32_t inst, const struct dcn3_dpp_registers *tf_regs, const struct dcn3_dpp_shift *tf_shift, const struct dcn3_dpp_mask *tf_mask) { dpp->base.ctx = ctx; dpp->base.inst = inst; dpp->base.funcs = &dcn30_dpp_funcs; dpp->base.caps = &dcn30_dpp_cap; dpp->tf_regs = tf_regs; dpp->tf_shift = tf_shift; dpp->tf_mask = tf_mask; return true; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
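The CM_SHAPER_LUT_DATA writes in dpp3_program_shaper_lut() above pack each PWL point into one register word per color channel: the 14-bit point value sits in bits [13:0] and the 10-bit delta to the next point in bits [23:14]. A minimal standalone sketch of that packing follows; pack_shaper_lut_word() and the sample values are illustrative only and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the bit layout used for CM_SHAPER_LUT_DATA
 * in dpp3_program_shaper_lut(): bits [13:0] hold the 14-bit point value,
 * bits [23:14] hold the 10-bit delta to the next point. */
static uint32_t pack_shaper_lut_word(uint32_t point_14bit, uint32_t delta_10bit)
{
        return ((delta_10bit & 0x3ff) << 14) | (point_14bit & 0x3fff);
}

int main(void)
{
        /* Hypothetical point/delta pair. */
        uint32_t word = pack_shaper_lut_word(0x1234, 0x056);

        printf("packed shaper LUT word: 0x%06x\n", word); /* 0x159234 */
        return 0;
}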
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "dcn30_optc.h" #include "dc.h" #include "dcn_calc_math.h" #include "dc_dmub_srv.h" #include "dml/dcn30/dcn30_fpu.h" #include "dc_trace.h" #define REG(reg)\ optc1->tg_regs->reg #define CTX \ optc1->base.ctx #undef FN #define FN(reg_name, field_name) \ optc1->tg_shift->field_name, optc1->tg_mask->field_name void optc3_triplebuffer_lock(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_UPDATE(OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); REG_SET(OTG_VUPDATE_KEEPOUT, 0, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); REG_WAIT(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, 1, 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } void optc3_lock_doublebuffer_enable(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); uint32_t v_blank_start = 0; uint32_t v_blank_end = 0; uint32_t h_blank_start = 0; uint32_t h_blank_end = 0; REG_GET_2(OTG_V_BLANK_START_END, OTG_V_BLANK_START, &v_blank_start, OTG_V_BLANK_END, &v_blank_end); REG_GET_2(OTG_H_BLANK_START_END, OTG_H_BLANK_START, &h_blank_start, OTG_H_BLANK_END, &h_blank_end); REG_UPDATE_2(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start - 1, MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start); REG_UPDATE_2(OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, h_blank_start - 180 - 1, DIG_UPDATE_POSITION_Y, v_blank_start - 1); // there is a DIG_UPDATE_VCOUNT_MODE and it is 0. 
REG_UPDATE_3(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1, MASTER_UPDATE_LOCK_DB_END_X, h_blank_start - 180, MASTER_UPDATE_LOCK_DB_EN, 1); REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1); REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } void optc3_lock_doublebuffer_disable(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_UPDATE_2(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, 0, MASTER_UPDATE_LOCK_DB_END_X, 0); REG_UPDATE_2(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, 0, MASTER_UPDATE_LOCK_DB_END_Y, 0); REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0); REG_UPDATE(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, 0); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } void optc3_lock(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_UPDATE(OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); REG_WAIT(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, 1, 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_UPDATE(OTG_CONTROL, OTG_OUT_MUX, dest); } void optc3_program_blank_color(struct timing_generator *optc, const struct tg_color *blank_color) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_SET_3(OTG_BLANK_DATA_COLOR, 0, OTG_BLANK_DATA_COLOR_BLUE_CB, blank_color->color_b_cb, OTG_BLANK_DATA_COLOR_GREEN_Y, blank_color->color_g_y, OTG_BLANK_DATA_COLOR_RED_CR, blank_color->color_r_cr); REG_SET_3(OTG_BLANK_DATA_COLOR_EXT, 0, OTG_BLANK_DATA_COLOR_BLUE_CB_EXT, blank_color->color_b_cb >> 10, OTG_BLANK_DATA_COLOR_GREEN_Y_EXT, blank_color->color_g_y >> 10, OTG_BLANK_DATA_COLOR_RED_CR_EXT, blank_color->color_r_cr >> 10); } void optc3_set_drr_trigger_window(struct timing_generator *optc, uint32_t window_start, uint32_t window_end) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_SET_2(OTG_DRR_TRIGGER_WINDOW, 0, OTG_DRR_TRIGGER_WINDOW_START_X, window_start, OTG_DRR_TRIGGER_WINDOW_END_X, window_end); } void optc3_set_vtotal_change_limit(struct timing_generator *optc, uint32_t limit) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_SET(OTG_DRR_V_TOTAL_CHANGE, 0, OTG_DRR_V_TOTAL_CHANGE_LIMIT, limit); } /* Set DSC-related configuration. 
* dsc_mode: 0 disables DSC, other values enable DSC in specified format * sc_bytes_per_pixel: Bytes per pixel in u3.28 format * dsc_slice_width: Slice width in pixels */ void optc3_set_dsc_config(struct timing_generator *optc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width) { struct optc *optc1 = DCN10TG_FROM_TG(optc); optc2_set_dsc_config(optc, dsc_mode, dsc_bytes_per_pixel, dsc_slice_width); REG_UPDATE(OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, 0); } void optc3_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); enum h_timing_div_mode h_div = H_TIMING_NO_DIV; REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 0, OPTC_SEG0_SRC_SEL, optc->inst, OPTC_SEG1_SRC_SEL, 0xf, OPTC_SEG2_SRC_SEL, 0xf, OPTC_SEG3_SRC_SEL, 0xf ); h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing); REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, h_div); REG_SET(OPTC_MEMORY_CONFIG, 0, OPTC_MEM_SEL, 0); optc1->opp_count = 1; } void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) / opp_cnt; uint32_t memory_mask = 0; /* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start * REG_SET_2(OTG_GLOBAL_CONTROL1, 0, * MASTER_UPDATE_LOCK_DB_X, 160, * MASTER_UPDATE_LOCK_DB_Y, 240); */ ASSERT(opp_cnt == 2 || opp_cnt == 4); /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192, * however, for ODM combine we can simplify by always using 4. */ if (opp_cnt == 2) { /* To make sure there's no memory overlap, each instance "reserves" 2 * memories and they are uniquely combined here. */ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); } else if (opp_cnt == 4) { /* To make sure there's no memory overlap, each instance "reserves" 1 * memory and they are uniquely combined here. */ memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2) | 0x1 << (opp_id[2] * 2) | 0x1 << (opp_id[3] * 2); } if (REG(OPTC_MEMORY_CONFIG)) REG_SET(OPTC_MEMORY_CONFIG, 0, OPTC_MEM_SEL, memory_mask); if (opp_cnt == 2) { REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 1, OPTC_SEG0_SRC_SEL, opp_id[0], OPTC_SEG1_SRC_SEL, opp_id[1]); } else if (opp_cnt == 4) { REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 3, OPTC_SEG0_SRC_SEL, opp_id[0], OPTC_SEG1_SRC_SEL, opp_id[1], OPTC_SEG2_SRC_SEL, opp_id[2], OPTC_SEG3_SRC_SEL, opp_id[3]); } REG_UPDATE(OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mpcc_hactive); REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1); optc1->opp_count = opp_cnt; } /** * optc3_set_timing_double_buffer() - DRR double buffering control * * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. * * @optc: timing_generator instance. * @enable: Enable DRR double buffering control if true, disable otherwise. * * Options: any time, start of frame, dp start of frame (range timing) */ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable) { struct optc *optc1 = DCN10TG_FROM_TG(optc); uint32_t mode = enable ? 
2 : 0; REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode); } void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5hz */ } void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) { struct dc *dc = optc->ctx->dc; if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max); else optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); } void optc3_tg_init(struct timing_generator *optc) { optc3_set_timing_double_buffer(optc, true); optc1_clear_optc_underflow(optc); } static struct timing_generator_funcs dcn30_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, .program_global_sync = optc1_program_global_sync, .enable_crtc = optc2_enable_crtc, .disable_crtc = optc1_disable_crtc, /* used by enable_timing_synchronization. Not need for FPGA */ .is_counter_moving = optc1_is_counter_moving, .get_position = optc1_get_position, .get_frame_count = optc1_get_vblank_counter, .get_scanoutpos = optc1_get_crtc_scanoutpos, .get_otg_active_size = optc1_get_otg_active_size, .set_early_control = optc1_set_early_control, /* used by enable_timing_synchronization. Not need for FPGA */ .wait_for_state = optc1_wait_for_state, .set_blank_color = optc3_program_blank_color, .did_triggered_reset_occur = optc1_did_triggered_reset_occur, .triplebuffer_lock = optc3_triplebuffer_lock, .triplebuffer_unlock = optc2_triplebuffer_unlock, .enable_reset_trigger = optc1_enable_reset_trigger, .enable_crtc_reset = optc1_enable_crtc_reset, .disable_reset_trigger = optc1_disable_reset_trigger, .lock = optc3_lock, .unlock = optc1_unlock, .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, .enable_optc_clock = optc1_enable_optc_clock, .set_drr = optc1_set_drr, .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, .set_vtotal_min_max = optc3_set_vtotal_min_max, .set_static_screen_control = optc1_set_static_screen_control, .program_stereo = optc1_program_stereo, .is_stereo_left_eye = optc1_is_stereo_left_eye, .tg_init = optc3_tg_init, .is_tg_enabled = optc1_is_tg_enabled, .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, .clear_optc_underflow = optc1_clear_optc_underflow, .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc3_set_odm_combine, .get_optc_source = optc2_get_optc_source, .set_out_mux = optc3_set_out_mux, .set_drr_trigger_window = optc3_set_drr_trigger_window, .set_vtotal_change_limit = optc3_set_vtotal_change_limit, .set_gsl = optc2_set_gsl, .set_gsl_source_select = optc2_set_gsl_source_select, .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, }; void dcn30_timing_generator_init(struct optc 
*optc1) { optc1->base.funcs = &dcn30_tg_funcs; optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; optc1->min_h_blank = 32; optc1->min_v_blank = 3; optc1->min_v_blank_interlace = 5; optc1->min_h_sync_width = 4; optc1->min_v_sync_width = 1; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
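optc3_set_odm_combine() above keeps OPTC segment memories from overlapping by giving each OPP instance a 2-bit slot in the OPTC_MEM_SEL mask: a 2-way combine claims both memories in its slot (0x3), a 4-way combine claims one (0x1). Below is a standalone sketch of that mask computation; odm_memory_mask() and the sample OPP ids are illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the OPTC_MEM_SEL mask built in optc3_set_odm_combine():
 * each OPP id indexes a 2-bit slot; 2-way combine reserves two memories
 * per OPP (0x3), 4-way combine reserves one (0x1). */
static uint32_t odm_memory_mask(const int *opp_id, int opp_cnt)
{
        uint32_t mask = 0;
        int i;

        if (opp_cnt == 2) {
                for (i = 0; i < 2; i++)
                        mask |= 0x3u << (opp_id[i] * 2);
        } else if (opp_cnt == 4) {
                for (i = 0; i < 4; i++)
                        mask |= 0x1u << (opp_id[i] * 2);
        }
        return mask;
}

int main(void)
{
        int two_way[2] = { 0, 1 };
        int four_way[4] = { 0, 1, 2, 3 };

        printf("2-way mask: 0x%x\n", odm_memory_mask(two_way, 2));  /* 0xf  */
        printf("4-way mask: 0x%x\n", odm_memory_mask(four_way, 4)); /* 0x55 */
        return 0;
}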
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "reg_helper.h" #include "core_types.h" #include "dcn30_dccg.h" #define TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) #define REG(reg) \ (dccg_dcn->regs->reg) #undef FN #define FN(reg_name, field_name) \ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name #define CTX \ dccg_dcn->base.ctx #define DC_LOGGER \ dccg->ctx->logger static const struct dccg_funcs dccg3_funcs = { .update_dpp_dto = dccg2_update_dpp_dto, .get_dccg_ref_freq = dccg2_get_dccg_ref_freq, .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, .otg_add_pixel = dccg2_otg_add_pixel, .otg_drop_pixel = dccg2_otg_drop_pixel, .dccg_init = dccg2_init }; struct dccg *dccg3_create( struct dc_context *ctx, const struct dccg_registers *regs, const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask) { struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL); struct dccg *base; if (dccg_dcn == NULL) { BREAK_TO_DEBUGGER(); return NULL; } base = &dccg_dcn->base; base->ctx = ctx; base->funcs = &dccg3_funcs; dccg_dcn->regs = regs; dccg_dcn->dccg_shift = dccg_shift; dccg_dcn->dccg_mask = dccg_mask; return &dccg_dcn->base; } struct dccg *dccg30_create( struct dc_context *ctx, const struct dccg_registers *regs, const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask) { struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL); struct dccg *base; if (dccg_dcn == NULL) { BREAK_TO_DEBUGGER(); return NULL; } base = &dccg_dcn->base; base->ctx = ctx; base->funcs = &dccg3_funcs; dccg_dcn->regs = regs; dccg_dcn->dccg_shift = dccg_shift; dccg_dcn->dccg_mask = dccg_mask; return &dccg_dcn->base; }
linux-master
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c
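dcn30_dccg.c above wires a dccg instance to per-field shift and mask tables through the FN() macro; the reg_helper machinery then uses those pairs to update individual register fields. The sketch below shows the underlying read-modify-write idea in isolation; set_field() and the example field placement are assumptions for illustration, not the driver's REG_UPDATE implementation.

#include <stdint.h>
#include <stdio.h>

/* Simplified single-field read-modify-write, in the spirit of the
 * shift/mask pairs the FN() macro supplies: clear the field, then OR in
 * the new value shifted into place. */
static uint32_t set_field(uint32_t reg, uint32_t shift, uint32_t mask,
                          uint32_t value)
{
        return (reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
        /* Hypothetical field occupying bits [7:4]. */
        const uint32_t shift = 4;
        const uint32_t mask = 0xf0;
        uint32_t reg = 0xdeadbe0f;

        reg = set_field(reg, shift, mask, 0xa);
        printf("register after field update: 0x%08x\n", reg); /* 0xdeadbeaf */
        return 0;
}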
/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "dm_services.h" #include "reg_helper.h" #include "dcn30_hubbub.h" #define CTX \ hubbub1->base.ctx #define DC_LOGGER \ hubbub1->base.ctx->logger #define REG(reg)\ hubbub1->regs->reg #undef FN #define FN(reg_name, field_name) \ hubbub1->shifts->field_name, hubbub1->masks->field_name #ifdef NUM_VMID #undef NUM_VMID #endif #define NUM_VMID 16 static uint32_t convert_and_clamp( uint32_t wm_ns, uint32_t refclk_mhz, uint32_t clamp_value) { uint32_t ret_val = 0; ret_val = wm_ns * refclk_mhz; ret_val /= 1000; if (ret_val > clamp_value) ret_val = clamp_value; return ret_val; } int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, AGP_BASE, pa_config->system_aperture.agp_base >> 24); if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; phys_config.depth = 0; phys_config.block_size = 0; // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); } return NUM_VMID; } bool hubbub3_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); bool wm_pending = false; if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) wm_pending = true; /* * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request 
stream to the fabric. * If the memory controller is fully utilized and the DCHub requestors are * well ahead of their amortized schedule, then it is safe to prevent the next winner * from being committed and sent to the fabric. * The utilization of the memory controller is approximated by ensuring that * the number of outstanding requests is greater than a threshold specified * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. * * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF) * to turn off it for now. */ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF); hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); return wm_pending; } bool hubbub3_dcc_support_swizzle( enum swizzle_mode_values swizzle, unsigned int bytes_per_element, enum segment_order *segment_order_horz, enum segment_order *segment_order_vert) { bool standard_swizzle = false; bool display_swizzle = false; bool render_swizzle = false; switch (swizzle) { case DC_SW_4KB_S: case DC_SW_64KB_S: case DC_SW_VAR_S: case DC_SW_4KB_S_X: case DC_SW_64KB_S_X: case DC_SW_VAR_S_X: standard_swizzle = true; break; case DC_SW_4KB_R: case DC_SW_64KB_R: case DC_SW_VAR_R: case DC_SW_4KB_R_X: case DC_SW_64KB_R_X: case DC_SW_VAR_R_X: render_swizzle = true; break; case DC_SW_4KB_D: case DC_SW_64KB_D: case DC_SW_VAR_D: case DC_SW_4KB_D_X: case DC_SW_64KB_D_X: case DC_SW_VAR_D_X: display_swizzle = true; break; default: break; } if (standard_swizzle) { if (bytes_per_element == 1) { *segment_order_horz = segment_order__contiguous; *segment_order_vert = segment_order__na; return true; } if (bytes_per_element == 2) { *segment_order_horz = segment_order__non_contiguous; *segment_order_vert = segment_order__contiguous; return true; } if (bytes_per_element == 4) { *segment_order_horz = segment_order__non_contiguous; *segment_order_vert = segment_order__contiguous; return true; } if (bytes_per_element == 8) { *segment_order_horz = segment_order__na; *segment_order_vert = segment_order__contiguous; return true; } } if (render_swizzle) { if (bytes_per_element == 1) { *segment_order_horz = segment_order__contiguous; *segment_order_vert = segment_order__na; return true; } if (bytes_per_element == 2) { *segment_order_horz = segment_order__non_contiguous; *segment_order_vert = segment_order__contiguous; return true; } if (bytes_per_element == 4) { *segment_order_horz = segment_order__contiguous; *segment_order_vert = segment_order__non_contiguous; return true; } if (bytes_per_element == 8) { *segment_order_horz = segment_order__contiguous; *segment_order_vert = segment_order__non_contiguous; return true; } } if (display_swizzle && bytes_per_element == 8) { *segment_order_horz = segment_order__contiguous; *segment_order_vert = segment_order__non_contiguous; return true; } return false; } static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, unsigned int bytes_per_element) { /* copied from DML. 
might want to refactor DML to leverage from DML */ /* DML : get_blk256_size */ if (bytes_per_element == 1) { *blk256_width = 16; *blk256_height = 16; } else if (bytes_per_element == 2) { *blk256_width = 16; *blk256_height = 8; } else if (bytes_per_element == 4) { *blk256_width = 8; *blk256_height = 8; } else if (bytes_per_element == 8) { *blk256_width = 8; *blk256_height = 4; } } static void hubbub3_det_request_size( unsigned int detile_buf_size, unsigned int height, unsigned int width, unsigned int bpe, bool *req128_horz_wc, bool *req128_vert_wc) { unsigned int blk256_height = 0; unsigned int blk256_width = 0; unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe); swath_bytes_horz_wc = width * blk256_height * bpe; swath_bytes_vert_wc = height * blk256_width * bpe; *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? false : /* full 256B request */ true; /* half 128b request */ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? false : /* full 256B request */ true; /* half 128b request */ } bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { struct dc *dc = hubbub->ctx->dc; /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ enum dcc_control dcc_control; unsigned int bpe; enum segment_order segment_order_horz, segment_order_vert; bool req128_horz_wc, req128_vert_wc; memset(output, 0, sizeof(*output)); if (dc->debug.disable_dcc == DCC_DISABLE) return false; if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe)) return false; if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, &segment_order_horz, &segment_order_vert)) return false; hubbub3_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, input->surface_size.height, input->surface_size.width, bpe, &req128_horz_wc, &req128_vert_wc); if (!req128_horz_wc && !req128_vert_wc) { dcc_control = dcc_control__256_256_xxx; } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { if (!req128_horz_wc) dcc_control = dcc_control__256_256_xxx; else if (segment_order_horz == segment_order__contiguous) dcc_control = dcc_control__128_128_xxx; else dcc_control = dcc_control__256_64_64; } else if (input->scan == SCAN_DIRECTION_VERTICAL) { if (!req128_vert_wc) dcc_control = dcc_control__256_256_xxx; else if (segment_order_vert == segment_order__contiguous) dcc_control = dcc_control__128_128_xxx; else dcc_control = dcc_control__256_64_64; } else { if ((req128_horz_wc && segment_order_horz == segment_order__non_contiguous) || (req128_vert_wc && segment_order_vert == segment_order__non_contiguous)) /* access_dir not known, must use most constraining */ dcc_control = dcc_control__256_64_64; else /* reg128 is true for either horz and vert * but segment_order is contiguous */ dcc_control = dcc_control__128_128_xxx; } /* Exception for 64KB_R_X */ if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) dcc_control = dcc_control__128_128_xxx; if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && dcc_control != dcc_control__256_256_xxx) return false; switch (dcc_control) { case dcc_control__256_256_xxx: output->grph.rgb.max_uncompressed_blk_size = 256; output->grph.rgb.max_compressed_blk_size = 256; output->grph.rgb.independent_64b_blks = false; output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1; output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; break; case dcc_control__128_128_xxx: output->grph.rgb.max_uncompressed_blk_size = 128; 
output->grph.rgb.max_compressed_blk_size = 128; output->grph.rgb.independent_64b_blks = false; output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1; output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; break; case dcc_control__256_64_64: output->grph.rgb.max_uncompressed_blk_size = 256; output->grph.rgb.max_compressed_blk_size = 64; output->grph.rgb.independent_64b_blks = true; output->grph.rgb.dcc_controls.dcc_256_64_64 = 1; break; case dcc_control__256_128_128: output->grph.rgb.max_uncompressed_blk_size = 256; output->grph.rgb.max_compressed_blk_size = 128; output->grph.rgb.independent_64b_blks = false; output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; break; } output->capable = true; output->const_color_support = true; return true; } void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; uint32_t prog_wm_value = convert_and_clamp(hubbub1->watermarks.a.urgent_ns, refclk_mhz, 0x1fffff); REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value); } void hubbub3_force_pstate_change_control(struct hubbub *hubbub, bool force, bool allow) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, allow, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, force); } /* Copy values from WM set A to all other sets */ void hubbub3_init_watermarks(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t reg; reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg); REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg); REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg); reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A); REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg); REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg); REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg); reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A); REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg); REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg); REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg); reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A); REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg); REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg); REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg); reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg); REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg); REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg); reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg); REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg); REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg); reg = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, reg); REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, reg); REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg); } static const struct hubbub_funcs hubbub30_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub3_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, 
.get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, }; void hubbub3_construct(struct dcn20_hubbub *hubbub3, struct dc_context *ctx, const struct dcn_hubbub_registers *hubbub_regs, const struct dcn_hubbub_shift *hubbub_shift, const struct dcn_hubbub_mask *hubbub_mask) { hubbub3->base.ctx = ctx; hubbub3->base.funcs = &hubbub30_funcs; hubbub3->regs = hubbub_regs; hubbub3->shifts = hubbub_shift; hubbub3->masks = hubbub_mask; hubbub3->debug_test_index_pstate = 0xB; hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */ }
linux-master
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
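convert_and_clamp() in dcn30_hubbub.c above turns a watermark given in nanoseconds into DCHUB refclk cycles (refclk_mhz cycles per microsecond, hence ns * MHz / 1000) and clamps it to the destination field width, e.g. the 21-bit 0x1fffff limit used in hubbub3_force_wm_propagate_to_pipes(). A standalone restatement for illustration; wm_ns_to_refclk_cycles() and the sample numbers are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as convert_and_clamp(): multiply first, divide by 1000,
 * then clamp to the register field's maximum. */
static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
                                       uint32_t clamp_value)
{
        uint32_t cycles = wm_ns * refclk_mhz;

        cycles /= 1000;
        return cycles > clamp_value ? clamp_value : cycles;
}

int main(void)
{
        /* Example: a 10 us (10000 ns) watermark at a 400 MHz refclk,
         * clamped to a 21-bit field. */
        printf("%u refclk cycles\n",
               wm_ns_to_refclk_cycles(10000, 400, 0x1fffff)); /* 4000 */
        return 0;
}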