// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
/* FIXME: add a banner here */
gmu->hung = true;
/* Turn off the hangcheck timer while we are resetting */
del_timer(&gpu->hangcheck_timer);
/* Queue the GPU handler because we need to treat this as a recovery */
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
struct a6xx_gmu *gmu = data;
u32 status;
status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
a6xx_gmu_fault(gmu);
}
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
return IRQ_HANDLED;
}
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
struct a6xx_gmu *gmu = data;
u32 status;
status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
a6xx_gmu_fault(gmu);
}
return IRQ_HANDLED;
}
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (!gmu->initialized)
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (!gmu->initialized)
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 perf_index;
unsigned long gpu_freq;
int ret = 0;
gpu_freq = dev_pm_opp_get_freq(opp);
if (gpu_freq == gmu->freq)
return;
for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
if (gpu_freq == gmu->gpu_freqs[perf_index])
break;
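/*
 * gpu_freqs[] is built in ascending order (see a6xx_gmu_build_freq_table()),
 * so if no exact match is found the loop above falls through to the highest
 * available level, nr_gpu_freqs - 1.
 */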
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
trace_msm_gmu_freq_change(gmu->freq, perf_index);
/*
* This can get called from devfreq while the hardware is idle. Don't
* bring up the power if it isn't already active. All we're doing here
* is updating the frequency so that when we come back online we're at
* the right rate.
*/
if (suspended)
return;
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
return;
}
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
((3 & 0xf) << 28) | perf_index);
/*
* Send an invalid index as a vote for the bus bandwidth and let the
* firmware decide on the right vote
*/
gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
/* Set and clear the OOB for DCVS to trigger the GMU */
a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
u32 val;
int local = gmu->idle_level;
/* SPTP and IFPC both report as IFPC */
if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
local = GMU_IDLE_STATE_IFPC;
val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val == local) {
if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
!a6xx_gmu_gx_is_on(gmu))
return true;
}
return false;
}
/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
return spin_until(a6xx_gmu_check_idle_level(gmu));
}
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
int ret;
u32 val;
u32 mask, reset_val;
val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
if (val <= 0x20010004) {
mask = 0xffffffff;
reset_val = 0xbabeface;
} else {
mask = 0x1ff;
reset_val = 0x100;
}
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
/* Set the log wptr index
* note: downstream saves the value in poweroff and restores it here
*/
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
(val & mask) == reset_val, 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
return ret;
}
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
u32 val;
int ret;
gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
val & 1, 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
return ret;
}
struct a6xx_gmu_oob_bits {
int set, ack, set_new, ack_new, clear, clear_new;
const char *name;
};
/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 */
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
[GMU_OOB_GPU_SET] = {
.name = "GPU_SET",
.set = 16,
.ack = 24,
.set_new = 30,
.ack_new = 31,
.clear = 24,
.clear_new = 31,
},
[GMU_OOB_PERFCOUNTER_SET] = {
.name = "PERFCOUNTER",
.set = 17,
.ack = 25,
.set_new = 28,
.ack_new = 30,
.clear = 25,
.clear_new = 29,
},
[GMU_OOB_BOOT_SLUMBER] = {
.name = "BOOT_SLUMBER",
.set = 22,
.ack = 30,
.clear = 30,
},
[GMU_OOB_DCVS_SET] = {
.name = "GPU_DCVS",
.set = 23,
.ack = 31,
.clear = 31,
},
};
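/*
 * Worked example from the table above: a legacy GMU_OOB_GPU_SET request
 * writes bit 16 of HOST2GMU_INTR_SET, polls for the ack in bit 24 of
 * GMU2HOST_INTR_INFO, and a6xx_gmu_clear_oob() later writes bit 24 (the
 * .clear field) back to HOST2GMU_INTR_SET to release the request.
 */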
/* Trigger a OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
int ret;
u32 val;
int request, ack;
WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
if (gmu->legacy) {
request = a6xx_gmu_oob_bits[state].set;
ack = a6xx_gmu_oob_bits[state].ack;
} else {
request = a6xx_gmu_oob_bits[state].set_new;
ack = a6xx_gmu_oob_bits[state].ack_new;
if (!request || !ack) {
DRM_DEV_ERROR(gmu->dev,
"Invalid non-legacy GMU request %s\n",
a6xx_gmu_oob_bits[state].name);
return -EINVAL;
}
}
/* Trigger the requested OOB operation */
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
/* Wait for the acknowledge interrupt */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
val & (1 << ack), 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
a6xx_gmu_oob_bits[state].name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
/* Clear the acknowledge interrupt */
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
return ret;
}
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
int bit;
WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
if (gmu->legacy)
bit = a6xx_gmu_oob_bits[state].clear;
else
bit = a6xx_gmu_oob_bits[state].clear_new;
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
/* Enable CPU control of SPTP power collapse */
int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
int ret;
u32 val;
if (!gmu->legacy)
return 0;
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
(val & 0x38) == 0x28, 1, 100);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
return ret;
}
/* Disable CPU control of SPTP power collapse */
void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
u32 val;
int ret;
if (!gmu->legacy)
return;
/* Make sure retention is on */
gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
(val & 0x04), 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
u32 vote;
/* Let the GMU know we are getting ready for boot */
gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
/* Choose the "default" power level as the highest available */
vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
/* Let the GMU know the boot sequence has started */
return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
int ret;
/* Disable the power counter so the GMU isn't busy */
gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
/* Disable SPTP_PC if the CPU is responsible for it */
if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
a6xx_sptprac_disable(gmu);
if (!gmu->legacy) {
ret = a6xx_hfi_send_prep_slumber(gmu);
goto out;
}
/* Tell the GMU to get ready to slumber */
gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
if (!ret) {
/* Check to see if the GMU really did slumber */
if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
!= 0x0f) {
DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
ret = -ETIMEDOUT;
}
}
out:
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
}
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
int ret;
u32 val;
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
/* Wait for the register to finish posting */
wmb();
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
return ret;
}
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
return ret;
}
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
int ret;
u32 val;
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
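/* PDC register offsets are in units of 32-bit words; convert to a byte offset */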
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
msm_writel(value, ptr + (offset << 2));
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
const char *name);
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
bool pdc_in_aop = false;
if (IS_ERR(pdcptr))
goto err;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090;
else if (adreno_is_a619(adreno_gpu))
pdc_address_offset = 0x300a0;
else
pdc_address_offset = 0x30080;
if (!pdc_in_aop) {
seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
if (IS_ERR(seqptr))
goto err;
}
/* Disable SDE clock gating */
gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
/* Setup RSC PDC handshake for sleep and wakeup */
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
/* Load RSC sequencer uCode for sleep and wakeup */
if (adreno_is_a650_family(adreno_gpu)) {
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
} else {
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
}
if (pdc_in_aop)
goto setup_pdc;
/* Load PDC sequencer uCode for power up and power down sequence */
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
/* Set TCS commands used by PDC sequence for low power modes */
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
adreno_is_a650_family(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
setup_pdc:
pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
/* ensure no writes happen before the uCode is fully written */
wmb();
a6xx_rpmh_stop(gmu);
err:
if (!IS_ERR_OR_NULL(pdcptr))
iounmap(pdcptr);
if (!IS_ERR_OR_NULL(seqptr))
iounmap(seqptr);
}
/*
* The lowest 16 bits of this value are the number of XO clock cycles for main
* hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
* for the shorter hysteresis that happens after main - this is 0xa (~0.5 us)
*/
#define GMU_PWR_COL_HYST 0x000a1680
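/*
 * Sanity check of the arithmetic, assuming the 19.2 MHz XO these values
 * imply: 0x1680 = 5760 cycles / 19.2 MHz = 300 us, and 0xa = 10 cycles /
 * 19.2 MHz ~= 0.52 us.
 */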
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
/* Disable GMU WB/RB buffer */
gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
switch (gmu->idle_level) {
case GMU_IDLE_STATE_IFPC:
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
GMU_PWR_COL_HYST);
gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
fallthrough;
case GMU_IDLE_STATE_SPTP:
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
GMU_PWR_COL_HYST);
gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
}
/* Enable RPMh GPU client */
gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
struct block_header {
u32 addr;
u32 size;
u32 type;
u32 value;
u32 data[];
};
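/*
 * The non-legacy GMU firmware image is a stream of these headers, each
 * followed immediately by 'size' bytes of payload; the next header starts
 * at the following u32, which is the &blk->data[blk->size >> 2] advance
 * used in a6xx_gmu_fw_load() below.
 */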
static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
if (!in_range(blk->addr, bo->iova, bo->size))
return false;
memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
return true;
}
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
const struct block_header *blk;
u32 reg_offset;
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
if (adreno_is_a650_family(adreno_gpu))
dtcm_base = 0x10004000;
if (gmu->legacy) {
/* Sanity check the size of the firmware that was loaded */
if (fw_image->size > 0x8000) {
DRM_DEV_ERROR(gmu->dev,
"GMU firmware is bigger than the available region\n");
return -EINVAL;
}
gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
(u32*) fw_image->data, fw_image->size);
return 0;
}
for (blk = (const struct block_header *) fw_image->data;
(const u8*) blk < fw_image->data + fw_image->size;
blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
if (blk->size == 0)
continue;
if (in_range(blk->addr, itcm_base, SZ_16K)) {
reg_offset = (blk->addr - itcm_base) >> 2;
gmu_write_bulk(gmu,
REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
blk->data, blk->size);
} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
reg_offset = (blk->addr - dtcm_base) >> 2;
gmu_write_bulk(gmu,
REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
blk->data, blk->size);
} else if (!fw_block_mem(&gmu->icache, blk) &&
!fw_block_mem(&gmu->dcache, blk) &&
!fw_block_mem(&gmu->dummy, blk)) {
DRM_DEV_ERROR(gmu->dev,
"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
blk->addr, blk->size, blk->data[0]);
}
}
return 0;
}
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int ret;
u32 chipid;
if (adreno_is_a650_family(adreno_gpu)) {
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
}
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
} else {
if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
"GMU firmware is not loaded\n"))
return -ENOENT;
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
ret = a6xx_gmu_fw_load(gmu);
if (ret)
return ret;
}
gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
/* Write the iova of the HFI table */
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
(1 << 31) | (0xa << 18) | (0xa0));
/*
* Snapshots toggle the NMI bit which will result in a jump to the NMI
* handler instead of __main. Set the M3 config value to avoid that.
*/
gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
/*
* Note that the GMU has a slightly different layout for
* chip_id, for whatever reason, so a bit of massaging
* is needed. The upper 16b are the same, but minor and
* patchid are packed in four bits each with the lower
* 8b unused:
*/
chipid = adreno_gpu->chip_id & 0xffff0000;
chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
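/*
 * Worked example with a hypothetical chip_id of 0x06030201 (core 6,
 * major 3, minor 2, patchid 1): the result is 0x06032100 - the upper
 * 16 bits pass through, minor lands in bits [15:12], patchid in bits
 * [11:8], and the low 8 bits stay zero.
 */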
gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
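/*
 * The log buffer is at least page aligned, so the low 12 bits of the iova
 * written below are free to carry the buffer size (in 4K pages, minus one)
 * in the same register.
 */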
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
gmu->log.iova | (gmu->log.size / SZ_4K - 1));
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
ret = a6xx_gmu_start(gmu);
if (ret)
return ret;
if (gmu->legacy) {
ret = a6xx_gmu_gfx_rail_on(gmu);
if (ret)
return ret;
}
/* Enable SPTP_PC if the CPU is responsible for it */
if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
ret = a6xx_sptprac_enable(gmu);
if (ret)
return ret;
}
ret = a6xx_gmu_hfi_start(gmu);
if (ret)
return ret;
/* FIXME: Do we need this wmb() here? */
wmb();
return 0;
}
#define A6XX_HFI_IRQ_MASK \
(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
#define A6XX_GMU_IRQ_MASK \
(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
disable_irq(gmu->gmu_irq);
disable_irq(gmu->hfi_irq);
gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
u32 val;
/* Make sure there are no outstanding RPMh votes */
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
(val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
(val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
/*
* Turn off keep alive that might have been enabled by the hang
* interrupt
*/
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
/* Flush all the queues */
a6xx_hfi_stop(gmu);
/* Stop the interrupts */
a6xx_gmu_irq_disable(gmu);
/* Force off SPTP in case the GMU is managing it */
a6xx_sptprac_disable(gmu);
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
/* Clear the WRITEDROPPED fields and put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
/* Make sure the above writes go through */
wmb();
/* Halt the gmu cm3 core */
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
a6xx_bus_clear_pending_transactions(adreno_gpu, true);
/* Reset GPU core blocks */
a6xx_gpu_sw_reset(gpu, true);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
struct dev_pm_opp *gpu_opp;
unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
if (IS_ERR(gpu_opp))
return;
gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
a6xx_gmu_set_freq(gpu, gpu_opp, false);
dev_pm_opp_put(gpu_opp);
}
static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
struct dev_pm_opp *gpu_opp;
unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
if (IS_ERR(gpu_opp))
return;
dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
dev_pm_opp_put(gpu_opp);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int status, ret;
if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
return -EINVAL;
gmu->hung = false;
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
/*
* "enable" the GX power domain which won't actually do anything but it
* will make sure that the refcounting is correct in case we need to
* bring down the GX after a GMU failure
*/
if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_get_sync(gmu->gxpd);
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
clk_set_rate(gmu->hub_clk, 150000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
return ret;
}
/* Set the bus quota to a reasonable value for boot */
a6xx_gmu_set_initial_bw(gpu, gmu);
/* Enable the GMU interrupt */
gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
enable_irq(gmu->gmu_irq);
/*
 * Check to see if we are doing a cold or warm boot: GENERAL_7 is written
 * as 1 with register retention enabled in a6xx_gmu_fw_start(), so it
 * reads back as 1 only if the GMU state survived
 */
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
/*
* Warm boot path does not work on newer GPUs
* Presumably this is because icache/dcache regions must be restored
*/
if (!gmu->legacy)
status = GMU_COLD_BOOT;
ret = a6xx_gmu_fw_start(gmu, status);
if (ret)
goto out;
ret = a6xx_hfi_start(gmu, status);
if (ret)
goto out;
/*
* Turn on the GMU firmware fault interrupt after we know the boot
* sequence is successful
*/
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
enable_irq(gmu->hfi_irq);
/* Set the GPU to the current freq */
a6xx_gmu_set_initial_freq(gpu, gmu);
out:
/* On failure, shut down the GMU to leave it in a good state */
if (ret) {
disable_irq(gmu->gmu_irq);
a6xx_rpmh_stop(gmu);
pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
}
return ret;
}
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
u32 reg;
if (!gmu->initialized)
return true;
reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
return false;
return true;
}
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
/*
 * The GMU may still be in slumber unless the GPU started, so check and
 * skip putting it back into slumber if so
 */
val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val != 0xf) {
int ret = a6xx_gmu_wait_for_idle(gmu);
/* If the GMU isn't responding assume it is hung */
if (ret) {
a6xx_gmu_force_off(gmu);
return;
}
a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
/* tell the GMU we want to slumber */
ret = a6xx_gmu_notify_slumber(gmu);
if (ret) {
a6xx_gmu_force_off(gmu);
return;
}
ret = gmu_poll_timeout(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
100, 10000);
/*
* Let the user know we failed to slumber but don't worry too
* much because we are powering down anyway
*/
if (ret)
DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
}
/* Turn off HFI */
a6xx_hfi_stop(gmu);
/* Stop the interrupts and mask the hardware */
a6xx_gmu_irq_disable(gmu);
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
}
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct msm_gpu *gpu = &a6xx_gpu->base.base;
if (!pm_runtime_active(gmu->dev))
return 0;
/*
* Force the GMU off if we detected a hang, otherwise try to shut it
* down gracefully
*/
if (gmu->hung)
a6xx_gmu_force_off(gmu);
else
a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
/*
* Make sure the GX domain is off before turning off the GMU (CX)
* domain. Usually the GMU does this but only if the shutdown sequence
* was successful
*/
if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_put_sync(gmu->gxpd);
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->dev);
return 0;
}
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
msm_gem_address_space_put(gmu->aspace);
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
size_t size, u64 iova, const char *name)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct drm_device *dev = a6xx_gpu->base.base.dev;
uint32_t flags = MSM_BO_WC;
u64 range_start, range_end;
int ret;
size = PAGE_ALIGN(size);
if (!iova) {
/* no fixed address - use GMU's uncached range */
range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
range_end = 0x80000000;
} else {
/* range for fixed address */
range_start = iova;
range_end = iova + size;
/* use IOMMU_PRIV for icache/dcache */
flags |= MSM_BO_MAP_PRIV;
}
bo->obj = msm_gem_new(dev, size, flags);
if (IS_ERR(bo->obj))
return PTR_ERR(bo->obj);
ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
range_start, range_end);
if (ret) {
drm_gem_object_put(bo->obj);
return ret;
}
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
msm_gem_object_set_name(bo->obj, name);
return 0;
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
struct msm_mmu *mmu;
mmu = msm_iommu_new(gmu->dev, 0);
if (!mmu)
return -ENODEV;
if (IS_ERR(mmu))
return PTR_ERR(mmu);
gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
if (IS_ERR(gmu->aspace))
return PTR_ERR(gmu->aspace);
return 0;
}
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
unsigned long freq)
{
struct dev_pm_opp *opp;
unsigned int val;
if (!freq)
return 0;
opp = dev_pm_opp_find_freq_exact(dev, freq, true);
if (IS_ERR(opp))
return 0;
val = dev_pm_opp_get_level(opp);
dev_pm_opp_put(opp);
return val;
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
unsigned long *freqs, int freqs_count, const char *id)
{
int i, j;
const u16 *pri, *sec;
size_t pri_count, sec_count;
pri = cmd_db_read_aux_data(id, &pri_count);
if (IS_ERR(pri))
return PTR_ERR(pri);
/*
* The data comes back as an array of unsigned shorts so adjust the
* count accordingly
*/
pri_count >>= 1;
if (!pri_count)
return -EINVAL;
sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
if (IS_ERR(sec))
return PTR_ERR(sec);
sec_count >>= 1;
if (!sec_count)
return -EINVAL;
/* Construct a vote for each frequency */
for (i = 0; i < freqs_count; i++) {
u8 pindex = 0, sindex = 0;
unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
/* Get the primary index that matches the arc level */
for (j = 0; j < pri_count; j++) {
if (pri[j] >= level) {
pindex = j;
break;
}
}
if (j == pri_count) {
DRM_DEV_ERROR(dev,
"Level %u not found in the RPMh list\n",
level);
DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
DRM_DEV_ERROR(dev, " %u\n", pri[j]);
return -EINVAL;
}
/*
 * Look for a level in the secondary list that matches. If
 * nothing fits, use the maximum non-zero vote
 */
for (j = 0; j < sec_count; j++) {
if (sec[j] >= level) {
sindex = j;
break;
} else if (sec[j]) {
sindex = j;
}
}
/* Construct the vote */
votes[i] = ((pri[pindex] & 0xffff) << 16) |
(sindex << 8) | pindex;
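/*
 * Hypothetical example: a level of 64 found at pri[3] and sec[2]
 * encodes as (64 << 16) | (2 << 8) | 3 = 0x00400203.
 */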
}
return 0;
}
/*
* The GMU votes with the RPMh for itself and on behalf of the GPU but we need
* to construct the list of votes on the CPU and send it over. Query the RPMh
* voltage levels and build the votes
*/
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
/* Build the GX votes */
ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
/* Build the CX votes */
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
return ret;
}
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
u32 size)
{
int count = dev_pm_opp_get_opp_count(dev);
struct dev_pm_opp *opp;
int i, index = 0;
unsigned long freq = 1;
/*
* The OPP table doesn't contain the "off" frequency level so we need to
* add 1 to the table size to account for it
*/
if (WARN(count + 1 > size,
"The GMU frequency table is being truncated\n"))
count = size - 1;
/* Set the "off" frequency */
freqs[index++] = 0;
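/*
 * dev_pm_opp_find_freq_ceil() returns the lowest OPP at or above *freq
 * and writes the matched rate back, so starting at 1 Hz and bumping the
 * result by one each pass walks the table in ascending order.
 */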
for (i = 0; i < count; i++) {
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp))
break;
dev_pm_opp_put(opp);
freqs[index++] = freq++;
}
return index;
}
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret = 0;
/*
* The GMU handles its own frequency switching so build a list of
* available frequencies to send during initialization
*/
ret = devm_pm_opp_of_add_table(gmu->dev);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
return ret;
}
gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
/*
* The GMU also handles GPU frequency switching so build a list
* from the GPU OPP table
*/
gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
if (ret < 1)
return ret;
gmu->nr_clocks = ret;
gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
gmu->nr_clocks, "gmu");
gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
gmu->nr_clocks, "hub");
return 0;
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
const char *name)
{
void __iomem *ret;
struct resource *res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, name);
if (!res) {
DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
ret = ioremap(res->start, resource_size(res));
if (!ret) {
DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
const char *name, irq_handler_t handler)
{
int irq, ret;
irq = platform_get_irq_byname(pdev, name);
ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
name, ret);
return ret;
}
disable_irq(irq);
return irq;
}
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = to_platform_device(gmu->dev);
mutex_lock(&gmu->lock);
if (!gmu->initialized) {
mutex_unlock(&gmu->lock);
return;
}
gmu->initialized = false;
mutex_unlock(&gmu->lock);
pm_runtime_force_suspend(gmu->dev);
/*
* Since cxpd is a virt device, the devlink with gmu-dev will be removed
* automatically when we do detach
*/
dev_pm_domain_detach(gmu->cxpd, false);
if (!IS_ERR_OR_NULL(gmu->gxpd)) {
pm_runtime_disable(gmu->gxpd);
dev_pm_domain_detach(gmu->gxpd, false);
}
iounmap(gmu->mmio);
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
iounmap(gmu->rscc);
gmu->mmio = NULL;
gmu->rscc = NULL;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
a6xx_gmu_memory_free(gmu);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
}
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
}
static int cxpd_notifier_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
if (action == GENPD_NOTIFY_OFF)
complete_all(&gmu->pd_gate);
return 0;
}
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct platform_device *pdev = of_find_device_by_node(node);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int ret;
if (!pdev)
return -ENODEV;
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
pm_runtime_enable(gmu->dev);
/* Mark legacy for manual SPTPRAC control */
gmu->legacy = true;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
if (IS_ERR(gmu->mmio)) {
ret = PTR_ERR(gmu->mmio);
goto err_mmio;
}
gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
if (IS_ERR(gmu->cxpd)) {
ret = PTR_ERR(gmu->cxpd);
goto err_mmio;
}
if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
ret = -ENODEV;
goto detach_cxpd;
}
init_completion(&gmu->pd_gate);
complete_all(&gmu->pd_gate);
gmu->pd_nb.notifier_call = cxpd_notifier_cb;
/* Get a link to the GX power domain to reset the GPU */
gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
if (IS_ERR(gmu->gxpd)) {
ret = PTR_ERR(gmu->gxpd);
goto err_mmio;
}
gmu->initialized = true;
return 0;
detach_cxpd:
dev_pm_domain_detach(gmu->cxpd, false);
err_mmio:
iounmap(gmu->mmio);
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
return ret;
}
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
int ret;
if (!pdev)
return -ENODEV;
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
/* Get the list of clocks */
ret = a6xx_gmu_clocks_probe(gmu);
if (ret)
goto err_put_device;
ret = a6xx_gmu_memory_probe(gmu);
if (ret)
goto err_put_device;
/* A660 now requires handling "prealloc requests" in GMU firmware
* For now just hardcode allocations based on the known firmware.
* note: there is no indication that these correspond to "dummy" or
* "debug" regions, but this "guess" allows reusing these BOs which
* are otherwise unused by a660.
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
0x60400000, "debug");
if (ret)
goto err_memory;
gmu->dummy.size = SZ_8K;
}
/* Allocate memory for the GMU dummy page */
ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
0x60000000, "dummy");
if (ret)
goto err_memory;
/* Note that a650 family also includes a660 family: */
if (adreno_is_a650_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
/*
* NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
* to allocate icache/dcache here, as per downstream code flow, but it may not actually be
* necessary. If you omit this step and you don't get random pagefaults, you are likely
* good to go without this!
*/
} else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_256K - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
SZ_256K - SZ_16K, 0x44000, "dcache");
if (ret)
goto err_memory;
} else if (adreno_is_a630_family(adreno_gpu)) {
/* HFI v1, has sptprac */
gmu->legacy = true;
/* Allocate memory for the GMU debug region */
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
if (ret)
goto err_memory;
}
/* Allocate memory for the GMU log region */
ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
if (ret)
goto err_memory;
/* Allocate memory for the HFI queues */
ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
if (ret)
goto err_memory;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
if (IS_ERR(gmu->mmio)) {
ret = PTR_ERR(gmu->mmio);
goto err_memory;
}
if (adreno_is_a650_family(adreno_gpu)) {
gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
if (IS_ERR(gmu->rscc)) {
ret = -ENODEV;
goto err_mmio;
}
} else {
gmu->rscc = gmu->mmio + 0x23000;
}
/* Get the HFI and GMU interrupts */
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
ret = -ENODEV;
goto err_mmio;
}
gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
if (IS_ERR(gmu->cxpd)) {
ret = PTR_ERR(gmu->cxpd);
goto err_mmio;
}
if (!device_link_add(gmu->dev, gmu->cxpd,
DL_FLAG_PM_RUNTIME)) {
ret = -ENODEV;
goto detach_cxpd;
}
init_completion(&gmu->pd_gate);
complete_all(&gmu->pd_gate);
gmu->pd_nb.notifier_call = cxpd_notifier_cb;
/*
* Get a link to the GX power domain to reset the GPU in case of GMU
* crash
*/
gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
/* Initialize RPMh */
a6xx_gmu_rpmh_init(gmu);
gmu->initialized = true;
return 0;
detach_cxpd:
dev_pm_domain_detach(gmu->cxpd, false);
err_mmio:
iounmap(gmu->mmio);
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
iounmap(gmu->rscc);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
err_memory:
a6xx_gmu_memory_free(gmu);
err_put_device:
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
return ret;
}
/* === end of drivers/gpu/drm/msm/adreno/a6xx_gmu.c (repo: linux-master) === */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/pm_domain.h>
#include <linux/soc/qcom/llcc-qcom.h>
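/* PAS (Peripheral Authentication Service) id used when loading the GPU zap shader */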
#define GPU_PAS_ID 13
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
/* Check that the GMU is idle */
if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
return false;
/* Check that the CX master is idle */
if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
return false;
return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}
static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
/* wait for CP to drain ringbuffer: */
if (!adreno_idle(gpu, ring))
return false;
if (spin_until(_a6xx_check_idle(gpu))) {
DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
gpu->name, __builtin_return_address(0),
gpu_read(gpu, REG_A6XX_RBBM_STATUS),
gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
return false;
}
return true;
}
static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
}
}
static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
uint32_t wptr;
unsigned long flags;
update_shadow_rptr(gpu, ring);
spin_lock_irqsave(&ring->preempt_lock, flags);
/* Copy the shadow to the actual register */
ring->cur = ring->next;
/* Make sure to wrap wptr if we need to */
wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->preempt_lock, flags);
/* Make sure everything is posted before making a decision */
mb();
gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
u64 iova)
{
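/*
 * CP_REG_TO_MEM with CNT(2) and the 64B flag copies two consecutive
 * 32-bit counter registers into the 64-bit value at iova.
 */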
OUT_PKT7(ring, CP_REG_TO_MEM, 3);
OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
CP_REG_TO_MEM_0_CNT(2) |
CP_REG_TO_MEM_0_64B);
OUT_RING(ring, lower_32_bits(iova));
OUT_RING(ring, upper_32_bits(iova));
}
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
phys_addr_t ttbr;
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
return;
if (!sysprof) {
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
OUT_RING(ring, 1);
}
/* Execute the table update */
OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
OUT_RING(ring,
CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
CP_SMMU_TABLE_UPDATE_1_ASID(asid));
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
/*
* Write the new TTBR0 to the memstore. This is good for debugging.
*/
OUT_PKT7(ring, CP_MEM_WRITE, 4);
OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
OUT_RING(ring, lower_32_bits(ttbr));
OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
/*
* And finally, trigger a uche flush to be sure there isn't anything
* lingering in that part of the GPU
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
/*
* Wait for SRAM clear after the pgtable update, so the
* two can happen in parallel:
*/
OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
/* Re-enable protected mode: */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
}
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
/*
* For PM4 the GMU register offsets are calculated from the base of the
* GPU registers so we need to add 0x1a800 to the register value on A630
* to get the right value from PM4.
*/
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_start));
/* Invalidate CCU depth and color */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
ibs++;
break;
}
/*
* Periodically update shadow-wptr if needed, so that we
* can see partial progress of submits with large # of
* cmds.. otherwise we could needlessly stall waiting for
* ringbuffer state, simply due to looking at a shadow
* rptr value that has not been updated
*/
if ((ibs % 32) == 0)
update_shadow_rptr(gpu, ring);
}
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_end));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the
* timestamp is written to the memory and then triggers the interrupt
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
trace_msm_gpu_submit_flush(submit,
gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
a6xx_flush(gpu, ring);
}
const struct adreno_reglist a612_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{},
};
/* For a615 family (a615, a616, a618 and a619) */
const struct adreno_reglist a615_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002020},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{},
};
const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
{REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
{REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
{REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
{REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
{REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{},
};
const struct adreno_reglist a640_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{},
};
const struct adreno_reglist a650_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{},
};
const struct adreno_reglist a660_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{},
};
const struct adreno_reglist a690_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_CLOCK_CNTL, 0x8AA8AA82},
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
{}
};
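/*
 * Apply (or clear) one of the per-target hardware clock gating tables
 * above. Each table is a zero-terminated list of {offset, value} pairs;
 * writing the value enables clock gating for that block, writing 0
 * disables it.
 */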
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
u32 val, clock_cntl_on;
if (!adreno_gpu->info->hwcg)
return;
if (adreno_is_a630(adreno_gpu))
clock_cntl_on = 0x8aa8aa02;
else if (adreno_is_a610(adreno_gpu))
clock_cntl_on = 0xaaa8aa82;
else
clock_cntl_on = 0x8aa8aa82;
val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
/* Don't re-program the registers if they are already correct */
if ((!state && !val) || (state && (val == clock_cntl_on)))
return;
/* Disable SP clock before programming HWCG registers */
if (!adreno_is_a610(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
if (!adreno_is_a610(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
/* For a615, a616, a618, a619, a630, a640 and a680 */
static const u32 a6xx_protect[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
A6XX_PROTECT_NORDWR(0x00510, 0x0000),
A6XX_PROTECT_NORDWR(0x00534, 0x0000),
A6XX_PROTECT_NORDWR(0x00800, 0x0082),
A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
A6XX_PROTECT_NORDWR(0x00900, 0x004d),
A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
A6XX_PROTECT_NORDWR(0x09624, 0x01db),
A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
};
/* These are for a620 and a650 */
static const u32 a650_protect[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
A6XX_PROTECT_NORDWR(0x00510, 0x0000),
A6XX_PROTECT_NORDWR(0x00534, 0x0000),
A6XX_PROTECT_NORDWR(0x00800, 0x0082),
A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
A6XX_PROTECT_NORDWR(0x00900, 0x004d),
A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
A6XX_PROTECT_NORDWR(0x09624, 0x01db),
A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
};
/* These are for a635 and a660 */
static const u32 a660_protect[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
A6XX_PROTECT_NORDWR(0x00510, 0x0000),
A6XX_PROTECT_NORDWR(0x00534, 0x0000),
A6XX_PROTECT_NORDWR(0x00800, 0x0082),
A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
A6XX_PROTECT_NORDWR(0x00900, 0x004d),
A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
A6XX_PROTECT_NORDWR(0x09624, 0x01db),
A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
A6XX_PROTECT_NORDWR(0x0ae50, 0x012f),
A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
A6XX_PROTECT_NORDWR(0x0b608, 0x0006),
A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
A6XX_PROTECT_NORDWR(0x0be20, 0x015f),
A6XX_PROTECT_NORDWR(0x0d000, 0x05ff),
A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
A6XX_PROTECT_NORDWR(0x1a400, 0x1fff),
A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
};
/* These are for a690 */
static const u32 a690_protect[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x004ff),
A6XX_PROTECT_RDONLY(0x00501, 0x00001),
A6XX_PROTECT_RDONLY(0x0050b, 0x002f4),
A6XX_PROTECT_NORDWR(0x0050e, 0x00000),
A6XX_PROTECT_NORDWR(0x00510, 0x00000),
A6XX_PROTECT_NORDWR(0x00534, 0x00000),
A6XX_PROTECT_NORDWR(0x00800, 0x00082),
A6XX_PROTECT_NORDWR(0x008a0, 0x00008),
A6XX_PROTECT_NORDWR(0x008ab, 0x00024),
A6XX_PROTECT_RDONLY(0x008de, 0x000ae),
A6XX_PROTECT_NORDWR(0x00900, 0x0004d),
A6XX_PROTECT_NORDWR(0x0098d, 0x00272),
A6XX_PROTECT_NORDWR(0x00e00, 0x00001),
A6XX_PROTECT_NORDWR(0x00e03, 0x0000c),
A6XX_PROTECT_NORDWR(0x03c00, 0x000c3),
A6XX_PROTECT_RDONLY(0x03cc4, 0x01fff),
A6XX_PROTECT_NORDWR(0x08630, 0x001cf),
A6XX_PROTECT_NORDWR(0x08e00, 0x00000),
A6XX_PROTECT_NORDWR(0x08e08, 0x00007),
A6XX_PROTECT_NORDWR(0x08e50, 0x0001f),
A6XX_PROTECT_NORDWR(0x08e80, 0x0027f),
A6XX_PROTECT_NORDWR(0x09624, 0x001db),
A6XX_PROTECT_NORDWR(0x09e60, 0x00011),
A6XX_PROTECT_NORDWR(0x09e78, 0x00187),
A6XX_PROTECT_NORDWR(0x0a630, 0x001cf),
A6XX_PROTECT_NORDWR(0x0ae02, 0x00000),
A6XX_PROTECT_NORDWR(0x0ae50, 0x0012f),
A6XX_PROTECT_NORDWR(0x0b604, 0x00000),
A6XX_PROTECT_NORDWR(0x0b608, 0x00006),
A6XX_PROTECT_NORDWR(0x0be02, 0x00001),
A6XX_PROTECT_NORDWR(0x0be20, 0x0015f),
A6XX_PROTECT_NORDWR(0x0d000, 0x005ff),
A6XX_PROTECT_NORDWR(0x0f000, 0x00bff),
A6XX_PROTECT_RDONLY(0x0fc00, 0x01fff),
A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /* note: infinite range */
};
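/*
 * Program the CP_PROTECT registers from one of the per-target tables
 * above. Each entry encodes a base register offset and a span length;
 * RDONLY entries allow reads but fault on writes, NORDWR entries fault
 * on both. The last table entry is written to the highest supported
 * CP_PROTECT slot so that its span extends to the end of the register
 * space.
 */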
static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const u32 *regs = a6xx_protect;
unsigned i, count, count_max;
if (adreno_is_a650(adreno_gpu)) {
regs = a650_protect;
count = ARRAY_SIZE(a650_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
} else if (adreno_is_a690(adreno_gpu)) {
regs = a690_protect;
count = ARRAY_SIZE(a690_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a690_protect) > 48);
} else if (adreno_is_a660_family(adreno_gpu)) {
regs = a660_protect;
count = ARRAY_SIZE(a660_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
} else {
regs = a6xx_protect;
count = ARRAY_SIZE(a6xx_protect);
count_max = 32;
BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
}
/*
* Enable access protection to privileged registers, fault on an access
* protect violation and select the last span to protect from the start
* address all the way to the end of the register address space
*/
gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL,
A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN |
A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN |
A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE);
for (i = 0; i < count - 1; i++) {
/* Intentionally skip writing to some registers */
if (regs[i])
gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
}
/* Write the last entry to the final CP_PROTECT slot so it gets "infinite" length */
gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
}
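/*
 * Configure universal bandwidth compression (UBWC) parameters in the
 * RB, TPL1, SP and UCHE blocks. The per-target values below are
 * presumably mirrored from the downstream driver; fields marked
 * "unknown" are programmed but not fully understood.
 */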
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
u32 rgb565_predicator = 0;
/* Unknown, introduced with A650 family */
u32 uavflagprd_inv = 0;
/* Whether the minimum access length is 64 bits */
u32 min_acc_len = 0;
/* Entirely magic, per-GPU-gen value */
u32 ubwc_mode = 0;
/*
* The Highest Bank Bit value represents the bit of the highest DDR bank.
* We then subtract 13 from it (13 is the minimum value allowed by hw) and
* write the lowest two bits of the remaining value as hbb_lo and the
* one above it as hbb_hi to the hardware. This should ideally use DRAM
* type detection.
*/
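/*
 * Example: the defaults of hbb_hi = 0, hbb_lo = 2 below encode
 * HBB - 13 = 2, i.e. a highest bank bit of 15.
 */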
u32 hbb_hi = 0;
u32 hbb_lo = 2;
/* Unknown, introduced with A640/680 */
u32 amsbc = 0;
if (adreno_is_a610(adreno_gpu)) {
/* HBB = 14 */
hbb_lo = 1;
min_acc_len = 1;
ubwc_mode = 1;
}
/* a618 uses the hw default values */
if (adreno_is_a618(adreno_gpu))
return;
if (adreno_is_a619_holi(adreno_gpu))
hbb_lo = 0;
if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
hbb_lo = 3;
amsbc = 1;
rgb565_predicator = 1;
uavflagprd_inv = 2;
}
if (adreno_is_a690(adreno_gpu)) {
hbb_lo = 2;
amsbc = 1;
rgb565_predicator = 1;
uavflagprd_inv = 2;
}
if (adreno_is_7c3(adreno_gpu)) {
hbb_lo = 1;
amsbc = 1;
rgb565_predicator = 1;
uavflagprd_inv = 2;
}
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 |
min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
uavflagprd_inv << 4 | min_acc_len << 3 |
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
}
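/*
 * Send the one-time CP_ME_INIT packet that configures the CP firmware
 * (hardware contexts, error detection), then wait for the ring to
 * drain to confirm that the CP came up.
 */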
static int a6xx_cp_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT7(ring, CP_ME_INIT, 8);
OUT_RING(ring, 0x0000002f);
/* Enable multiple hardware contexts */
OUT_RING(ring, 0x00000003);
/* Enable error detection */
OUT_RING(ring, 0x20000000);
/* Don't enable header dump */
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
/* No workarounds enabled */
OUT_RING(ring, 0x00000000);
/* Pad rest of the cmds with 0's */
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
a6xx_flush(gpu, ring);
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
/*
* Check that the microcode version is new enough to include several key
* security fixes. Return true if the ucode is safe.
*/
static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
struct drm_gem_object *obj)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
u32 *buf = msm_gem_get_vaddr(obj);
bool ret = false;
if (IS_ERR(buf))
return false;
/*
* Targets up to a640 (a618, a630 and a640) need to check for a
* microcode version that is patched to support the whereami opcode or
* one that is new enough to include it by default.
*
* a650 tier targets don't need whereami but still need to be
* equal to or newer than 0.95 for other security fixes
*
* a660 targets have all the critical security fixes from the start
*/
if (!strcmp(sqe_name, "a630_sqe.fw")) {
/*
* If the lowest nibble is 0xa that is an indication that this
* microcode has been patched. The actual version is in dword
* [3] but we only care about the patchlevel which is the lowest
* nibble of dword [3]
*
* Otherwise check that the firmware is greater than or equal
* to 1.90 which was the first version that had this fix built
* in
*/
if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
(buf[0] & 0xfff) >= 0x190) {
a6xx_gpu->has_whereami = true;
ret = true;
goto out;
}
DRM_DEV_ERROR(&gpu->pdev->dev,
"a630 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x190);
} else if (!strcmp(sqe_name, "a650_sqe.fw")) {
if ((buf[0] & 0xfff) >= 0x095) {
ret = true;
goto out;
}
DRM_DEV_ERROR(&gpu->pdev->dev,
"a650 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x095);
} else if (!strcmp(sqe_name, "a660_sqe.fw")) {
ret = true;
} else {
DRM_DEV_ERROR(&gpu->pdev->dev,
"unknown GPU, add it to a6xx_ucode_check_version()!!\n");
}
out:
msm_gem_put_vaddr(obj);
return ret;
}
static int a6xx_ucode_load(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (!a6xx_gpu->sqe_bo) {
a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
if (IS_ERR(a6xx_gpu->sqe_bo)) {
int ret = PTR_ERR(a6xx_gpu->sqe_bo);
a6xx_gpu->sqe_bo = NULL;
DRM_DEV_ERROR(&gpu->pdev->dev,
"Could not allocate SQE ucode: %d\n", ret);
return ret;
}
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put(a6xx_gpu->sqe_bo);
a6xx_gpu->sqe_bo = NULL;
return -EPERM;
}
}
/*
* Expanded APRIV and targets that support WHERE_AM_I both need a
* privileged buffer to store the RPTR shadow
*/
if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) &&
!a6xx_gpu->shadow_bo) {
a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
gpu->aspace, &a6xx_gpu->shadow_bo,
&a6xx_gpu->shadow_iova);
if (IS_ERR(a6xx_gpu->shadow))
return PTR_ERR(a6xx_gpu->shadow);
msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
}
return 0;
}
static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
int ret;
if (loaded)
return 0;
ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
loaded = !ret;
return ret;
}
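/* The RBBM interrupts to unmask; each of these is handled in a6xx_irq() */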
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A6XX_RBBM_INT_0_MASK_CP_IB2 | \
A6XX_RBBM_INT_0_MASK_CP_IB1 | \
A6XX_RBBM_INT_0_MASK_CP_RB | \
A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
static int hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int ret;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
/* Make sure the GMU keeps the GPU on while we set it up */
ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
if (ret)
return ret;
}
/* Clear GBIF halt in case GX domain was not collapsed */
if (adreno_is_a619_holi(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
/* Let's make extra sure that the GPU can access the memory.. */
mb();
} else if (a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
/* Let's make extra sure that the GPU can access the memory.. */
mb();
}
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
if (adreno_is_a619_holi(adreno_gpu))
a6xx_sptprac_enable(gmu);
/*
* Disable the trusted memory range - we don't actually support secure
* memory rendering at this point in time and we don't want to block off
* part of the virtual memory space.
*/
gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
/* Turn on 64 bit addressing for all blocks */
gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start */
if (adreno_is_a610(adreno_gpu) ||
adreno_is_a640_family(adreno_gpu) ||
adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
} else {
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
}
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
/* Disable L2 bypass in the UCHE */
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
if (!adreno_is_a650_family(adreno_gpu)) {
/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000);
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
0x00100000 + adreno_gpu->info->gmem - 1);
}
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
} else if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
} else {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
}
if (adreno_is_a660_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
/* Setting the mem pool size */
if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
} else
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/*
 * Set the primFifo thresholds to their default values, plus the
 * vccCacheSkipDis=1 bit (0x200) for A640 and newer
 */
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
else
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
/* Turn on performance counters */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
else
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
0x3fe05ff4);
gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
0x3fa0ebee);
gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
0x3f5193ed);
gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
0x3f0243f0);
}
/* Set up the CX GMU counter 0 to count busy ticks */
gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
/* Enable the power counter */
gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5));
gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
/* Protect registers from the CP */
a6xx_set_cp_protect(gpu);
if (adreno_is_a660_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
}
/* Set dualQ + disable afull for A660 GPU */
if (adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
}
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
ret = adreno_hw_init(gpu);
if (ret)
goto out;
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
/* Set the ringbuffer address */
gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
/*
 * Targets that support extended APRIV can use the RPTR shadow from
 * hardware but all the other ones need to disable the feature. Targets
 * that support the WHERE_AM_I opcode can use that instead
 */
if (adreno_gpu->base.hw_apriv)
gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
else
gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Configure the RPTR shadow if needed: */
if (a6xx_gpu->shadow_bo) {
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
shadowptr(a6xx_gpu, gpu->rb[0]));
}
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
gpu->cur_ctx_seqno = 0;
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
ret = a6xx_cp_init(gpu);
if (ret)
goto out;
/*
* Try to load a zap shader into the secure world. If successful
* we can use the CP to switch out of secure mode. If not then we
* have no recourse but to try to switch ourselves out manually. If we
* guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
* be blocked and a permissions violation will soon follow.
*/
ret = a6xx_zap_shader_init(gpu);
if (!ret) {
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
OUT_RING(gpu->rb[0], 0x00000000);
a6xx_flush(gpu, gpu->rb[0]);
if (!a6xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else if (ret == -ENODEV) {
/*
* This device does not use a zap shader (but print a warning
* just in case someone got their dt wrong.. hopefully they
* have a debug UART to realize the error of their ways...
* if you mess this up you are about to crash horribly)
*/
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
ret = 0;
} else {
return ret;
}
out:
if (adreno_has_gmu_wrapper(adreno_gpu))
return ret;
/*
* Tell the GMU that we are done touching the GPU and it can start power
* management
*/
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
if (a6xx_gpu->gmu.legacy) {
/* Take the GMU out of its special boot mode */
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
}
return ret;
}
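/* Serialize hw_init() against concurrent GMU operations with the GMU lock */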
static int a6xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
mutex_lock(&a6xx_gpu->gmu.lock);
ret = hw_init(gpu);
mutex_unlock(&a6xx_gpu->gmu.lock);
return ret;
}
static void a6xx_dump(struct msm_gpu *gpu)
{
DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
gpu_read(gpu, REG_A6XX_RBBM_STATUS));
adreno_dump(gpu);
}
static void a6xx_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int i, active_submits;
adreno_dump_info(gpu);
for (i = 0; i < 8; i++)
DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
if (hang_debug)
a6xx_dump(gpu);
/*
 * Set the hung flag to handle recovery-specific sequences during the
 * rpm suspend we are about to trigger
 */
a6xx_gpu->hung = true;
/* Halt SQE first */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
/* active_submits won't change until we make a submission */
mutex_lock(&gpu->active_lock);
active_submits = gpu->active_submits;
/*
* Temporarily clear active_submits count to silence a WARN() in the
* runtime suspend cb
*/
gpu->active_submits = 0;
if (adreno_has_gmu_wrapper(adreno_gpu)) {
/* Drain the outstanding traffic on memory buses */
a6xx_bus_clear_pending_transactions(adreno_gpu, true);
/* Reset the GPU to a clean state */
a6xx_gpu_sw_reset(gpu, true);
a6xx_gpu_sw_reset(gpu, false);
}
reinit_completion(&gmu->pd_gate);
dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
dev_pm_genpd_synced_poweroff(gmu->cxpd);
/* Drop the rpm refcount from active submits */
if (active_submits)
pm_runtime_put(&gpu->pdev->dev);
/* And the final one from recover worker */
pm_runtime_put_sync(&gpu->pdev->dev);
if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");
dev_pm_genpd_remove_notifier(gmu->cxpd);
pm_runtime_use_autosuspend(&gpu->pdev->dev);
if (active_submits)
pm_runtime_get(&gpu->pdev->dev);
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->active_submits = active_submits;
mutex_unlock(&gpu->active_lock);
msm_gpu_hw_init(gpu);
a6xx_gpu->hung = false;
}
static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
{
static const char *uche_clients[7] = {
"VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
};
u32 val;
if (mid < 1 || mid > 3)
return "UNKNOWN";
/*
* The source of the data depends on the mid ID read from FSYNR1
* and the client ID read from the UCHE block
*/
val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF);
/* mid = 3 is most precise and refers to only one block per client */
if (mid == 3)
return uche_clients[val & 7];
/* For mid=2 the source is TP or VFD except when the client id is 0 */
if (mid == 2)
return ((val & 7) == 0) ? "TP" : "TP|VFD";
/* For mid=1 just return "UCHE" as a catchall for everything else */
return "UCHE";
}
static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
{
if (id == 0)
return "CP";
else if (id == 4)
return "CCU";
else if (id == 6)
return "CDP Prefetch";
return a6xx_uche_fault_block(gpu, id);
}
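/*
 * IOMMU fault handler: snapshot CP scratch registers 4-7 and resolve
 * the faulting client block from FSYNR1 before handing off to the
 * common adreno fault handler.
 */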
static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
{
struct msm_gpu *gpu = arg;
struct adreno_smmu_fault_info *info = data;
const char *block = "unknown";
u32 scratch[] = {
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)),
};
if (info)
block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
}
static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
{
u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
u32 val;
gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
dev_err_ratelimited(&gpu->pdev->dev,
"CP | opcode error | possible opcode=0x%8.8X\n",
val);
}
if (status & A6XX_CP_INT_CP_UCODE_ERROR)
dev_err_ratelimited(&gpu->pdev->dev,
"CP ucode error interrupt\n");
if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
dev_err_ratelimited(&gpu->pdev->dev,
"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
val & (1 << 20) ? "READ" : "WRITE",
(val & 0x3ffff), val);
}
if (status & A6XX_CP_INT_CP_AHB_ERROR)
dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
}
static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
/*
* If stalled on SMMU fault, we could trip the GPU's hang detection,
* but the fault handler will trigger the devcore dump, and we want
* to otherwise resume normally rather than killing the submit, so
* just bail.
*/
if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
return;
/*
* Force the GPU to stay on until after we finish
* collecting information
*/
if (!adreno_has_gmu_wrapper(adreno_gpu))
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A6XX_RBBM_STATUS),
gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
/* Turn off the hangcheck timer to keep it from bothering us */
del_timer(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
if (priv->disable_err_irq)
status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
a6xx_fault_detect_irq(gpu);
if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a6xx_cp_hw_err_irq(gpu);
if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
msm_gpu_retire(gpu);
return IRQ_HANDLED;
}
static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
{
llcc_slice_deactivate(a6xx_gpu->llc_slice);
llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
}
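/*
 * Activate the system cache (LLC) slices for GPU and GPU pagetable
 * walker traffic and program the slice IDs (SCIDs) into the GPU-side
 * registers, unless an MMU500 is present, in which case the bootloader
 * has already programmed them.
 */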
static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
u32 cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
(gpu_scid << 15) | (gpu_scid << 20);
/*
 * On A660, the SCID programming for UCHE traffic is done in
 * A6XX_GBIF_SCACHE_CNTL0[14:10]
 */
if (adreno_is_a660_family(adreno_gpu))
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
(1 << 8), (gpu_scid << 10) | (1 << 8));
}
/*
* For targets with an MMU500, activate the slice but don't program the
* register. The XBL will take care of that.
*/
if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
if (!a6xx_gpu->have_mmu500) {
u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);
gpuhtw_scid &= 0x1f;
cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
}
}
if (!cntl1_regval)
return;
/*
* Program the slice IDs for the various GPU blocks and GPU MMU
* pagetables
*/
if (!a6xx_gpu->have_mmu500) {
a6xx_llc_write(a6xx_gpu,
REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
/*
* Program cacheability overrides to not allocate cache
* lines on a write miss
*/
a6xx_llc_rmw(a6xx_gpu,
REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
return;
}
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
{
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
return;
llcc_slice_putd(a6xx_gpu->llc_slice);
llcc_slice_putd(a6xx_gpu->htw_llc_slice);
}
static void a6xx_llc_slices_init(struct platform_device *pdev,
struct a6xx_gpu *a6xx_gpu)
{
struct device_node *phandle;
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
return;
/*
* There is a different programming path for targets with an mmu500
* attached, so detect if that is the case
*/
phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
a6xx_gpu->have_mmu500 = (phandle &&
of_device_is_compatible(phandle, "arm,mmu-500"));
of_node_put(phandle);
if (a6xx_gpu->have_mmu500)
a6xx_gpu->llc_mmio = NULL;
else
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
}
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0)
#define VBIF_RESET_ACK_MASK 0xF0
#define GPR0_GBIF_HALT_REQUEST 0x1E0
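/*
 * Quiesce the memory buses before a reset or power collapse. VBIF-based
 * GPUs use a single halt/ack handshake; GBIF-based GPUs halt the GX
 * side, then new client requests, then all AXI requests, polling the
 * corresponding ack bits at each step.
 */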
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
{
struct msm_gpu *gpu = &adreno_gpu->base;
if (adreno_is_a619_holi(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
(VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
} else if (!a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
(VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
return;
}
if (gx_off) {
/* Halt the gx side of GBIF */
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
}
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
/* Halt all AXI requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
/* The GBIF halt needs to be explicitly cleared */
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
{
/* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
if (adreno_is_a610(to_adreno_gpu(gpu)))
return;
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
/* Perform a bogus read and add a brief delay to ensure ordering. */
gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD);
udelay(1);
/* The reset line needs to be asserted for at least 100 us */
if (assert)
udelay(100);
}
static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
gpu->needs_hw_init = true;
trace_msm_gpu_resume(0);
mutex_lock(&a6xx_gpu->gmu.lock);
ret = a6xx_gmu_resume(a6xx_gpu);
mutex_unlock(&a6xx_gpu->gmu.lock);
if (ret)
return ret;
msm_devfreq_resume(gpu);
a6xx_llc_activate(a6xx_gpu);
return ret;
}
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
unsigned long freq = gpu->fast_rate;
struct dev_pm_opp *opp;
int ret;
gpu->needs_hw_init = true;
trace_msm_gpu_resume(0);
mutex_lock(&a6xx_gpu->gmu.lock);
opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
goto err_set_opp;
}
dev_pm_opp_put(opp);
/* Set the core clock and bus bw, having VDD scaling in mind */
dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_resume_and_get(gmu->dev);
pm_runtime_resume_and_get(gmu->gxpd);
ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
if (ret)
goto err_bulk_clk;
if (adreno_is_a619_holi(adreno_gpu))
a6xx_sptprac_enable(gmu);
/* If anything goes south, tear the GPU down piece by piece.. */
if (ret) {
err_bulk_clk:
pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
}
err_set_opp:
mutex_unlock(&a6xx_gpu->gmu.lock);
if (!ret)
msm_devfreq_resume(gpu);
return ret;
}
static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int i, ret;
trace_msm_gpu_suspend(0);
a6xx_llc_deactivate(a6xx_gpu);
msm_devfreq_suspend(gpu);
mutex_lock(&a6xx_gpu->gmu.lock);
ret = a6xx_gmu_stop(a6xx_gpu);
mutex_unlock(&a6xx_gpu->gmu.lock);
if (ret)
return ret;
if (a6xx_gpu->shadow_bo)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
gpu->suspend_count++;
return 0;
}
static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int i;
trace_msm_gpu_suspend(0);
msm_devfreq_suspend(gpu);
mutex_lock(&a6xx_gpu->gmu.lock);
/* Drain the outstanding traffic on memory buses */
a6xx_bus_clear_pending_transactions(adreno_gpu, true);
if (adreno_is_a619_holi(adreno_gpu))
a6xx_sptprac_disable(gmu);
clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
pm_runtime_put_sync(gmu->gxpd);
dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
pm_runtime_put_sync(gmu->dev);
mutex_unlock(&a6xx_gpu->gmu.lock);
if (a6xx_gpu->shadow_bo)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
gpu->suspend_count++;
return 0;
}
static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
mutex_lock(&a6xx_gpu->gmu.lock);
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
mutex_unlock(&a6xx_gpu->gmu.lock);
return 0;
}
static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
return 0;
}
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
return a6xx_gpu->cur_ring;
}
static void a6xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
if (a6xx_gpu->shadow_bo) {
msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
drm_gem_object_put(a6xx_gpu->shadow_bo);
}
a6xx_llc_slices_destroy(a6xx_gpu);
a6xx_gmu_remove(a6xx_gpu);
adreno_gpu_cleanup(adreno_gpu);
kfree(a6xx_gpu);
}
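/*
 * Report GPU busy cycles from the GMU power counter that hw_init() set
 * up to count busy ticks, clocked by the 19.2 MHz always-on XO clock.
 */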
static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
u64 busy_cycles;
/* 19.2MHz */
*out_sample_rate = 19200000;
busy_cycles = gmu_read64(&a6xx_gpu->gmu,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
return busy_cycles;
}
static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
mutex_lock(&a6xx_gpu->gmu.lock);
a6xx_gmu_set_freq(gpu, opp, suspended);
mutex_unlock(&a6xx_gpu->gmu.lock);
}
static struct msm_gem_address_space *
a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
unsigned long quirks = 0;
/*
* This allows the GPU to set the bus attributes required to use system
* cache on behalf of the iommu page table walker.
*/
if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) &&
!device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
return adreno_iommu_create_address_space(gpu, pdev, quirks);
}
static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
struct msm_mmu *mmu;
mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
"gpu", 0x100000000ULL,
adreno_private_address_space_size(gpu));
}
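/*
 * Read the current ring read pointer: from the privileged shadow
 * buffer when the target supports it (APRIV or WHERE_AM_I), otherwise
 * straight from the CP_RB_RPTR register.
 */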
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
return a6xx_gpu->shadow[ring->id];
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct msm_cp_state cp_state = {
.ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
.ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
.ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
.ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
};
bool progress;
/*
* Adjust the remaining data to account for what has already been
* fetched from memory, but not yet consumed by the SQE.
*
* This is not *technically* correct, the amount buffered could
* exceed the IB size due to hw prefetching ahead, but:
*
* (1) We aren't trying to find the exact position, just whether
* progress has been made
* (2) The CP_REG_TO_MEM at the end of a submit should be enough
* to prevent prefetching into an unrelated submit. (And
* either way, at some point the ROQ will be full.)
*/
cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16;
cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16;
progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state));
ring->last_cp_state = cp_state;
return progress;
}
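/*
 * Translate the speedbin fuse value into a bitmask for dev_pm_opp
 * "supported hardware" matching. UINT_MAX either means the target has
 * no speedbin table (allow every OPP) or, when a table exists, that
 * the fuse value was not found (treated as an error by the caller).
 */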
static u32 fuse_to_supp_hw(const struct adreno_info *info, u32 fuse)
{
if (!info->speedbins)
return UINT_MAX;
for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++)
if (info->speedbins[i].fuse == fuse)
return BIT(info->speedbins[i].speedbin);
return UINT_MAX;
}
static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *info)
{
u32 supp_hw;
u32 speedbin;
int ret;
ret = adreno_read_speedbin(dev, &speedbin);
/*
* -ENOENT means that the platform doesn't support speedbin which is
* fine
*/
if (ret == -ENOENT) {
return 0;
} else if (ret) {
dev_err_probe(dev, ret,
"failed to read speed-bin. Some OPPs may not be supported by hardware\n");
return ret;
}
supp_hw = fuse_to_supp_hw(info, speedbin);
if (supp_hw == UINT_MAX) {
DRM_DEV_ERROR(dev,
"missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
speedbin);
return UINT_MAX;
}
ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
if (ret)
return ret;
return 0;
}
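/*
 * Example of the effect: with supp_hw == BIT(1), the OPP core keeps only
 * the OPP table entries whose "opp-supported-hw" devicetree value has
 * bit 1 set, so slower speed bins automatically lose the operating
 * points their silicon cannot run.
 */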
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
.hw_init = a6xx_hw_init,
.ucode_load = a6xx_ucode_load,
.pm_suspend = a6xx_gmu_pm_suspend,
.pm_resume = a6xx_gmu_pm_resume,
.recover = a6xx_recover,
.submit = a6xx_submit,
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.show = a6xx_show,
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gpu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
.create_address_space = a6xx_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
.get_timestamp = a6xx_gmu_get_timestamp,
};
static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
.hw_init = a6xx_hw_init,
.ucode_load = a6xx_ucode_load,
.pm_suspend = a6xx_pm_suspend,
.pm_resume = a6xx_pm_resume,
.recover = a6xx_recover,
.submit = a6xx_submit,
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.show = a6xx_show,
#endif
.gpu_busy = a6xx_gpu_busy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
.create_address_space = a6xx_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
.get_timestamp = a6xx_get_timestamp,
};
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
struct device_node *node;
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
int ret;
a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
if (!a6xx_gpu)
return ERR_PTR(-ENOMEM);
adreno_gpu = &a6xx_gpu->base;
gpu = &adreno_gpu->base;
mutex_init(&a6xx_gpu->gmu.lock);
adreno_gpu->registers = NULL;
/* Check if there is a GMU phandle and set it up */
node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
/* FIXME: How do we gracefully handle this? */
BUG_ON(!node);
adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
adreno_gpu->base.hw_apriv =
!!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
a6xx_llc_slices_init(pdev, a6xx_gpu);
ret = a6xx_set_supported_hw(&pdev->dev, config->info);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
}
if (adreno_has_gmu_wrapper(adreno_gpu))
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
else
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
}
/*
* For now only clamp to idle freq for devices where this is known not
* to cause power supply issues:
*/
if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
priv->gpu_clamp_to_idle = true;
if (adreno_has_gmu_wrapper(adreno_gpu))
ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
else
ret = a6xx_gmu_init(a6xx_gpu, node);
of_node_put(node);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
}
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
a6xx_fault_handler);
return gpu;
}
/* end of file: drivers/gpu/drm/msm/adreno/a6xx_gpu.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*/
#include <linux/types.h>
#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "a5xx_gpu.h"
static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
drm_printf(p, "PFP state:\n");
for (i = 0; i < 36; i++) {
gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
}
}
static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
drm_printf(p, "ME state:\n");
for (i = 0; i < 29; i++) {
gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
}
}
static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
drm_printf(p, "MEQ state:\n");
gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
for (i = 0; i < 64; i++) {
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
}
}
static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
drm_printf(p, "ROQ state:\n");
gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
for (i = 0; i < 512 / 4; i++) {
uint32_t val[4];
int j;
for (j = 0; j < 4; j++)
val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);
drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
val[0], val[1], val[2], val[3]);
}
}
static int show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_printer p = drm_seq_file_printer(m);
void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
node->info_ent->data;
show(priv->gpu, &p);
return 0;
}
#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
static struct drm_info_list a5xx_debugfs_list[] = {
ENT(pfp),
ENT(me),
ENT(meq),
ENT(roq),
};
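/*
 * For reference, ENT(pfp) expands to:
 *
 *   { .name = "pfp", .show = show, .data = pfp_print }
 *
 * i.e. all four files share the generic show() wrapper above and only
 * differ in the dump callback stashed in .data.
 */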
/* for debugfs files that can be written to, we can't use drm helper: */
static int
reset_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
/* TODO do we care about trying to make sure the GPU is idle?
* Since this is just a debug feature limited to CAP_SYS_ADMIN,
* maybe it is fine to let the user keep both pieces if they
* try to reset an active GPU.
*/
mutex_lock(&gpu->lock);
release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
gpu->needs_hw_init = true;
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
mutex_unlock(&gpu->lock);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
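/*
 * Userspace usage sketch (illustrative; assumes debugfs is mounted in
 * the usual place and the DRM minor is card 0):
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int trigger_gpu_reset(void)
{
	int fd = open("/sys/kernel/debug/dri/0/reset", O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, "1", 1);	/* the value is ignored by reset_set() */
	close(fd);
	return 0;
}
#endif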
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
struct drm_device *dev;
if (!minor)
return;
dev = minor->dev;
drm_debugfs_create_files(a5xx_debugfs_list,
ARRAY_SIZE(a5xx_debugfs_list),
minor->debugfs_root, minor);
debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev,
&reset_fops);
}
/* end of file: drivers/gpu/drm/msm/adreno/a5xx_debugfs.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
#include "a3xx_gpu.h"
#define A3XX_INT0_MASK \
(A3XX_INT0_RBBM_AHB_ERROR | \
A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
A3XX_INT0_CP_T0_PACKET_IN_IB | \
A3XX_INT0_CP_OPCODE_ERROR | \
A3XX_INT0_CP_RESERVED_BIT_ERROR | \
A3XX_INT0_CP_HW_FAULT | \
A3XX_INT0_CP_IB1_INT | \
A3XX_INT0_CP_IB2_INT | \
A3XX_INT0_CP_RB_INT | \
A3XX_INT0_CP_REG_PROTECT_FAULT | \
A3XX_INT0_CP_AHB_ERROR_HALT | \
A3XX_INT0_CACHE_FLUSH_TS | \
A3XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
static bool a3xx_idle(struct msm_gpu *gpu);
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
}
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->seqno);
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
#if 0
/* Dummy set-constant to trigger context rollover */
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
OUT_RING(ring, 0x00000000);
#endif
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}
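/*
 * The CACHE_FLUSH_TS event above makes the CP write submit->seqno into
 * the per-ring fence memptr once all preceding work has landed. A sketch
 * of the consumer side (simplified from the core retire logic):
 */
#if 0
static bool submit_retired(struct msm_ringbuffer *ring, uint32_t seqno)
{
	/* wrap-safe: the fence memptr tracks the last completed seqno */
	return (int32_t)(ring->memptrs->fence - seqno) >= 0;
}
#endif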
static bool a3xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000080);
OUT_RING(ring, 0x00000100);
OUT_RING(ring, 0x00000180);
OUT_RING(ring, 0x00006600);
OUT_RING(ring, 0x00000150);
OUT_RING(ring, 0x0000014e);
OUT_RING(ring, 0x00000154);
OUT_RING(ring, 0x00000001);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
return a3xx_idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
uint32_t *ptr, len;
int i, ret;
DBG("%s", gpu->name);
if (adreno_is_a305(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
/* Enable WR-REQ: */
gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
/* Set up round robin arbitration between both AXI ports: */
gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
/* Set up AOOO: */
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
} else if (adreno_is_a306(adreno_gpu)) {
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
} else if (adreno_is_a320(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
/* Enable WR-REQ: */
gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
/* Set up round robin arbitration between both AXI ports: */
gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
/* Set up AOOO: */
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
/* Enable 1K sort: */
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
} else if (adreno_is_a330v2(adreno_gpu)) {
/*
* Most of the VBIF registers on 8974v2 have the correct
* values at power on, so we won't modify those if we don't
* need to
*/
/* Enable 1k sort: */
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
/* Enable WR-REQ: */
gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
} else if (adreno_is_a330(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
/* Enable WR-REQ: */
gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
/* Set up round robin arbitration between both AXI ports: */
gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
/* Set up AOOO: */
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
/* Enable 1K sort: */
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Disable VBIF clock gating. This allows AXI to run at a
		 * higher frequency than the GPU:
*/
gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
} else {
BUG();
}
/* Make all blocks contribute to the GPU BUSY perf counter: */
gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
	/* Tune the hysteresis counters for SP and CP idle detection: */
gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
/* Enable the RBBM error reporting bits. This lets us get
* useful information on failure:
*/
gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
/* Enable AHB error reporting: */
gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
/* Turn on the power counters: */
gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
/* Turn on hang detection - this spews a lot of useful information
* into the RBBM registers on a hang:
*/
gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
if (adreno_is_a306(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
else if (adreno_is_a330v2(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a330(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
if (adreno_is_a330v2(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
else if (adreno_is_a330(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
/* Set the OCMEM base address for A330, etc */
if (a3xx_gpu->ocmem.hdl) {
gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
(unsigned int)(a3xx_gpu->ocmem.base >> 14));
}
/* Turn on performance counters: */
gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
/* Enable the perfcntrs that we use.. */
for (i = 0; i < gpu->num_perfcntrs; i++) {
const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
}
gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
ret = adreno_hw_init(gpu);
if (ret)
return ret;
/*
* Use the default ringbuffer size and block size but disable the RPTR
* shadow
*/
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Set the ringbuffer address */
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* setup access protection: */
gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
/* RBBM registers */
gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
/* CP registers */
gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
/* RB registers */
gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
/* VBIF registers */
gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but
* parameterize the pfp ucode addr/data registers..
*/
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG,
AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
} else if (adreno_is_a330(adreno_gpu)) {
		/* NOTE: this (value taken from the downstream Android driver)
* includes some bits outside of the known bitfields. But
* A330 has this "MERCIU queue" thing too, which might
* explain a new bitfield or reshuffling:
*/
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
}
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
return a3xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a3xx_recover(struct msm_gpu *gpu)
{
int i;
adreno_dump_info(gpu);
for (i = 0; i < 8; i++) {
printk("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a3xx_dump(gpu);
gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
adreno_recover(gpu);
}
static void a3xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu);
adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
kfree(a3xx_gpu);
}
static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
A3XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
return false;
}
return true;
}
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
{
uint32_t status;
status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
DBG("%s: %08x", gpu->name, status);
// TODO
gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
msm_gpu_retire(gpu);
return IRQ_HANDLED;
}
static const unsigned int a3xx_registers[] = {
0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
~0 /* sentinel */
};
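/*
 * The table above is a list of inclusive (start, end) register pairs
 * terminated by a ~0 sentinel. A sketch of how a dumper would walk it:
 */
#if 0
static void dump_ranges(struct msm_gpu *gpu, const unsigned int *regs)
{
	int i;

	for (i = 0; regs[i] != ~0; i += 2) {
		unsigned int reg;

		for (reg = regs[i]; reg <= regs[i + 1]; reg++)
			printk("%04x: %08x\n", reg, gpu_read(gpu, reg));
	}
}
#endif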
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
printk("status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
adreno_gpu_state_get(gpu, state);
state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
return state;
}
static u64 a3xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
u64 busy_cycles;
busy_cycles = gpu_read64(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_1_LO);
*out_sample_rate = clk_get_rate(gpu->core_clk);
return busy_cycles;
}
static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
return ring->memptrs->rptr;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
.hw_init = a3xx_hw_init,
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
.submit = a3xx_submit,
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = adreno_show,
#endif
.gpu_busy = a3xx_gpu_busy,
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_create_address_space,
.get_rptr = a3xx_get_rptr,
},
};
static const struct msm_gpu_perfcntr perfcntrs[] = {
{ REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
{ REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
struct a3xx_gpu *a3xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
if (!pdev) {
DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
ret = -ENXIO;
goto fail;
}
a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
if (!a3xx_gpu) {
ret = -ENOMEM;
goto fail;
}
adreno_gpu = &a3xx_gpu->base;
gpu = &adreno_gpu->base;
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
adreno_gpu->registers = a3xx_registers;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
	/* if needed, allocate OCMEM (used as GMEM on a330): */
if (adreno_is_a330(adreno_gpu)) {
ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
adreno_gpu, &a3xx_gpu->ocmem);
if (ret)
goto fail;
}
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
* limp along with just modesetting. If it turns out
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
if (!allow_vram_carveout) {
ret = -ENXIO;
goto fail;
}
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
goto fail;
}
ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
if (IS_ERR(ocmem_icc_path)) {
ret = PTR_ERR(ocmem_icc_path);
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
ocmem_icc_path = NULL;
}
/*
* Set the ICC path to maximum speed for now by multiplying the fastest
* frequency by the bus width (8). We'll want to scale this later on to
* improve battery life.
*/
icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
return gpu;
fail:
if (a3xx_gpu)
a3xx_destroy(&a3xx_gpu->base.base);
return ERR_PTR(ret);
}
/* end of file: drivers/gpu/drm/msm/adreno/a3xx_gpu.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
#include "a2xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
extern bool hang_debug;
static void a2xx_dump(struct msm_gpu *gpu);
static bool a2xx_idle(struct msm_gpu *gpu);
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
}
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->seqno);
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
OUT_PKT3(ring, CP_INTERRUPT, 1);
OUT_RING(ring, 0x80000000);
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}
static bool a2xx_me_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 18);
/* All fields present (bits 9:0) */
OUT_RING(ring, 0x000003ff);
/* Disable/Enable Real-Time Stream processing (present but ignored) */
OUT_RING(ring, 0x00000000);
/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
OUT_RING(ring, 0x00000000);
OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
/* Vertex and Pixel Shader Start Addresses in instructions
* (3 DWORDS per instruction) */
OUT_RING(ring, 0x80000180);
/* Maximum Contexts */
OUT_RING(ring, 0x00000001);
	/* Write confirm interval: the CP will wait wait_interval * 16
	 * clocks between polls */
OUT_RING(ring, 0x00000000);
/* NQ and External Memory Swap */
OUT_RING(ring, 0x00000000);
/* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
if (a2xx_gpu->protection_disabled)
OUT_RING(ring, 0x00000000);
else
OUT_RING(ring, 0x200001f2);
/* Disable header dumping and Header dump address */
OUT_RING(ring, 0x00000000);
/* Header dump size */
OUT_RING(ring, 0x00000000);
if (!a2xx_gpu->protection_disabled) {
/* enable protected mode */
OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
}
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
return a2xx_idle(gpu);
}
static int a2xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
dma_addr_t pt_base, tran_error;
uint32_t *ptr, len;
int i, ret;
msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
DBG("%s", gpu->name);
/* halt ME to avoid ucode upload issues on a20x */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
/* note: kgsl uses 0x00000001 after first reset on a22x */
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
msleep(30);
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
if (adreno_is_a225(adreno_gpu))
gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
/* note: kgsl uses 0x0000ffff for a20x */
gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
/* MPU: physical range */
gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
/* same as parameters in adreno_gpu */
gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
if (!adreno_is_a20x(adreno_gpu))
gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
/* note: gsl doesn't set this */
gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
AXXX_CP_INT_CNTL_IB_ERROR_MASK |
AXXX_CP_INT_CNTL_IB1_INT_MASK |
AXXX_CP_INT_CNTL_RB_INT_MASK);
gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
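	/*
	 * Worked example for the loop below: SZ_16K << i must match the
	 * GMEM size, so 128K of GMEM yields i = 3, 256K yields i = 4 and
	 * 512K yields i = 5.
	 */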
for (i = 3; i <= 5; i++)
if ((SZ_16K << i) == adreno_gpu->info->gmem)
break;
gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
ret = adreno_hw_init(gpu);
if (ret)
return ret;
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but
* parameterize the pfp ucode addr/data registers..
*/
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %x", ptr[1]);
/*
* New firmware files seem to have GPU and firmware version in this
* word (0x20xxxx for A200, 0x220xxx for A220, 0x225xxx for A225).
* Older firmware files, which lack protection support, have 0 instead.
*/
if (ptr[1] == 0) {
dev_warn(gpu->dev->dev,
"Legacy firmware detected, disabling protection support\n");
a2xx_gpu->protection_disabled = true;
}
gpu_write(gpu, REG_AXXX_CP_DEBUG,
AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
return a2xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a2xx_recover(struct msm_gpu *gpu)
{
int i;
adreno_dump_info(gpu);
for (i = 0; i < 8; i++) {
printk("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a2xx_dump(gpu);
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
adreno_recover(gpu);
}
static void a2xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu);
kfree(a2xx_gpu);
}
static bool a2xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
A2XX_RBBM_STATUS_GUI_ACTIVE))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
return false;
}
return true;
}
static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
{
uint32_t mstatus, status;
mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
}
if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
/* only RB_INT is expected */
if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
}
if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
}
msm_gpu_retire(gpu);
return IRQ_HANDLED;
}
static const unsigned int a200_registers[] = {
0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
~0 /* sentinel */
};
static const unsigned int a220_registers[] = {
0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
0x4900, 0x4900, 0x4908, 0x4908,
~0 /* sentinel */
};
static const unsigned int a225_registers[] = {
0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
0x4908, 0x4908,
~0 /* sentinel */
};
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a2xx_dump(struct msm_gpu *gpu)
{
printk("status: %08x\n",
gpu_read(gpu, REG_A2XX_RBBM_STATUS));
adreno_dump(gpu);
}
static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
adreno_gpu_state_get(gpu, state);
state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
return state;
}
static struct msm_gem_address_space *
a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
0xfff * SZ_64K);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
return aspace;
}
static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
return ring->memptrs->rptr;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
.hw_init = a2xx_hw_init,
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a2xx_recover,
.submit = a2xx_submit,
.active_ring = adreno_active_ring,
.irq = a2xx_irq,
.destroy = a2xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = adreno_show,
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = a2xx_create_address_space,
.get_rptr = a2xx_get_rptr,
},
};
static const struct msm_gpu_perfcntr perfcntrs[] = {
/* TODO */
};
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
{
struct a2xx_gpu *a2xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
int ret;
if (!pdev) {
dev_err(dev->dev, "no a2xx device\n");
ret = -ENXIO;
goto fail;
}
a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
if (!a2xx_gpu) {
ret = -ENOMEM;
goto fail;
}
adreno_gpu = &a2xx_gpu->base;
gpu = &adreno_gpu->base;
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
if (adreno_is_a20x(adreno_gpu))
adreno_gpu->registers = a200_registers;
else if (adreno_is_a225(adreno_gpu))
adreno_gpu->registers = a225_registers;
else
adreno_gpu->registers = a220_registers;
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
if (!allow_vram_carveout) {
ret = -ENXIO;
goto fail;
}
}
return gpu;
fail:
if (a2xx_gpu)
a2xx_destroy(&a2xx_gpu->base.base);
return ERR_PTR(ret);
}
/* end of file: drivers/gpu/drm/msm/adreno/a2xx_gpu.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#include <linux/ascii85.h>
#include "msm_gem.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.h"
#include "a6xx_gpu_state.h"
#include "a6xx_gmu.xml.h"
struct a6xx_gpu_state_obj {
const void *handle;
u32 *data;
};
struct a6xx_gpu_state {
struct msm_gpu_state base;
struct a6xx_gpu_state_obj *gmu_registers;
int nr_gmu_registers;
struct a6xx_gpu_state_obj *registers;
int nr_registers;
struct a6xx_gpu_state_obj *shaders;
int nr_shaders;
struct a6xx_gpu_state_obj *clusters;
int nr_clusters;
struct a6xx_gpu_state_obj *dbgahb_clusters;
int nr_dbgahb_clusters;
struct a6xx_gpu_state_obj *indexed_regs;
int nr_indexed_regs;
struct a6xx_gpu_state_obj *debugbus;
int nr_debugbus;
struct a6xx_gpu_state_obj *vbif_debugbus;
struct a6xx_gpu_state_obj *cx_debugbus;
int nr_cx_debugbus;
struct msm_gpu_state_bo *gmu_log;
struct msm_gpu_state_bo *gmu_hfi;
struct msm_gpu_state_bo *gmu_debug;
s32 hfi_queue_history[2][HFI_HISTORY_SZ];
struct list_head objs;
bool gpu_initialized;
};
static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
{
in[0] = val;
in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
return 2;
}
static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
{
in[0] = target;
in[1] = (((u64) reg) << 44 | dwords);
return 2;
}
static inline int CRASHDUMP_FINI(u64 *in)
{
in[0] = 0;
in[1] = 0;
return 2;
}
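/*
 * Example script built with the helpers above (illustrative; 0x800 is a
 * made-up register offset). Each helper emits one 128-bit crashdumper
 * instruction and returns the number of u64 slots consumed:
 */
#if 0
static void build_example_script(u64 *in, u64 scratch_iova)
{
	in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, 0);
	in += CRASHDUMP_READ(in, 0x800, 16, scratch_iova);
	CRASHDUMP_FINI(in);	/* two zero qwords terminate the script */
}
#endif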
struct a6xx_crashdumper {
void *ptr;
struct drm_gem_object *bo;
u64 iova;
};
struct a6xx_state_memobj {
struct list_head node;
unsigned long long data[];
};
static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
{
struct a6xx_state_memobj *obj =
kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
list_add_tail(&obj->node, &a6xx_state->objs);
return &obj->data;
}
static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
size_t size)
{
void *dst = state_kcalloc(a6xx_state, 1, size);
if (dst)
memcpy(dst, src, size);
return dst;
}
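/*
 * Because every allocation above is prepended with a list node, tearing
 * the whole state down is a single list walk. A sketch of the matching
 * free path (simplified from the gpu_state_put side):
 */
#if 0
static void state_free_all(struct a6xx_gpu_state *a6xx_state)
{
	struct a6xx_state_memobj *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
		kvfree(obj);
}
#endif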
/*
* Allocate 1MB for the crashdumper scratch region - 8k for the script and
* the rest for the data
*/
#define A6XX_CD_DATA_OFFSET 8192
#define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
static int a6xx_crashdumper_init(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
SZ_1M, MSM_BO_WC, gpu->aspace,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
msm_gem_object_set_name(dumper->bo, "crashdump");
return PTR_ERR_OR_ZERO(dumper->ptr);
}
static int a6xx_crashdumper_run(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
u32 val;
int ret;
if (IS_ERR_OR_NULL(dumper->ptr))
return -EINVAL;
if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
return -EINVAL;
/* Make sure all pending memory writes are posted */
wmb();
gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
val & 0x02, 100, 10000);
gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
return ret;
}
/* read a value from the GX debug bus */
static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
u32 *data)
{
u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
/* Wait 1 us to make sure the data is flowing */
udelay(1);
data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
return 2;
}
#define cxdbg_write(ptr, offset, val) \
msm_writel((val), (ptr) + ((offset) << 2))
#define cxdbg_read(ptr, offset) \
msm_readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
u32 *data)
{
u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
/* Wait 1 us to make sure the data is flowing */
udelay(1);
data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
return 2;
}
/* Read a chunk of data from the VBIF debug bus */
static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
u32 reg, int count, u32 *data)
{
int i;
gpu_write(gpu, ctrl0, reg);
for (i = 0; i < count; i++) {
gpu_write(gpu, ctrl1, i);
data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
}
return count;
}
#define AXI_ARB_BLOCKS 2
#define XIN_AXI_BLOCKS 5
#define XIN_CORE_BLOCKS 4
#define VBIF_DEBUGBUS_BLOCK_SIZE \
((16 * AXI_ARB_BLOCKS) + \
(18 * XIN_AXI_BLOCKS) + \
(12 * XIN_CORE_BLOCKS))
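/*
 * With the block counts above this works out to (16 * 2) + (18 * 5) +
 * (12 * 4) = 170 u32 values per VBIF debugbus snapshot.
 */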
static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_gpu_state_obj *obj)
{
u32 clk, *ptr;
int i;
obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
sizeof(u32));
if (!obj->data)
return;
obj->handle = NULL;
/* Get the current clock setting */
clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
/* Force on the bus so we can read it */
gpu_write(gpu, REG_A6XX_VBIF_CLKON,
clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
/* We will read from BUS2 first, so disable BUS1 */
gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
/* Enable the VBIF bus for reading */
gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
ptr = obj->data;
for (i = 0; i < AXI_ARB_BLOCKS; i++)
ptr += vbif_debugbus_read(gpu,
REG_A6XX_VBIF_TEST_BUS2_CTRL0,
REG_A6XX_VBIF_TEST_BUS2_CTRL1,
1 << (i + 16), 16, ptr);
for (i = 0; i < XIN_AXI_BLOCKS; i++)
ptr += vbif_debugbus_read(gpu,
REG_A6XX_VBIF_TEST_BUS2_CTRL0,
REG_A6XX_VBIF_TEST_BUS2_CTRL1,
1 << i, 18, ptr);
/* Stop BUS2 so we can turn on BUS1 */
gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
for (i = 0; i < XIN_CORE_BLOCKS; i++)
ptr += vbif_debugbus_read(gpu,
REG_A6XX_VBIF_TEST_BUS1_CTRL0,
REG_A6XX_VBIF_TEST_BUS1_CTRL1,
1 << i, 12, ptr);
/* Restore the VBIF clock setting */
gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
}
static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_debugbus_block *block,
struct a6xx_gpu_state_obj *obj)
{
int i;
u32 *ptr;
obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
if (!obj->data)
return;
obj->handle = block;
for (ptr = obj->data, i = 0; i < block->count; i++)
ptr += debugbus_read(gpu, block->id, i, ptr);
}
static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_debugbus_block *block,
struct a6xx_gpu_state_obj *obj)
{
int i;
u32 *ptr;
obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
if (!obj->data)
return;
obj->handle = block;
for (ptr = obj->data, i = 0; i < block->count; i++)
ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
}
static void a6xx_get_debugbus(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
struct resource *res;
void __iomem *cxdbg = NULL;
int nr_debugbus_blocks;
/* Set up the GX debug bus */
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
/* Set up the CX debug bus - it lives elsewhere in the system so do a
* temporary ioremap for the registers
*/
res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
"cx_dbgc");
if (res)
cxdbg = ioremap(res->start, resource_size(res));
if (cxdbg) {
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
0x76543210);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
0xFEDCBA98);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
(a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
if (adreno_is_a650_family(to_adreno_gpu(gpu)))
nr_debugbus_blocks += ARRAY_SIZE(a650_debugbus_blocks);
a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
sizeof(*a6xx_state->debugbus));
if (a6xx_state->debugbus) {
int i;
for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
a6xx_get_debugbus_block(gpu,
a6xx_state,
&a6xx_debugbus_blocks[i],
&a6xx_state->debugbus[i]);
a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
		/*
		 * GBIF exposes the same debugbus interface as the other GPU
		 * blocks, so fall back to the default path when the GPU uses
		 * GBIF. Note that GBIF reuses exactly the same block ID as
		 * VBIF.
		 */
if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
a6xx_get_debugbus_block(gpu, a6xx_state,
&a6xx_gbif_debugbus_block,
&a6xx_state->debugbus[i]);
a6xx_state->nr_debugbus += 1;
}
		if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
			/*
			 * Append after the blocks captured above instead of
			 * overwriting them from index 0.
			 */
			for (i = 0; i < ARRAY_SIZE(a650_debugbus_blocks); i++)
				a6xx_get_debugbus_block(gpu,
					a6xx_state,
					&a650_debugbus_blocks[i],
					&a6xx_state->debugbus[a6xx_state->nr_debugbus + i]);

			a6xx_state->nr_debugbus += ARRAY_SIZE(a650_debugbus_blocks);
		}
}
/* Dump the VBIF debugbus on applicable targets */
if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
a6xx_state->vbif_debugbus =
state_kcalloc(a6xx_state, 1,
sizeof(*a6xx_state->vbif_debugbus));
if (a6xx_state->vbif_debugbus)
a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
a6xx_state->vbif_debugbus);
}
if (cxdbg) {
a6xx_state->cx_debugbus =
state_kcalloc(a6xx_state,
ARRAY_SIZE(a6xx_cx_debugbus_blocks),
sizeof(*a6xx_state->cx_debugbus));
if (a6xx_state->cx_debugbus) {
int i;
for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
a6xx_get_cx_debugbus_block(cxdbg,
a6xx_state,
&a6xx_cx_debugbus_blocks[i],
&a6xx_state->cx_debugbus[i]);
a6xx_state->nr_cx_debugbus =
ARRAY_SIZE(a6xx_cx_debugbus_blocks);
}
iounmap(cxdbg);
}
}
#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
/* Read a data cluster from behind the AHB aperture */
static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_dbgahb_cluster *dbgahb,
struct a6xx_gpu_state_obj *obj,
struct a6xx_crashdumper *dumper)
{
u64 *in = dumper->ptr;
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
size_t datasize;
int i, regcount = 0;
for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
int j;
in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
(dbgahb->statetype + i * 2) << 8);
for (j = 0; j < dbgahb->count; j += 2) {
int count = RANGE(dbgahb->registers, j);
u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
dbgahb->registers[j] - (dbgahb->base >> 2);
in += CRASHDUMP_READ(in, offset, count, out);
out += count * sizeof(u32);
if (i == 0)
regcount += count;
}
}
CRASHDUMP_FINI(in);
datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
return;
if (a6xx_crashdumper_run(gpu, dumper))
return;
obj->handle = dbgahb;
obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
datasize);
}
static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i;
a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
ARRAY_SIZE(a6xx_dbgahb_clusters),
sizeof(*a6xx_state->dbgahb_clusters));
if (!a6xx_state->dbgahb_clusters)
return;
a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
a6xx_get_dbgahb_cluster(gpu, a6xx_state,
&a6xx_dbgahb_clusters[i],
&a6xx_state->dbgahb_clusters[i], dumper);
}
/* Read a data cluster from the CP aperture with the crashdumper */
static void a6xx_get_cluster(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_cluster *cluster,
struct a6xx_gpu_state_obj *obj,
struct a6xx_crashdumper *dumper)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
u64 *in = dumper->ptr;
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
size_t datasize;
int i, regcount = 0;
u32 id = cluster->id;
/* Skip registers that are not present on older generations */
if (!adreno_is_a660_family(adreno_gpu) &&
cluster->registers == a660_fe_cluster)
return;
if (adreno_is_a650_family(adreno_gpu) &&
cluster->registers == a6xx_ps_cluster)
id = CLUSTER_VPC_PS;
/* Some clusters need a selector register to be programmed too */
if (cluster->sel_reg)
in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
int j;
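/* Select the cluster in bits [8:15] and the context in both low nibbles */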
in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
(id << 8) | (i << 4) | i);
for (j = 0; j < cluster->count; j += 2) {
int count = RANGE(cluster->registers, j);
in += CRASHDUMP_READ(in, cluster->registers[j],
count, out);
out += count * sizeof(u32);
if (i == 0)
regcount += count;
}
}
CRASHDUMP_FINI(in);
datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
return;
if (a6xx_crashdumper_run(gpu, dumper))
return;
obj->handle = cluster;
obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
datasize);
}
static void a6xx_get_clusters(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i;
a6xx_state->clusters = state_kcalloc(a6xx_state,
ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
if (!a6xx_state->clusters)
return;
a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
&a6xx_state->clusters[i], dumper);
}
/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
static void a6xx_get_shader_block(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_shader_block *block,
struct a6xx_gpu_state_obj *obj,
struct a6xx_crashdumper *dumper)
{
u64 *in = dumper->ptr;
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
int i;
if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
return;
for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
(block->type << 8) | i);
/* Dump each bank to its own slice of the capture buffer */
in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
block->size, out);
out += block->size * sizeof(u32);
}
CRASHDUMP_FINI(in);
if (a6xx_crashdumper_run(gpu, dumper))
return;
obj->handle = block;
obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
datasize);
}
static void a6xx_get_shaders(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i;
a6xx_state->shaders = state_kcalloc(a6xx_state,
ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
if (!a6xx_state->shaders)
return;
a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
&a6xx_state->shaders[i], dumper);
}
/* Read registers from behind the HLSQ aperture with the crashdumper */
static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_registers *regs,
struct a6xx_gpu_state_obj *obj,
struct a6xx_crashdumper *dumper)
{
u64 *in = dumper->ptr;
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
int i, regcount = 0;
in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
for (i = 0; i < regs->count; i += 2) {
u32 count = RANGE(regs->registers, i);
u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
regs->registers[i] - (regs->val0 >> 2);
in += CRASHDUMP_READ(in, offset, count, out);
out += count * sizeof(u32);
regcount += count;
}
CRASHDUMP_FINI(in);
if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
return;
if (a6xx_crashdumper_run(gpu, dumper))
return;
obj->handle = regs;
obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
regcount * sizeof(u32));
}
/* Read a block of registers using the crashdumper */
static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_registers *regs,
struct a6xx_gpu_state_obj *obj,
struct a6xx_crashdumper *dumper)
{
u64 *in = dumper->ptr;
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
int i, regcount = 0;
/* Skip unsupported registers on older generations */
if (!adreno_is_a660_family(to_adreno_gpu(gpu)) &&
(regs->registers == a660_registers))
return;
/* Some blocks might need to program a selector register first */
if (regs->val0)
in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
for (i = 0; i < regs->count; i += 2) {
u32 count = RANGE(regs->registers, i);
in += CRASHDUMP_READ(in, regs->registers[i], count, out);
out += count * sizeof(u32);
regcount += count;
}
CRASHDUMP_FINI(in);
if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
return;
if (a6xx_crashdumper_run(gpu, dumper))
return;
obj->handle = regs;
obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
regcount * sizeof(u32));
}
/* Read a block of registers via AHB */
static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_registers *regs,
struct a6xx_gpu_state_obj *obj)
{
int i, regcount = 0, index = 0;
/* Skip unsupported registers on older generations */
if (!adreno_is_a660_family(to_adreno_gpu(gpu)) &&
(regs->registers == a660_registers))
return;
for (i = 0; i < regs->count; i += 2)
regcount += RANGE(regs->registers, i);
obj->handle = (const void *) regs;
obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
if (!obj->data)
return;
for (i = 0; i < regs->count; i += 2) {
u32 count = RANGE(regs->registers, i);
int j;
for (j = 0; j < count; j++)
obj->data[index++] = gpu_read(gpu,
regs->registers[i] + j);
}
}
/* Read a block of GMU registers */
static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_registers *regs,
struct a6xx_gpu_state_obj *obj,
bool rscc)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int i, regcount = 0, index = 0;
for (i = 0; i < regs->count; i += 2)
regcount += RANGE(regs->registers, i);
obj->handle = (const void *) regs;
obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
if (!obj->data)
return;
for (i = 0; i < regs->count; i += 2) {
u32 count = RANGE(regs->registers, i);
int j;
for (j = 0; j < count; j++) {
u32 offset = regs->registers[i] + j;
u32 val;
if (rscc)
val = gmu_read_rscc(gmu, offset);
else
val = gmu_read(gmu, offset);
obj->data[index++] = val;
}
}
}
static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
3, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
a6xx_state->nr_gmu_registers = 3;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
&a6xx_state->gmu_registers[0], false);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
&a6xx_state->gmu_registers[1], true);
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return;
/* Set the fence to ALLOW mode so we can access the registers */
gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
&a6xx_state->gmu_registers[2], false);
}
static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo)
{
struct msm_gpu_state_bo *snapshot;
if (!bo->size)
return NULL;
snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
if (!snapshot)
return NULL;
snapshot->iova = bo->iova;
snapshot->size = bo->size;
snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL);
if (!snapshot->data)
return NULL;
memcpy(snapshot->data, bo->virt, bo->size);
return snapshot;
}
static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
unsigned i, j;
BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
struct a6xx_hfi_queue *queue = &gmu->queues[i];
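/*
* history_idx is the next slot to be written, i.e. the oldest entry;
* rotate the copy so the snapshot reads in chronological order.
*/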
for (j = 0; j < HFI_HISTORY_SZ; j++) {
unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ;
a6xx_state->hfi_queue_history[i][j] = queue->history[idx];
}
}
}
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
ARRAY_SIZE(a6xx_reglist) +
ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
a6xx_state->registers = state_kcalloc(a6xx_state,
count, sizeof(*a6xx_state->registers));
if (!a6xx_state->registers)
return;
a6xx_state->nr_registers = count;
for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_ahb_reglist[i],
&a6xx_state->registers[index++]);
if (a6xx_has_gbif(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_gbif_reglist,
&a6xx_state->registers[index++]);
else
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_vbif_reglist,
&a6xx_state->registers[index++]);
if (!dumper) {
/*
* We can't use the crashdumper when the SMMU is stalled,
* because the GPU has no memory access until we resume
* translation (but we don't want to do that until after
* we have captured as much useful GPU state as possible).
* So instead collect registers via the CPU:
*/
for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_reglist[i],
&a6xx_state->registers[index++]);
return;
}
for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
a6xx_get_crashdumper_registers(gpu,
a6xx_state, &a6xx_reglist[i],
&a6xx_state->registers[index++],
dumper);
for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
a6xx_get_crashdumper_hlsq_registers(gpu,
a6xx_state, &a6xx_hlsq_reglist[i],
&a6xx_state->registers[index++],
dumper);
}
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
{
/* The value at [16:31] is in 4dword units. Convert it to dwords */
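/* (>> 16 extracts the field, << 2 scales to dwords; combined: >> 14,
* e.g. 0x00400000 >> 14 == 256 dwords) */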
return gpu_read(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2) >> 14;
}
/* Read a block of data from an indexed register pair */
static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_indexed_registers *indexed,
struct a6xx_gpu_state_obj *obj)
{
int i;
obj->handle = (const void *) indexed;
if (indexed->count_fn)
indexed->count = indexed->count_fn(gpu);
obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
if (!obj->data)
return;
/* All the indexed banks start at address 0 */
gpu_write(gpu, indexed->addr, 0);
/* Read the data - each read increments the internal address by 1 */
for (i = 0; i < indexed->count; i++)
obj->data[i] = gpu_read(gpu, indexed->data);
}
static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
u32 mempool_size;
int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
int i;
a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
sizeof(*a6xx_state->indexed_regs));
if (!a6xx_state->indexed_regs)
return;
for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
&a6xx_state->indexed_regs[i]);
if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
u32 val;
val = gpu_read(gpu, REG_A6XX_CP_CHICKEN_DBG);
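/*
* Setting BIT(2) in CP_CHICKEN_DBG appears to make the mempool safe to
* read on a650-family parts, which don't support the MEM_POOL_SIZE
* trick used below for older GPUs.
*/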
gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, val | 4);
/* Get the contents of the CP mempool */
a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
&a6xx_state->indexed_regs[i]);
gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, val);
a6xx_state->nr_indexed_regs = count;
return;
}
/* Set the CP mempool size to 0 to stabilize it while dumping */
mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
/* Get the contents of the CP mempool */
a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
&a6xx_state->indexed_regs[i]);
/*
* Offset 0x2000 in the mempool is the size - copy the saved size over
* so the data is consistent
*/
a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
a6xx_state->nr_indexed_regs = count;
}
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
{
struct a6xx_crashdumper _dumper = { 0 }, *dumper = NULL;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
GFP_KERNEL);
bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
if (!a6xx_state)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&a6xx_state->objs);
/* Get the generic state from the adreno core */
adreno_gpu_state_get(gpu, &a6xx_state->base);
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
a6xx_get_gmu_registers(gpu, a6xx_state);
a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
}
/* If GX isn't on, the rest of the data isn't going to be accessible */
if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return &a6xx_state->base;
/* Get the banks of indexed registers */
a6xx_get_indexed_registers(gpu, a6xx_state);
/*
* Try to initialize the crashdumper, if we are not dumping state
* with the SMMU stalled. The crashdumper needs memory access to
* write out GPU state, so we need to skip this when the SMMU is
* stalled in response to an iova fault
*/
if (!stalled && !gpu->needs_hw_init &&
!a6xx_crashdumper_init(gpu, &_dumper)) {
dumper = &_dumper;
}
a6xx_get_registers(gpu, a6xx_state, dumper);
if (dumper) {
a6xx_get_shaders(gpu, a6xx_state, dumper);
a6xx_get_clusters(gpu, a6xx_state, dumper);
a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
msm_gem_kernel_put(dumper->bo, gpu->aspace);
}
if (snapshot_debugbus)
a6xx_get_debugbus(gpu, a6xx_state);
a6xx_state->gpu_initialized = !gpu->needs_hw_init;
return &a6xx_state->base;
}
static void a6xx_gpu_state_destroy(struct kref *kref)
{
struct a6xx_state_memobj *obj, *tmp;
struct msm_gpu_state *state = container_of(kref,
struct msm_gpu_state, ref);
struct a6xx_gpu_state *a6xx_state = container_of(state,
struct a6xx_gpu_state, base);
if (a6xx_state->gmu_log)
kvfree(a6xx_state->gmu_log->data);
if (a6xx_state->gmu_hfi)
kvfree(a6xx_state->gmu_hfi->data);
if (a6xx_state->gmu_debug)
kvfree(a6xx_state->gmu_debug->data);
list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
list_del(&obj->node);
kvfree(obj);
}
adreno_gpu_state_destroy(state);
kfree(a6xx_state);
}
int a6xx_gpu_state_put(struct msm_gpu_state *state)
{
if (IS_ERR_OR_NULL(state))
return 1;
return kref_put(&state->ref, a6xx_gpu_state_destroy);
}
static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
struct drm_printer *p)
{
int i, index = 0;
if (!data)
return;
for (i = 0; i < count; i += 2) {
u32 count = RANGE(registers, i);
u32 offset = registers[i];
int j;
for (j = 0; j < count; index++, offset++, j++) {
if (data[index] == 0xdeafbead)
continue;
drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
offset << 2, data[index]);
}
}
}
static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
{
char out[ASCII85_BUFSZ];
long i, l, datalen = 0;
for (i = 0; i < len >> 2; i++) {
if (data[i])
datalen = (i + 1) << 2;
}
if (datalen == 0)
return;
drm_puts(p, " data: !!ascii85 |\n");
drm_puts(p, " ");
l = ascii85_encode_len(datalen);
for (i = 0; i < l; i++)
drm_puts(p, ascii85_encode(data[i], out));
drm_puts(p, "\n");
}
static void print_name(struct drm_printer *p, const char *fmt, const char *name)
{
drm_puts(p, fmt);
drm_puts(p, name);
drm_puts(p, "\n");
}
static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
struct drm_printer *p)
{
const struct a6xx_shader_block *block = obj->handle;
int i;
if (!obj->handle)
return;
print_name(p, " - type: ", block->name);
for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
drm_printf(p, " - bank: %d\n", i);
drm_printf(p, " size: %d\n", block->size);
if (!obj->data)
continue;
print_ascii85(p, block->size << 2,
obj->data + (block->size * i));
}
}
static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
struct drm_printer *p)
{
int ctx, index = 0;
for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
int j;
drm_printf(p, " - context: %d\n", ctx);
for (j = 0; j < size; j += 2) {
u32 count = RANGE(registers, j);
u32 offset = registers[j];
int k;
for (k = 0; k < count; index++, offset++, k++) {
if (data[index] == 0xdeafbead)
continue;
drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
offset << 2, data[index]);
}
}
}
}
static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
struct drm_printer *p)
{
const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
if (dbgahb) {
print_name(p, " - cluster-name: ", dbgahb->name);
a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
obj->data, p);
}
}
static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
struct drm_printer *p)
{
const struct a6xx_cluster *cluster = obj->handle;
if (cluster) {
print_name(p, " - cluster-name: ", cluster->name);
a6xx_show_cluster_data(cluster->registers, cluster->count,
obj->data, p);
}
}
static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
struct drm_printer *p)
{
const struct a6xx_indexed_registers *indexed = obj->handle;
if (!indexed)
return;
print_name(p, " - regs-name: ", indexed->name);
drm_printf(p, " dwords: %d\n", indexed->count);
print_ascii85(p, indexed->count << 2, obj->data);
}
static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
u32 *data, struct drm_printer *p)
{
if (block) {
print_name(p, " - debugbus-block: ", block->name);
/*
* count for regular debugbus data is in quadwords,
* but print the size in dwords for consistency
*/
drm_printf(p, " count: %d\n", block->count << 1);
print_ascii85(p, block->count << 3, data);
}
}
static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
struct drm_printer *p)
{
int i;
for (i = 0; i < a6xx_state->nr_debugbus; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
a6xx_show_debugbus_block(obj->handle, obj->data, p);
}
if (a6xx_state->vbif_debugbus) {
struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n");
drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
/* vbif debugbus data is in dwords. Confusing, huh? */
print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
}
for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
a6xx_show_debugbus_block(obj->handle, obj->data, p);
}
}
void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p)
{
struct a6xx_gpu_state *a6xx_state = container_of(state,
struct a6xx_gpu_state, base);
int i;
if (IS_ERR_OR_NULL(state))
return;
drm_printf(p, "gpu-initialized: %d\n", a6xx_state->gpu_initialized);
adreno_show(gpu, state, p);
drm_puts(p, "gmu-log:\n");
if (a6xx_state->gmu_log) {
struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log;
drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova);
drm_printf(p, " size: %zu\n", gmu_log->size);
adreno_show_object(p, &gmu_log->data, gmu_log->size,
&gmu_log->encoded);
}
drm_puts(p, "gmu-hfi:\n");
if (a6xx_state->gmu_hfi) {
struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi;
unsigned i, j;
drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova);
drm_printf(p, " size: %zu\n", gmu_hfi->size);
for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) {
drm_printf(p, " queue-history[%u]:", i);
for (j = 0; j < HFI_HISTORY_SZ; j++) {
drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]);
}
drm_printf(p, "\n");
}
adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size,
&gmu_hfi->encoded);
}
drm_puts(p, "gmu-debug:\n");
if (a6xx_state->gmu_debug) {
struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug;
drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova);
drm_printf(p, " size: %zu\n", gmu_debug->size);
adreno_show_object(p, &gmu_debug->data, gmu_debug->size,
&gmu_debug->encoded);
}
drm_puts(p, "registers:\n");
for (i = 0; i < a6xx_state->nr_registers; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
const struct a6xx_registers *regs = obj->handle;
if (!obj->handle)
continue;
a6xx_show_registers(regs->registers, obj->data, regs->count, p);
}
drm_puts(p, "registers-gmu:\n");
for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
const struct a6xx_registers *regs = obj->handle;
if (!obj->handle)
continue;
a6xx_show_registers(regs->registers, obj->data, regs->count, p);
}
drm_puts(p, "indexed-registers:\n");
for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
drm_puts(p, "shader-blocks:\n");
for (i = 0; i < a6xx_state->nr_shaders; i++)
a6xx_show_shader(&a6xx_state->shaders[i], p);
drm_puts(p, "clusters:\n");
for (i = 0; i < a6xx_state->nr_clusters; i++)
a6xx_show_cluster(&a6xx_state->clusters[i], p);
for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
drm_puts(p, "debugbus:\n");
a6xx_show_debugbus(a6xx_state, p);
}
| linux-master | drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
#include <linux/ascii85.h>
#include <linux/interconnect.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/nvmem-consumer.h>
#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "a6xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
static u64 address_space_size;
MODULE_PARM_DESC(address_space_size, "Override for size of a process's private GPU address space");
module_param(address_space_size, ullong, 0600);
static bool zap_available = true;
static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
u32 pasid)
{
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
const char *signed_fwname = NULL;
struct device_node *np, *mem_np;
struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
void *mem_region = NULL;
int ret;
if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
zap_available = false;
return -EINVAL;
}
np = of_get_child_by_name(dev->of_node, "zap-shader");
if (!np) {
zap_available = false;
return -ENODEV;
}
mem_np = of_parse_phandle(np, "memory-region", 0);
of_node_put(np);
if (!mem_np) {
zap_available = false;
return -EINVAL;
}
ret = of_address_to_resource(mem_np, 0, &r);
of_node_put(mem_np);
if (ret)
return ret;
mem_phys = r.start;
/*
* Check for a firmware-name property. This is the new scheme
* to handle firmware that may be signed with device specific
* keys, allowing us to have a different zap fw path for different
* devices.
*
* If the firmware-name property is found, we bypass the
* adreno_request_fw() mechanism, because we don't need to handle
* the /lib/firmware/qcom/... vs /lib/firmware/... case.
*
* If the firmware-name property is not found, for backwards
* compatibility we fall back to the fwname from the gpulist
* table.
*/
of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
if (signed_fwname) {
fwname = signed_fwname;
ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
if (ret)
fw = ERR_PTR(ret);
} else if (fwname) {
/* Request the MDT file from the default location: */
fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
} else {
/*
* For new targets, we require the firmware-name property,
* if a zap-shader is required, rather than falling back
* to a firmware name specified in gpulist.
*
* Because the firmware is signed with a (potentially)
* device specific key, having the name come from gpulist
* was a bad idea, and is only provided for backwards
* compatibility for older targets.
*/
return -ENODEV;
}
if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
return PTR_ERR(fw);
}
/* Figure out how much memory we need */
mem_size = qcom_mdt_get_size(fw);
if (mem_size < 0) {
ret = mem_size;
goto out;
}
if (mem_size > resource_size(&r)) {
DRM_DEV_ERROR(dev,
"memory region is too small to load the MDT\n");
ret = -E2BIG;
goto out;
}
/* Allocate memory for the firmware image */
mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {
ret = -ENOMEM;
goto out;
}
/*
* Load the rest of the MDT
*
* Note that we could be dealing with two different paths, since
* with upstream linux-firmware it would be in a qcom/ subdir..
* adreno_request_fw() handles this, but qcom_mdt_load() does
* not. But since we've already gotten through adreno_request_fw()
* we know which of the two cases it is:
*/
if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
ret = qcom_mdt_load(dev, fw, fwname, pasid,
mem_region, mem_phys, mem_size, NULL);
} else {
char *newname;
newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
ret = qcom_mdt_load(dev, fw, newname, pasid,
mem_region, mem_phys, mem_size, NULL);
kfree(newname);
}
if (ret)
goto out;
/* Send the image to the secure world */
ret = qcom_scm_pas_auth_and_reset(pasid);
/*
* If the scm call returns -EOPNOTSUPP we assume that this target
* doesn't need/support the zap shader so quietly fail
*/
if (ret == -EOPNOTSUPP)
zap_available = false;
else if (ret)
DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
out:
if (mem_region)
memunmap(mem_region);
release_firmware(fw);
return ret;
}
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct platform_device *pdev = gpu->pdev;
/* Short cut if we determine the zap shader isn't available/needed */
if (!zap_available)
return -ENODEV;
/* We need SCM to be able to load the firmware */
if (!qcom_scm_is_available()) {
DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
return -EPROBE_DEFER;
}
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
struct msm_gem_address_space *
adreno_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
return adreno_iommu_create_address_space(gpu, pdev, 0);
}
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev,
unsigned long quirks)
{
struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
geometry = msm_iommu_get_geometry(mmu);
if (IS_ERR(geometry))
return ERR_CAST(geometry);
/*
* Use the aperture start or SZ_16M, whichever is greater. This will
* ensure that we align with the allocated pagetable range while still
* allowing room in the lower 32 bits for GMEM and whatnot
*/
start = max_t(u64, SZ_16M, geometry->aperture_start);
size = geometry->aperture_end - start + 1;
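/* GENMASK_ULL(48, 0) below keeps the start within the SMMU's 49-bit IOVA space */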
aspace = msm_gem_address_space_create(mmu, "gpu",
start & GENMASK_ULL(48, 0), size);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
return aspace;
}
u64 adreno_private_address_space_size(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
if (address_space_size)
return address_space_size;
if (adreno_gpu->info->address_space_size)
return adreno_gpu->info->address_space_size;
return SZ_4G;
}
#define ARM_SMMU_FSR_TF BIT(1)
#define ARM_SMMU_FSR_PF BIT(3)
#define ARM_SMMU_FSR_EF BIT(4)
int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4])
{
const char *type = "UNKNOWN";
bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
/*
* If we aren't going to be resuming later from fault_worker, then do
* it now.
*/
if (!do_devcoredump) {
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
}
/*
* Print a default message if we couldn't get the data from the
* adreno-smmu-priv
*/
if (!info) {
pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n",
iova, flags,
scratch[0], scratch[1], scratch[2], scratch[3]);
return 0;
}
if (info->fsr & ARM_SMMU_FSR_TF)
type = "TRANSLATION";
else if (info->fsr & ARM_SMMU_FSR_PF)
type = "PERMISSION";
else if (info->fsr & ARM_SMMU_FSR_EF)
type = "EXTERNAL";
pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
info->ttbr0, iova,
flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ",
type, block,
scratch[0], scratch[1], scratch[2], scratch[3]);
if (do_devcoredump) {
/* Turn off the hangcheck timer to keep it from bothering us */
del_timer(&gpu->hangcheck_timer);
gpu->fault_info.ttbr0 = info->ttbr0;
gpu->fault_info.iova = iova;
gpu->fault_info.flags = flags;
gpu->fault_info.type = type;
gpu->fault_info.block = block;
kthread_queue_work(gpu->worker, &gpu->fault_work);
}
return 0;
}
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* No pointer params yet */
if (*len != 0)
return -EINVAL;
switch (param) {
case MSM_PARAM_GPU_ID:
*value = adreno_gpu->info->revn;
return 0;
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->info->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
*value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->chip_id;
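/*
* GPUs new enough to lack a legacy revn are identified by the full
* chip id, so the speedbin is also reported in the upper 32 bits.
*/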
if (!adreno_gpu->info->revn)
*value |= ((uint64_t) adreno_gpu->speedbin) << 32;
return 0;
case MSM_PARAM_MAX_FREQ:
*value = adreno_gpu->base.fast_rate;
return 0;
case MSM_PARAM_TIMESTAMP:
if (adreno_gpu->funcs->get_timestamp) {
int ret;
pm_runtime_get_sync(&gpu->pdev->dev);
ret = adreno_gpu->funcs->get_timestamp(gpu, value);
pm_runtime_put_autosuspend(&gpu->pdev->dev);
return ret;
}
return -EINVAL;
case MSM_PARAM_PRIORITIES:
*value = gpu->nr_rings * NR_SCHED_PRIORITIES;
return 0;
case MSM_PARAM_PP_PGTABLE:
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
if (ctx->aspace)
*value = gpu->global_faults + ctx->aspace->faults;
else
*value = gpu->global_faults;
return 0;
case MSM_PARAM_SUSPENDS:
*value = gpu->suspend_count;
return 0;
case MSM_PARAM_VA_START:
if (ctx->aspace == gpu->aspace)
return -EINVAL;
*value = ctx->aspace->va_start;
return 0;
case MSM_PARAM_VA_SIZE:
if (ctx->aspace == gpu->aspace)
return -EINVAL;
*value = ctx->aspace->va_size;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE:
/* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so
* that should be a reasonable upper bound
*/
if (len > PAGE_SIZE)
return -EINVAL;
break;
default:
if (len != 0)
return -EINVAL;
}
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE: {
char *str, **paramp;
str = memdup_user_nul(u64_to_user_ptr(value), len);
if (IS_ERR(str))
return PTR_ERR(str);
mutex_lock(&gpu->lock);
if (param == MSM_PARAM_COMM) {
paramp = &ctx->comm;
} else {
paramp = &ctx->cmdline;
}
kfree(*paramp);
*paramp = str;
mutex_unlock(&gpu->lock);
return 0;
}
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return msm_file_private_set_sysprof(ctx, gpu, value);
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
struct drm_device *drm = adreno_gpu->base.dev;
const struct firmware *fw = NULL;
char *newname;
int ret;
newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
if (!newname)
return ERR_PTR(-ENOMEM);
/*
* Try first to load from qcom/$fwfile using a direct load (to avoid
* a potential timeout waiting for usermode helper)
*/
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_NEW)) {
ret = request_firmware_direct(&fw, newname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
/*
* Then try the legacy location without qcom/ prefix
*/
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
fwname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
fwname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
/*
* Finally fall back to request_firmware() for cases where the
* usermode helper is needed (I think mainly android)
*/
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
ret = request_firmware(&fw, newname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
fw = ERR_PTR(-ENOENT);
out:
kfree(newname);
return fw;
}
int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
const struct firmware *fw;
if (!adreno_gpu->info->fw[i])
continue;
/* Skip loading GMU firmware with GMU Wrapper */
if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
continue;
/* Skip if the firmware has already been loaded */
if (adreno_gpu->fw[i])
continue;
fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
if (IS_ERR(fw))
return PTR_ERR(fw);
adreno_gpu->fw[i] = fw;
}
return 0;
}
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
struct drm_gem_object *bo;
void *ptr;
ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
memcpy(ptr, &fw->data[4], fw->size - 4);
msm_gem_put_vaddr(bo);
return bo;
}
int adreno_hw_init(struct msm_gpu *gpu)
{
VERB("%s", gpu->name);
for (int i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
if (!ring)
continue;
ring->cur = ring->start;
ring->next = ring->start;
ring->memptrs->rptr = 0;
/* Detect and clean up an impossible fence, i.e. if the GPU managed
* to scribble something invalid, we don't want that to confuse
* us into mistakenly believing that submits have completed.
*/
if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
ring->memptrs->fence = ring->fctx->last_fence;
}
}
return 0;
}
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
struct msm_gpu *gpu = &adreno_gpu->base;
return gpu->funcs->get_rptr(gpu, ring);
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
return gpu->rb[0];
}
void adreno_recover(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
int ret;
// XXX pm-runtime?? we *need* the device to be off after this
// so maybe continuing to call ->pm_suspend/resume() is better?
gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
ret = msm_gpu_hw_init(gpu);
if (ret) {
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
}
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
{
uint32_t wptr;
/* Copy the shadow to the actual register */
ring->cur = ring->next;
/*
* Mask wptr value that we calculate to fit in the HW range. This is
* to account for the possibility that the last command fit exactly into
* the ringbuffer and rb->next hasn't wrapped to zero yet
*/
wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
gpu_write(gpu, reg, wptr);
}
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
return false;
}
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i, count = 0;
WARN_ON(!mutex_is_locked(&gpu->lock));
kref_init(&state->ref);
ktime_get_real_ts64(&state->time);
for (i = 0; i < gpu->nr_rings; i++) {
int size = 0, j;
state->ring[i].fence = gpu->rb[i]->memptrs->fence;
state->ring[i].iova = gpu->rb[i]->iova;
state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
state->ring[i].wptr = get_wptr(gpu->rb[i]);
/* Copy at least 'wptr' dwords of the data */
size = state->ring[i].wptr;
/* After wptr find the last non-zero dword to save space */
for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
if (gpu->rb[i]->start[j])
size = j + 1;
if (size) {
state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data_size = size << 2;
}
}
}
/* Some targets prefer to collect their own registers */
if (!adreno_gpu->registers)
return 0;
/* Count the number of registers */
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
count += adreno_gpu->registers[i + 1] -
adreno_gpu->registers[i] + 1;
state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
if (state->registers) {
int pos = 0;
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
u32 start = adreno_gpu->registers[i];
u32 end = adreno_gpu->registers[i + 1];
u32 addr;
for (addr = start; addr <= end; addr++) {
state->registers[pos++] = addr;
state->registers[pos++] = gpu_read(gpu, addr);
}
}
state->nr_registers = count;
}
return 0;
}
void adreno_gpu_state_destroy(struct msm_gpu_state *state)
{
int i;
for (i = 0; i < ARRAY_SIZE(state->ring); i++)
kvfree(state->ring[i].data);
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
kfree(state->bos);
kfree(state->comm);
kfree(state->cmd);
kfree(state->registers);
}
static void adreno_gpu_state_kref_destroy(struct kref *kref)
{
struct msm_gpu_state *state = container_of(kref,
struct msm_gpu_state, ref);
adreno_gpu_state_destroy(state);
kfree(state);
}
int adreno_gpu_state_put(struct msm_gpu_state *state)
{
if (IS_ERR_OR_NULL(state))
return 1;
return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
}
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
void *buf;
size_t buf_itr = 0, buffer_size;
char out[ASCII85_BUFSZ];
long l;
int i;
if (!src || !len)
return NULL;
l = ascii85_encode_len(len);
/*
* Ascii85 outputs either a 5 byte string or a 1 byte string. So we
* account for the worst case of 5 bytes per dword plus the 1 for '\0'
*/
buffer_size = (l * 5) + 1;
buf = kvmalloc(buffer_size, GFP_KERNEL);
if (!buf)
return NULL;
for (i = 0; i < l; i++)
buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
ascii85_encode(src[i], out));
return buf;
}
/* len is expected to be in bytes
*
* WARNING: *ptr should be allocated with kvmalloc or friends. It can be free'd
* with kvfree() and replaced with a newly kvmalloc'd buffer on the first call
* when the unencoded raw data is encoded
*/
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
bool *encoded)
{
if (!*ptr || !len)
return;
if (!*encoded) {
long datalen, i;
u32 *buf = *ptr;
/*
* Only dump the non-zero part of the buffer - rarely will
* any data completely fill the entire allocated size of
* the buffer.
*/
for (datalen = 0, i = 0; i < len >> 2; i++)
if (buf[i])
datalen = ((i + 1) << 2);
/*
* If we reach here, then the originally captured binary buffer
* will be replaced with the ascii85 encoded string
*/
*ptr = adreno_gpu_ascii85_encode(buf, datalen);
kvfree(buf);
*encoded = true;
}
if (!*ptr)
return;
drm_puts(p, " data: !!ascii85 |\n");
drm_puts(p, " ");
drm_puts(p, *ptr);
drm_puts(p, "\n");
}
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
if (IS_ERR_OR_NULL(state))
return;
drm_printf(p, "revision: %u (%"ADRENO_CHIPID_FMT")\n",
adreno_gpu->info->revn,
ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
/*
* If this state was collected due to an iova fault, print the fault
* related info.
*
* TTBR0 is only non-zero in that case, so it is a good way to
* distinguish.
*/
if (state->fault_info.ttbr0) {
const struct msm_gpu_fault_info *info = &state->fault_info;
drm_puts(p, "fault-info:\n");
drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0);
drm_printf(p, " - iova=%.16lx\n", info->iova);
drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
drm_printf(p, " - type=%s\n", info->type);
drm_printf(p, " - source=%s\n", info->block);
}
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
drm_puts(p, "ringbuffer:\n");
for (i = 0; i < gpu->nr_rings; i++) {
drm_printf(p, " - id: %d\n", i);
drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
drm_printf(p, " last-fence: %u\n", state->ring[i].seqno);
drm_printf(p, " retired-fence: %u\n", state->ring[i].fence);
drm_printf(p, " rptr: %u\n", state->ring[i].rptr);
drm_printf(p, " wptr: %u\n", state->ring[i].wptr);
drm_printf(p, " size: %u\n", MSM_GPU_RINGBUFFER_SZ);
adreno_show_object(p, &state->ring[i].data,
state->ring[i].data_size, &state->ring[i].encoded);
}
if (state->bos) {
drm_puts(p, "bos:\n");
for (i = 0; i < state->nr_bos; i++) {
drm_printf(p, " - iova: 0x%016llx\n",
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
drm_printf(p, " name: %-32s\n", state->bos[i].name);
adreno_show_object(p, &state->bos[i].data,
state->bos[i].size, &state->bos[i].encoded);
}
}
if (state->nr_registers) {
drm_puts(p, "registers:\n");
for (i = 0; i < state->nr_registers; i++) {
drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
state->registers[i * 2] << 2,
state->registers[(i * 2) + 1]);
}
}
}
#endif
/* Dump common gpu status and scratch registers on any hang, to make
* the hangcheck logs more useful. The scratch registers seem always
* safe to read when GPU has hung (unlike some other regs, depending
* on how the GPU hung), and they are useful to match up to cmdstream
* dumps when debugging hangs:
*/
void adreno_dump_info(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
printk("revision: %u (%"ADRENO_CHIPID_FMT")\n",
adreno_gpu->info->revn,
ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
printk("rb %d: fence: %d/%d\n", i,
ring->memptrs->fence,
ring->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
printk("rb wptr: %d\n", get_wptr(ring));
}
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
if (!adreno_gpu->registers)
return;
/* dump these out in a form that can be parsed by demsm: */
printk("IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
uint32_t start = adreno_gpu->registers[i];
uint32_t end = adreno_gpu->registers[i+1];
uint32_t addr;
for (addr = start; addr <= end; addr++) {
uint32_t val = gpu_read(gpu, addr);
printk("IO:R %08x %08x\n", addr<<2, val);
}
}
}
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
/* Use ring->next to calculate free size */
uint32_t wptr = ring->next - ring->start;
uint32_t rptr = get_rptr(adreno_gpu, ring);
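/*
* Example: size = 8, rptr = 2, wptr = 6 -> (2 + 7 - 6) % 8 = 3 free
* words. One slot is always left unused so that a full ring can be
* distinguished from an empty one.
*/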
return (rptr + (size - 1) - wptr) % size;
}
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
if (spin_until(ring_freewords(ring) >= ndwords))
DRM_DEV_ERROR(ring->gpu->dev->dev,
"timeout waiting for space in ringbuffer %d\n",
ring->id);
}
static int adreno_get_pwrlevels(struct device *dev,
struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned long freq = ULONG_MAX;
struct dev_pm_opp *opp;
int ret;
gpu->fast_rate = 0;
/* devm_pm_opp_of_add_table may error out but will still create an OPP table */
ret = devm_pm_opp_of_add_table(dev);
if (ret == -ENODEV) {
/* Special cases for ancient hw with ancient DT bindings */
if (adreno_is_a2xx(adreno_gpu)) {
dev_warn(dev, "Unable to find the OPP table. Falling back to 200 MHz.\n");
dev_pm_opp_add(dev, 200000000, 0);
} else if (adreno_is_a320(adreno_gpu)) {
dev_warn(dev, "Unable to find the OPP table. Falling back to 450 MHz.\n");
dev_pm_opp_add(dev, 450000000, 0);
} else {
DRM_DEV_ERROR(dev, "Unable to find the OPP table\n");
return -ENODEV;
}
} else if (ret) {
DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
return ret;
}
/* Find the fastest defined rate */
opp = dev_pm_opp_find_freq_floor(dev, &freq);
if (IS_ERR(opp))
return PTR_ERR(opp);
gpu->fast_rate = freq;
dev_pm_opp_put(opp);
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
return 0;
}
int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
struct adreno_ocmem *adreno_ocmem)
{
struct ocmem_buf *ocmem_hdl;
struct ocmem *ocmem;
ocmem = of_get_ocmem(dev);
if (IS_ERR(ocmem)) {
if (PTR_ERR(ocmem) == -ENODEV) {
/*
* Return success since either the ocmem property was
* not specified in device tree, or ocmem support is
* not compiled into the kernel.
*/
return 0;
}
return PTR_ERR(ocmem);
}
ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem);
if (IS_ERR(ocmem_hdl))
return PTR_ERR(ocmem_hdl);
adreno_ocmem->ocmem = ocmem;
adreno_ocmem->base = ocmem_hdl->addr;
adreno_ocmem->hdl = ocmem_hdl;
if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem))
return -ENOMEM;
return 0;
}
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
{
if (adreno_ocmem && adreno_ocmem->base)
ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
adreno_ocmem->hdl);
}
int adreno_read_speedbin(struct device *dev, u32 *speedbin)
{
return nvmem_cell_read_variable_le_u32(dev, "speed_bin", speedbin);
}
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
{
struct device *dev = &pdev->dev;
struct adreno_platform_config *config = dev->platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
const char *gpu_name;
u32 speedbin;
int ret;
adreno_gpu->funcs = funcs;
adreno_gpu->info = config->info;
adreno_gpu->chip_id = config->chip_id;
gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
/* Only handle the core clock when GMU is not in use (or is absent). */
if (adreno_has_gmu_wrapper(adreno_gpu) ||
adreno_gpu->info->family < ADRENO_6XX_GEN1) {
/*
* This can only be done before devm_pm_opp_of_add_table(), or
* dev_pm_opp_set_config() will WARN_ON()
*/
if (IS_ERR(devm_clk_get(dev, "core"))) {
/*
* If "core" is absent, go for the legacy clock name.
* If we got this far in probing, it's a given one of
* them exists.
*/
devm_pm_opp_set_clkname(dev, "core_clk");
} else
devm_pm_opp_set_clkname(dev, "core");
}
if (adreno_read_speedbin(dev, &speedbin) || !speedbin)
speedbin = 0xffff;
adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin);
gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%"ADRENO_CHIPID_FMT,
ADRENO_CHIPID_ARGS(config->chip_id));
if (!gpu_name)
return -ENOMEM;
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
adreno_gpu_config.nr_rings = nr_rings;
ret = adreno_get_pwrlevels(dev, gpu);
if (ret)
return ret;
pm_runtime_set_autosuspend_delay(dev,
adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(dev);
return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
gpu_name, &adreno_gpu_config);
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);
}
| linux-master | drivers/gpu/drm/msm/adreno/adreno_gpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
*/
#include <linux/pm_opp.h>
#include "a5xx_gpu.h"
/*
* The GPMU data block is a block of shared registers that can be used to
* communicate back and forth. These "registers" are defined by convention
* with the GPMU firmware and are not bound to any specific hardware design.
*/
#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE
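/*
* The resulting mailbox layout, in dword offsets from AGC_MSG_BASE:
* +0 state, +1 command, +3 payload size (bytes), +5.. payload.
*/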
/* AGC_LM_CONFIG (A540+) */
#define AGC_LM_CONFIG (136/4)
#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
#define AGC_LEVEL_CONFIG (140/4)
static struct {
uint32_t reg;
uint32_t value;
} a5xx_sequence_regs[] = {
{ 0xB9A1, 0x00010303 },
{ 0xB9A2, 0x13000000 },
{ 0xB9A3, 0x00460020 },
{ 0xB9A4, 0x10000000 },
{ 0xB9A5, 0x040A1707 },
{ 0xB9A6, 0x00010000 },
{ 0xB9A7, 0x0E000904 },
{ 0xB9A8, 0x10000000 },
{ 0xB9A9, 0x01165000 },
{ 0xB9AA, 0x000E0002 },
{ 0xB9AB, 0x03884141 },
{ 0xB9AC, 0x10000840 },
{ 0xB9AD, 0x572A5000 },
{ 0xB9AE, 0x00000003 },
{ 0xB9AF, 0x00000000 },
{ 0xB9B0, 0x10000000 },
{ 0xB828, 0x6C204010 },
{ 0xB829, 0x6C204011 },
{ 0xB82A, 0x6C204012 },
{ 0xB82B, 0x6C204013 },
{ 0xB82C, 0x6C204014 },
{ 0xB90F, 0x00000004 },
{ 0xB910, 0x00000002 },
{ 0xB911, 0x00000002 },
{ 0xB912, 0x00000002 },
{ 0xB913, 0x00000002 },
{ 0xB92F, 0x00000004 },
{ 0xB930, 0x00000005 },
{ 0xB931, 0x00000005 },
{ 0xB932, 0x00000005 },
{ 0xB933, 0x00000005 },
{ 0xB96F, 0x00000001 },
{ 0xB970, 0x00000003 },
{ 0xB94F, 0x00000004 },
{ 0xB950, 0x0000000B },
{ 0xB951, 0x0000000B },
{ 0xB952, 0x0000000B },
{ 0xB953, 0x0000000B },
{ 0xB907, 0x00000019 },
{ 0xB927, 0x00000019 },
{ 0xB947, 0x00000019 },
{ 0xB967, 0x00000019 },
{ 0xB987, 0x00000019 },
{ 0xB906, 0x00220001 },
{ 0xB926, 0x00220001 },
{ 0xB946, 0x00220001 },
{ 0xB966, 0x00220001 },
{ 0xB986, 0x00300000 },
{ 0xAC40, 0x0340FF41 },
{ 0xAC41, 0x03BEFED0 },
{ 0xAC42, 0x00331FED },
{ 0xAC43, 0x021FFDD3 },
{ 0xAC44, 0x5555AAAA },
{ 0xAC45, 0x5555AAAA },
{ 0xB9BA, 0x00000008 },
};
/*
* Get the actual voltage value for the operating point at the specified
* frequency
*/
static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct dev_pm_opp *opp;
u32 ret = 0;
opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
if (!IS_ERR(opp)) {
ret = dev_pm_opp_get_voltage(opp) / 1000;
dev_pm_opp_put(opp);
}
return ret;
}
/* Setup thermal limit management */
static void a530_lm_setup(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned int i;
/* Write the block of sequence registers */
for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
gpu_write(gpu, a5xx_sequence_regs[i].reg,
a5xx_sequence_regs[i].value);
/* Hard code the A530 GPU thermal sensor ID for the GPMU */
gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
/* Until we get clock scaling 0 is always the active power level */
gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
/* The threshold is fixed at 6000 for A530 */
gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
/* Write the voltage table */
gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
gpu_write(gpu, AGC_MSG_STATE, 1);
gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
/* Write the max power - hard coded to 5448 for A530 */
gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
/*
* For now just write the one voltage level - we will do more when we
* can do scaling
*/
gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
#define LM_DCVS_LIMIT 1
#define LEVEL_CONFIG ~(0x303)
static void a540_lm_setup(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
u32 config;
/* The battery current limiter isn't enabled for A540 */
config = AGC_LM_CONFIG_BCL_DISABLED;
config |= adreno_patchid(adreno_gpu) << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
/* For now disable GPMU side throttling */
config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
/* Until we get clock scaling 0 is always the active power level */
gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
/* Fixed at 6000 for now */
gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
}
/* Enable the GPMU microcontroller */
static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
if (!a5xx_gpu->gpmu_dwords)
return 0;
/* Turn off protected mode for this operation */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
/* Kick off the IB to load the GPMU microcode */
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
OUT_RING(ring, a5xx_gpu->gpmu_dwords);
/* Turn back on protected mode */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
a5xx_flush(gpu, ring, true);
if (!a5xx_idle(gpu, ring)) {
DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
gpu->name);
return -EINVAL;
}
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
/* Kick off the GPMU */
gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
/*
* Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
* won't have advanced power collapse.
*/
if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
0xBABEFACE))
DRM_ERROR("%s: GPMU firmware initialization timed out\n",
gpu->name);
if (!adreno_is_a530(adreno_gpu)) {
u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
if (val)
DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
gpu->name, val);
}
return 0;
}
/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* This init sequence only applies to A530 */
if (!adreno_is_a530(adreno_gpu))
return;
gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
}
int a5xx_power_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Not all A5xx chips have a GPMU */
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return 0;
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
else if (adreno_is_a540(adreno_gpu))
a540_lm_setup(gpu);
/* Set up SP/TP power collapse */
a5xx_pc_init(gpu);
/* Start the GPMU */
ret = a5xx_gpmu_init(gpu);
if (ret)
return ret;
/* Start the limits management */
a5xx_lm_enable(gpu);
return 0;
}
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *drm = gpu->dev;
uint32_t dwords = 0, offset = 0, bosize;
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return;
if (a5xx_gpu->gpmu_bo)
return;
data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;
/*
* The first dword is the size of the remaining data in dwords. Use it
* as a checksum of sorts and make sure it matches the actual size of
* the firmware that we read
*/
if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
(data[0] < 2) || (data[0] >=
(adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
return;
/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
if (data[1] != 2)
return;
cmds = data + data[2] + 3;
cmds_size = data[0] - data[2] - 2;
/*
* A single type4 opcode can only have so many values attached, so
* add enough opcodes to load all of the commands
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
ptr = msm_gem_kernel_new(drm, bosize,
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
return;
msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
while (cmds_size > 0) {
int i;
uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
TYPE4_MAX_PAYLOAD : cmds_size;
ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
_size);
for (i = 0; i < _size; i++)
ptr[dwords++] = *cmds++;
offset += _size;
cmds_size -= _size;
}
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
}
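/*
* Illustrative layout of the GPMU firmware image as implied by the
* checks above (an inference from this parser, not a formal spec):
*
*   data[0]            remaining image size, in dwords
*   data[1]            block ID, must be 2 (GPMU firmware)
*   data[2]            dword offset to the command stream
*   data[data[2] + 3]  first command dword
*
* so cmds_size = data[0] - data[2] - 2. A PKT4 header can carry at most
* TYPE4_MAX_PAYLOAD dwords, hence the chunking loop: if, say,
* TYPE4_MAX_PAYLOAD were 100, a 250-dword stream would be emitted as
* three packets of 100 + 100 + 50 payload dwords with one header each,
* which is the headroom the "(cmds_size / TYPE4_MAX_PAYLOAD) + 1" term
* in the bosize computation reserves.
*/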
| linux-master | drivers/gpu/drm/msm/adreno/a5xx_power.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"
#define HFI_MSG_ID(val) [val] = #val
static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_INIT),
HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_TEST),
HFI_MSG_ID(HFI_H2F_MSG_START),
HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};
static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
struct a6xx_hfi_queue_header *header = queue->header;
u32 i, hdr, index = header->read_index;
if (header->read_index == header->write_index) {
header->rx_request = 1;
return 0;
}
hdr = queue->data[index];
queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
/*
* If we are to assume that the GMU firmware is in fact a rational actor
* and is programmed to not send us a larger response than we expect
* then we can also assume that if the header size is unexpectedly large
* that it is due to memory corruption and/or hardware failure. In this
* case the only reasonable course of action is to BUG() to help harden
* the failure.
*/
BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
data[i] = queue->data[index];
index = (index + 1) % header->size;
}
if (!gmu->legacy)
index = ALIGN(index, 4) % header->size;
header->read_index = index;
return HFI_HEADER_SIZE(hdr);
}
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
struct a6xx_hfi_queue_header *header = queue->header;
u32 i, space, index = header->write_index;
spin_lock(&queue->lock);
space = CIRC_SPACE(header->write_index, header->read_index,
header->size);
if (space < dwords) {
header->dropped++;
spin_unlock(&queue->lock);
return -ENOSPC;
}
queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
for (i = 0; i < dwords; i++) {
queue->data[index] = data[i];
index = (index + 1) % header->size;
}
/* Cookify any unused data at the end of the write buffer */
if (!gmu->legacy) {
for (; index % 4; index = (index + 1) % header->size)
queue->data[index] = 0xfafafafa;
}
header->write_index = index;
spin_unlock(&queue->lock);
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
return 0;
}
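/*
* Worked example of the CIRC_SPACE() check above, assuming the
* 1024-dword queues set up in a6xx_hfi_queue_init() (CIRC_SPACE()
* requires a power-of-two size): with write_index = 1000 and
* read_index = 10 the queue holds 990 dwords, and
*
*   CIRC_SPACE(1000, 10, 1024) = (10 - 1000 - 1) & 1023 = 33
*
* i.e. size - 1 - used; one slot is always kept empty so that a full
* queue can be told apart from an empty one.
*/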
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
u32 *payload, u32 payload_size)
{
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
u32 val;
int ret;
/* Wait for a response */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
if (ret) {
DRM_DEV_ERROR(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
return -ETIMEDOUT;
}
/* Clear the interrupt */
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
for (;;) {
struct a6xx_hfi_msg_response resp;
/* Get the next packet */
ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
/* If the queue is empty our response never made it */
if (!ret) {
DRM_DEV_ERROR(gmu->dev,
"The HFI response queue is unexpectedly empty\n");
return -ENOENT;
}
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
struct a6xx_hfi_msg_error *error =
(struct a6xx_hfi_msg_error *) &resp;
DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
error->code);
continue;
}
if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
DRM_DEV_ERROR(gmu->dev,
"Unexpected message id %d on the response queue\n",
HFI_HEADER_SEQNUM(resp.ret_header));
continue;
}
if (resp.error) {
DRM_DEV_ERROR(gmu->dev,
"Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
}
/* All is well, copy over the buffer */
if (payload && payload_size)
memcpy(payload, resp.payload,
min_t(u32, payload_size, sizeof(resp.payload)));
return 0;
}
}
static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
void *data, u32 size, u32 *payload, u32 payload_size)
{
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
int ret, dwords = size >> 2;
u32 seqnum;
seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
/* First dword of the message is the message header - fill it in */
*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
(dwords << 8) | id;
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
return ret;
}
return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}
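/*
* The packing above implies this header dword layout (a sketch inferred
* from the shifts, consistent with the HFI_HEADER_* accessors used on
* the read side):
*
*   bits  7:0   message id
*   bits 15:8   message size, in dwords
*   bits 19:16  message type (HFI_MSG_CMD)
*   bits 31:20  sequence number
*/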
static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
msg.dbg_buffer_addr = (u32) gmu->debug.iova;
msg.dbg_buffer_size = (u32) gmu->debug.size;
msg.boot_state = boot_state;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
NULL, 0);
}
static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
struct a6xx_hfi_msg_fw_version msg = { 0 };
/* Currently supporting version 1.10 */
msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
version, sizeof(*version));
}
static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
int i;
msg.num_gpu_levels = gmu->nr_gpu_freqs;
msg.num_gmu_levels = gmu->nr_gmu_freqs;
for (i = 0; i < gmu->nr_gpu_freqs; i++) {
msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
}
for (i = 0; i < gmu->nr_gmu_freqs; i++) {
msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
}
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
NULL, 0);
}
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_perf_table msg = { 0 };
int i;
msg.num_gpu_levels = gmu->nr_gpu_freqs;
msg.num_gmu_levels = gmu->nr_gmu_freqs;
for (i = 0; i < gmu->nr_gpu_freqs; i++) {
msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
msg.gx_votes[i].acd = 0xffffffff;
msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
}
for (i = 0; i < gmu->nr_gmu_freqs; i++) {
msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
}
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
NULL, 0);
}
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x01;
msg->ddr_cmds_addrs[0] = 0x50000;
msg->ddr_cmds_addrs[1] = 0x5003c;
msg->ddr_cmds_addrs[2] = 0x5000c;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes - these are used by the GMU but the
* votes are known and fixed for the target
*/
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x01;
msg->cnoc_cmds_addrs[0] = 0x5007c;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
msg->bw_level_num = 13;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x0;
msg->ddr_cmds_addrs[0] = 0x50000;
msg->ddr_cmds_addrs[1] = 0x50004;
msg->ddr_cmds_addrs[2] = 0x50080;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
msg->ddr_cmds_data[1][0] = 0x6000030c;
msg->ddr_cmds_data[1][1] = 0x600000db;
msg->ddr_cmds_data[1][2] = 0x60000008;
msg->ddr_cmds_data[2][0] = 0x60000618;
msg->ddr_cmds_data[2][1] = 0x600001b6;
msg->ddr_cmds_data[2][2] = 0x60000008;
msg->ddr_cmds_data[3][0] = 0x60000925;
msg->ddr_cmds_data[3][1] = 0x60000291;
msg->ddr_cmds_data[3][2] = 0x60000008;
msg->ddr_cmds_data[4][0] = 0x60000dc1;
msg->ddr_cmds_data[4][1] = 0x600003dc;
msg->ddr_cmds_data[4][2] = 0x60000008;
msg->ddr_cmds_data[5][0] = 0x600010ad;
msg->ddr_cmds_data[5][1] = 0x600004ae;
msg->ddr_cmds_data[5][2] = 0x60000008;
msg->ddr_cmds_data[6][0] = 0x600014c3;
msg->ddr_cmds_data[6][1] = 0x600005d4;
msg->ddr_cmds_data[6][2] = 0x60000008;
msg->ddr_cmds_data[7][0] = 0x6000176a;
msg->ddr_cmds_data[7][1] = 0x60000693;
msg->ddr_cmds_data[7][2] = 0x60000008;
msg->ddr_cmds_data[8][0] = 0x60001f01;
msg->ddr_cmds_data[8][1] = 0x600008b5;
msg->ddr_cmds_data[8][2] = 0x60000008;
msg->ddr_cmds_data[9][0] = 0x60002940;
msg->ddr_cmds_data[9][1] = 0x60000b95;
msg->ddr_cmds_data[9][2] = 0x60000008;
msg->ddr_cmds_data[10][0] = 0x60002f68;
msg->ddr_cmds_data[10][1] = 0x60000d50;
msg->ddr_cmds_data[10][2] = 0x60000008;
msg->ddr_cmds_data[11][0] = 0x60003700;
msg->ddr_cmds_data[11][1] = 0x60000f71;
msg->ddr_cmds_data[11][2] = 0x60000008;
msg->ddr_cmds_data[12][0] = 0x60003fce;
msg->ddr_cmds_data[12][1] = 0x600011ea;
msg->ddr_cmds_data[12][2] = 0x60000008;
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x0;
msg->cnoc_cmds_addrs[0] = 0x50054;
msg->cnoc_cmds_data[0][0] = 0x40000000;
}
static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
* Send a single "off" entry just to get things running
* TODO: bus scaling
*/
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x01;
msg->ddr_cmds_addrs[0] = 0x50000;
msg->ddr_cmds_addrs[1] = 0x5003c;
msg->ddr_cmds_addrs[2] = 0x5000c;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes - these are used by the GMU but the
* votes are known and fixed for the target
*/
msg->cnoc_cmds_num = 3;
msg->cnoc_wait_bitmask = 0x01;
msg->cnoc_cmds_addrs[0] = 0x50034;
msg->cnoc_cmds_addrs[1] = 0x5007c;
msg->cnoc_cmds_addrs[2] = 0x5004c;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[0][1] = 0x00000000;
msg->cnoc_cmds_data[0][2] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
msg->cnoc_cmds_data[1][1] = 0x20000001;
msg->cnoc_cmds_data[1][2] = 0x60000001;
}
static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
* Send a single "off" entry just to get things running
* TODO: bus scaling
*/
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x01;
msg->ddr_cmds_addrs[0] = 0x50000;
msg->ddr_cmds_addrs[1] = 0x50004;
msg->ddr_cmds_addrs[2] = 0x5007c;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes - these are used by the GMU but the
* votes are known and fixed for the target
*/
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x01;
msg->cnoc_cmds_addrs[0] = 0x500a4;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
* Send a single "off" entry just to get things running
* TODO: bus scaling
*/
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x01;
msg->ddr_cmds_addrs[0] = 0x50004;
msg->ddr_cmds_addrs[1] = 0x50000;
msg->ddr_cmds_addrs[2] = 0x500ac;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes - these are used by the GMU but the
* votes are known and fixed for the target
*/
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x01;
msg->cnoc_cmds_addrs[0] = 0x5003c;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
* Send a single "off" entry just to get things running
* TODO: bus scaling
*/
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x01;
msg->ddr_cmds_addrs[0] = 0x50004;
msg->ddr_cmds_addrs[1] = 0x500a0;
msg->ddr_cmds_addrs[2] = 0x50000;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes - these are used by the GMU but the
* votes are known and fixed for the target
*/
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x01;
msg->cnoc_cmds_addrs[0] = 0x50070;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
* Send a single "off" entry just to get things running
* TODO: bus scaling
*/
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x07;
msg->ddr_cmds_addrs[0] = 0x50004;
msg->ddr_cmds_addrs[1] = 0x50000;
msg->ddr_cmds_addrs[2] = 0x50088;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes - these are used by the GMU but the
* votes are known and fixed for the target
*/
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x01;
msg->cnoc_cmds_addrs[0] = 0x5006c;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x07;
msg->ddr_cmds_addrs[0] = 0x50000;
msg->ddr_cmds_addrs[1] = 0x5005c;
msg->ddr_cmds_addrs[2] = 0x5000c;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes. They are used by the GMU, but the
* values for the sdm845 are known and fixed so we can hard code them.
*/
msg->cnoc_cmds_num = 3;
msg->cnoc_wait_bitmask = 0x05;
msg->cnoc_cmds_addrs[0] = 0x50034;
msg->cnoc_cmds_addrs[1] = 0x5007c;
msg->cnoc_cmds_addrs[2] = 0x5004c;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[0][1] = 0x00000000;
msg->cnoc_cmds_data[0][2] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
msg->cnoc_cmds_data[1][1] = 0x20000001;
msg->cnoc_cmds_data[1][2] = 0x60000001;
}
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_bw_table msg = { 0 };
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(&msg);
else if (adreno_is_a619(adreno_gpu))
a619_build_bw_table(&msg);
else if (adreno_is_a640_family(adreno_gpu))
a640_build_bw_table(&msg);
else if (adreno_is_a650(adreno_gpu))
a650_build_bw_table(&msg);
else if (adreno_is_7c3(adreno_gpu))
adreno_7c3_build_bw_table(&msg);
else if (adreno_is_a660(adreno_gpu))
a660_build_bw_table(&msg);
else if (adreno_is_a690(adreno_gpu))
a690_build_bw_table(&msg);
else
a6xx_build_bw_table(&msg);
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
NULL, 0);
}
static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_test msg = { 0 };
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
NULL, 0);
}
static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_start msg = { 0 };
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
NULL, 0);
}
static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_core_fw_start msg = { 0 };
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
sizeof(msg), NULL, 0);
}
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
msg.ack_type = 1; /* blocking */
msg.freq = index;
msg.bw = 0; /* TODO: bus scaling */
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
sizeof(msg), NULL, 0);
}
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_prep_slumber_cmd msg = { 0 };
/* TODO: should freq and bw fields be non-zero? */
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
sizeof(msg), NULL, 0);
}
static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
int ret;
ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
if (ret)
return ret;
ret = a6xx_hfi_get_fw_version(gmu, NULL);
if (ret)
return ret;
/*
* We have to exchange version numbers per the sequence, but at this
* point the kernel driver doesn't need to know the exact version of
* the GMU firmware
*/
ret = a6xx_hfi_send_perf_table_v1(gmu);
if (ret)
return ret;
ret = a6xx_hfi_send_bw_table(gmu);
if (ret)
return ret;
/*
* Let the GMU know that there won't be any more HFI messages until next
* boot
*/
a6xx_hfi_send_test(gmu);
return 0;
}
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
int ret;
if (gmu->legacy)
return a6xx_hfi_start_v1(gmu, boot_state);
ret = a6xx_hfi_send_perf_table(gmu);
if (ret)
return ret;
ret = a6xx_hfi_send_bw_table(gmu);
if (ret)
return ret;
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
/*
* The downstream driver sends this in its "a6xx_hw_init" equivalent,
* but there seems to be no harm in sending it here
*/
ret = a6xx_hfi_send_start(gmu);
if (ret)
return ret;
return 0;
}
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
int i;
for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
struct a6xx_hfi_queue *queue = &gmu->queues[i];
if (!queue->header)
continue;
if (queue->header->read_index != queue->header->write_index)
DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
queue->header->read_index = 0;
queue->header->write_index = 0;
memset(&queue->history, 0xff, sizeof(queue->history));
queue->history_idx = 0;
}
}
static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
u32 id)
{
spin_lock_init(&queue->lock);
queue->header = header;
queue->data = virt;
atomic_set(&queue->seqnum, 0);
memset(&queue->history, 0xff, sizeof(queue->history));
queue->history_idx = 0;
/* Set up the shared memory header */
header->iova = iova;
header->type = 10 << 8 | id;
header->status = 1;
header->size = SZ_4K >> 2;
header->msg_size = 0;
header->dropped = 0;
header->rx_watermark = 1;
header->tx_watermark = 1;
header->rx_request = 1;
header->tx_request = 0;
header->read_index = 0;
header->write_index = 0;
}
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
struct a6xx_gmu_bo *hfi = &gmu->hfi;
struct a6xx_hfi_queue_table_header *table = hfi->virt;
struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
u64 offset;
int table_size;
/*
* The table size is the size of the table header plus all of the queue
* headers
*/
table_size = sizeof(*table);
table_size += (ARRAY_SIZE(gmu->queues) *
sizeof(struct a6xx_hfi_queue_header));
table->version = 0;
table->size = table_size;
/* First queue header is located immediately after the table header */
table->qhdr0_offset = sizeof(*table) >> 2;
table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
table->num_queues = ARRAY_SIZE(gmu->queues);
table->active_queues = ARRAY_SIZE(gmu->queues);
/* Command queue */
offset = SZ_4K;
a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
hfi->iova + offset, 0);
/* GMU response queue */
offset += SZ_4K;
a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
hfi->iova + offset, gmu->legacy ? 4 : 1);
}
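/*
* Resulting layout of the shared HFI buffer, sketched from the offsets
* above:
*
*   0x0000  queue table header followed by one queue header per queue
*           (header->type packs the queue id in bits 7:0 and what looks
*           like a fixed type code, 10, in bits 15:8)
*   0x1000  command queue data (host to GMU), 4K
*   0x2000  response queue data (GMU to host), 4K
*/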
| linux-master | drivers/gpu/drm/msm/adreno/a6xx_hfi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
uint32_t ctrl = 0;
unsigned long flags;
spin_lock_irqsave(&hdmi->reg_lock, flags);
if (power_on) {
ctrl |= HDMI_CTRL_ENABLE;
if (!hdmi->hdmi_mode) {
ctrl |= HDMI_CTRL_HDMI;
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
ctrl &= ~HDMI_CTRL_HDMI;
} else {
ctrl |= HDMI_CTRL_HDMI;
}
} else {
ctrl = HDMI_CTRL_HDMI;
}
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
power_on ? "Enable" : "Disable", ctrl);
}
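/*
* Note on the double write above: when powering on in DVI mode
* (hdmi_mode == false) the controller is first enabled with
* HDMI_CTRL_HDMI set and then rewritten with the bit cleared, so HDMI
* mode is briefly pulsed as part of the enable sequence before settling
* into DVI. That reading is an inference from the code, not documented
* behaviour.
*/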
static irqreturn_t msm_hdmi_irq(int irq, void *dev_id)
{
struct hdmi *hdmi = dev_id;
/* Process HPD: */
msm_hdmi_hpd_irq(hdmi->bridge);
/* Process DDC: */
msm_hdmi_i2c_irq(hdmi->i2c);
/* Process HDCP: */
if (hdmi->hdcp_ctrl)
msm_hdmi_hdcp_irq(hdmi->hdcp_ctrl);
/* TODO audio.. */
return IRQ_HANDLED;
}
static void msm_hdmi_destroy(struct hdmi *hdmi)
{
/*
* At this point HPD has been disabled; after the workq is flushed,
* it is safe to deinit HDCP
*/
if (hdmi->workq)
destroy_workqueue(hdmi->workq);
msm_hdmi_hdcp_destroy(hdmi);
if (hdmi->i2c)
msm_hdmi_i2c_destroy(hdmi->i2c);
}
static void msm_hdmi_put_phy(struct hdmi *hdmi)
{
if (hdmi->phy_dev) {
put_device(hdmi->phy_dev);
hdmi->phy = NULL;
hdmi->phy_dev = NULL;
}
}
static int msm_hdmi_get_phy(struct hdmi *hdmi)
{
struct platform_device *pdev = hdmi->pdev;
struct platform_device *phy_pdev;
struct device_node *phy_node;
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
phy_pdev = of_find_device_by_node(phy_node);
of_node_put(phy_node);
if (!phy_pdev)
return dev_err_probe(&pdev->dev, -EPROBE_DEFER, "phy driver is not ready\n");
hdmi->phy = platform_get_drvdata(phy_pdev);
if (!hdmi->phy) {
put_device(&phy_pdev->dev);
return dev_err_probe(&pdev->dev, -EPROBE_DEFER, "phy driver is not ready\n");
}
hdmi->phy_dev = &phy_pdev->dev;
return 0;
}
/* construct hdmi at bind/probe time, grab all the resources. If
* we are to EPROBE_DEFER we want to do it here, rather than later
* at modeset_init() time
*/
static int msm_hdmi_init(struct hdmi *hdmi)
{
struct platform_device *pdev = hdmi->pdev;
int ret;
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
if (!hdmi->workq) {
ret = -ENOMEM;
goto fail;
}
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
hdmi->hdcp_ctrl = msm_hdmi_hdcp_init(hdmi);
if (IS_ERR(hdmi->hdcp_ctrl)) {
dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
hdmi->hdcp_ctrl = NULL;
}
return 0;
fail:
msm_hdmi_destroy(hdmi);
return ret;
}
/* Second part of initialization, the drm/kms level modeset_init,
* constructs/initializes mode objects, etc, is called from master
* driver (not hdmi sub-device's probe/bind!)
*
* Any resource (regulator/clk/etc) which could be missing at boot
* should be handled in msm_hdmi_init() so that failure happens from
* hdmi sub-device's probe.
*/
int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct drm_device *dev, struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
hdmi->dev = dev;
hdmi->encoder = encoder;
hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
hdmi->bridge = msm_hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
hdmi->bridge = NULL;
goto fail;
}
if (hdmi->next_bridge) {
ret = drm_bridge_attach(hdmi->encoder, hdmi->next_bridge, hdmi->bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to attach next HDMI bridge: %d\n", ret);
goto fail;
}
}
hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
hdmi->connector = NULL;
goto fail;
}
drm_connector_attach_encoder(hdmi->connector, hdmi->encoder);
ret = devm_request_irq(dev->dev, hdmi->irq,
msm_hdmi_irq, IRQF_TRIGGER_HIGH,
"hdmi_isr", hdmi);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
hdmi->irq, ret);
goto fail;
}
ret = msm_hdmi_hpd_enable(hdmi->bridge);
if (ret < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
goto fail;
}
priv->bridges[priv->num_bridges++] = hdmi->bridge;
return 0;
fail:
/* bridge is normally destroyed by drm: */
if (hdmi->bridge) {
msm_hdmi_bridge_destroy(hdmi->bridge);
hdmi->bridge = NULL;
}
if (hdmi->connector) {
hdmi->connector->funcs->destroy(hdmi->connector);
hdmi->connector = NULL;
}
return ret;
}
/*
* The hdmi device:
*/
#define HDMI_CFG(item, entry) \
.item ## _names = item ##_names_ ## entry, \
.item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
static const char *hpd_reg_names_8960[] = {"core-vdda"};
static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static const struct hdmi_platform_config hdmi_tx_8960_config = {
HDMI_CFG(hpd_reg, 8960),
HDMI_CFG(hpd_clk, 8960),
};
static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
static const struct hdmi_platform_config hdmi_tx_8974_config = {
HDMI_CFG(pwr_reg, 8x74),
HDMI_CFG(pwr_clk, 8x74),
HDMI_CFG(hpd_clk, 8x74),
.hpd_freq = hpd_clk_freq_8x74,
};
/*
* HDMI audio codec callbacks
*/
static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct hdmi *hdmi = dev_get_drvdata(dev);
unsigned int chan;
unsigned int channel_allocation = 0;
unsigned int rate;
unsigned int level_shift = 0; /* 0dB */
bool down_mix = false;
DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
params->sample_width, params->cea.channels);
switch (params->cea.channels) {
case 2:
/* FR and FL speakers */
channel_allocation = 0;
chan = MSM_HDMI_AUDIO_CHANNEL_2;
break;
case 4:
/* FC, LFE, FR and FL speakers */
channel_allocation = 0x3;
chan = MSM_HDMI_AUDIO_CHANNEL_4;
break;
case 6:
/* RR, RL, FC, LFE, FR and FL speakers */
channel_allocation = 0x0B;
chan = MSM_HDMI_AUDIO_CHANNEL_6;
break;
case 8:
/* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
channel_allocation = 0x1F;
chan = MSM_HDMI_AUDIO_CHANNEL_8;
break;
default:
return -EINVAL;
}
switch (params->sample_rate) {
case 32000:
rate = HDMI_SAMPLE_RATE_32KHZ;
break;
case 44100:
rate = HDMI_SAMPLE_RATE_44_1KHZ;
break;
case 48000:
rate = HDMI_SAMPLE_RATE_48KHZ;
break;
case 88200:
rate = HDMI_SAMPLE_RATE_88_2KHZ;
break;
case 96000:
rate = HDMI_SAMPLE_RATE_96KHZ;
break;
case 176400:
rate = HDMI_SAMPLE_RATE_176_4KHZ;
break;
case 192000:
rate = HDMI_SAMPLE_RATE_192KHZ;
break;
default:
DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
params->sample_rate);
return -EINVAL;
}
msm_hdmi_audio_set_sample_rate(hdmi, rate);
msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
level_shift, down_mix);
return 0;
}
static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct hdmi *hdmi = dev_get_drvdata(dev);
msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
}
static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
.hw_params = msm_hdmi_audio_hw_params,
.audio_shutdown = msm_hdmi_audio_shutdown,
};
static struct hdmi_codec_pdata codec_data = {
.ops = &msm_hdmi_audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
{
hdmi->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
}
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
struct hdmi *hdmi = dev_get_drvdata(dev);
int err;
err = msm_hdmi_init(hdmi);
if (err)
return err;
priv->hdmi = hdmi;
err = msm_hdmi_register_audio_driver(hdmi, dev);
if (err) {
DRM_ERROR("Failed to attach an audio codec %d\n", err);
hdmi->audio_pdev = NULL;
}
return 0;
}
static void msm_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
if (priv->hdmi->audio_pdev)
platform_device_unregister(priv->hdmi->audio_pdev);
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
}
static const struct component_ops msm_hdmi_ops = {
.bind = msm_hdmi_bind,
.unbind = msm_hdmi_unbind,
};
static int msm_hdmi_dev_probe(struct platform_device *pdev)
{
const struct hdmi_platform_config *config;
struct device *dev = &pdev->dev;
struct hdmi *hdmi;
struct resource *res;
int i, ret;
config = of_device_get_match_data(dev);
if (!config)
return -EINVAL;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->pdev = pdev;
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);
ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge);
if (ret && ret != -ENODEV)
return ret;
hdmi->mmio = msm_ioremap(pdev, "core_physical");
if (IS_ERR(hdmi->mmio))
return PTR_ERR(hdmi->mmio);
/* HDCP needs physical address of hdmi register */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"core_physical");
if (!res)
return -EINVAL;
hdmi->mmio_phy_addr = res->start;
hdmi->qfprom_mmio = msm_ioremap(pdev, "qfprom_physical");
if (IS_ERR(hdmi->qfprom_mmio)) {
DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
hdmi->qfprom_mmio = NULL;
}
hdmi->irq = platform_get_irq(pdev, 0);
if (hdmi->irq < 0)
return hdmi->irq;
hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
config->hpd_reg_cnt,
sizeof(hdmi->hpd_regs[0]),
GFP_KERNEL);
if (!hdmi->hpd_regs)
return -ENOMEM;
for (i = 0; i < config->hpd_reg_cnt; i++)
hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
if (ret)
return dev_err_probe(dev, ret, "failed to get hpd regulators\n");
hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
config->pwr_reg_cnt,
sizeof(hdmi->pwr_regs[0]),
GFP_KERNEL);
if (!hdmi->pwr_regs)
return -ENOMEM;
for (i = 0; i < config->pwr_reg_cnt; i++)
hdmi->pwr_regs[i].supply = config->pwr_reg_names[i];
ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs);
if (ret)
return dev_err_probe(dev, ret, "failed to get pwr regulators\n");
hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
config->hpd_clk_cnt,
sizeof(hdmi->hpd_clks[0]),
GFP_KERNEL);
if (!hdmi->hpd_clks)
return -ENOMEM;
for (i = 0; i < config->hpd_clk_cnt; i++) {
struct clk *clk;
clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk),
"failed to get hpd clk: %s\n",
config->hpd_clk_names[i]);
hdmi->hpd_clks[i] = clk;
}
hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
config->pwr_clk_cnt,
sizeof(hdmi->pwr_clks[0]),
GFP_KERNEL);
if (!hdmi->pwr_clks)
return -ENOMEM;
for (i = 0; i < config->pwr_clk_cnt; i++) {
struct clk *clk;
clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk),
"failed to get pwr clk: %s\n",
config->pwr_clk_names[i]);
hdmi->pwr_clks[i] = clk;
}
hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
/* This will catch e.g. -EPROBE_DEFER */
if (IS_ERR(hdmi->hpd_gpiod))
return dev_err_probe(dev, PTR_ERR(hdmi->hpd_gpiod),
"failed to get hpd gpio\n");
if (!hdmi->hpd_gpiod)
DBG("failed to get HPD gpio");
if (hdmi->hpd_gpiod)
gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD");
ret = msm_hdmi_get_phy(hdmi);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
return ret;
}
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
goto err_put_phy;
platform_set_drvdata(pdev, hdmi);
ret = component_add(&pdev->dev, &msm_hdmi_ops);
if (ret)
goto err_put_phy;
return 0;
err_put_phy:
msm_hdmi_put_phy(hdmi);
return ret;
}
static int msm_hdmi_dev_remove(struct platform_device *pdev)
{
struct hdmi *hdmi = dev_get_drvdata(&pdev->dev);
component_del(&pdev->dev, &msm_hdmi_ops);
msm_hdmi_put_phy(hdmi);
return 0;
}
static const struct of_device_id msm_hdmi_dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8960_config },
{}
};
static struct platform_driver msm_hdmi_driver = {
.probe = msm_hdmi_dev_probe,
.remove = msm_hdmi_dev_remove,
.driver = {
.name = "hdmi_msm",
.of_match_table = msm_hdmi_dt_match,
},
};
void __init msm_hdmi_register(void)
{
msm_hdmi_phy_driver_register();
platform_driver_register(&msm_hdmi_driver);
}
void __exit msm_hdmi_unregister(void)
{
platform_driver_unregister(&msm_hdmi_driver);
msm_hdmi_phy_driver_unregister();
}
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include "msm_kms.h"
#include "hdmi.h"
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
msm_hdmi_hpd_disable(hdmi_bridge);
drm_bridge_remove(bridge);
}
static void msm_hdmi_power_on(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
int i, ret;
pm_runtime_get_sync(&hdmi->pdev->dev);
ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
if (config->pwr_clk_cnt > 0) {
DBG("pixclock: %lu", hdmi->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
config->pwr_clk_names[0], ret);
}
}
for (i = 0; i < config->pwr_clk_cnt; i++) {
ret = clk_prepare_enable(hdmi->pwr_clks[i]);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
}
}
}
static void power_off(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
int i, ret;
/* TODO do we need to wait for final vblank somewhere before
* cutting the clocks?
*/
mdelay(16 + 4);
for (i = 0; i < config->pwr_clk_cnt; i++)
clk_disable_unprepare(hdmi->pwr_clks[i]);
ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
pm_runtime_put(&hdmi->pdev->dev);
}
#define AVI_IFRAME_LINE_NUMBER 1
static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
{
struct drm_crtc *crtc = hdmi->encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
union hdmi_infoframe frame;
u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
u32 val;
int len;
drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
hdmi->connector, mode);
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
return;
}
/*
* the AVI_INFOx registers don't map exactly to how the AVI infoframes
* are packed according to the spec. The checksum from the header is
* written to the LSB byte of AVI_INFO0 and the version is written to
* the third byte from the LSB of AVI_INFO3
*/
hdmi_write(hdmi, REG_HDMI_AVI_INFO(0),
buffer[3] |
buffer[4] << 8 |
buffer[5] << 16 |
buffer[6] << 24);
hdmi_write(hdmi, REG_HDMI_AVI_INFO(1),
buffer[7] |
buffer[8] << 8 |
buffer[9] << 16 |
buffer[10] << 24);
hdmi_write(hdmi, REG_HDMI_AVI_INFO(2),
buffer[11] |
buffer[12] << 8 |
buffer[13] << 16 |
buffer[14] << 24);
hdmi_write(hdmi, REG_HDMI_AVI_INFO(3),
buffer[15] |
buffer[16] << 8 |
buffer[1] << 24);
hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
HDMI_INFOFRAME_CTRL0_AVI_SEND |
HDMI_INFOFRAME_CTRL0_AVI_CONT);
val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
}
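/*
* Sketch of the byte-to-register mapping implied above, assuming the
* usual hdmi_infoframe_pack() output (3 header bytes, then checksum and
* 13 payload bytes):
*
*   buffer[0..2]   type / version / length
*   buffer[3]      checksum    -> AVI_INFO0 bits 7:0
*   buffer[4..6]   PB1..PB3    -> AVI_INFO0 bits 31:8
*   buffer[7..10]  PB4..PB7    -> AVI_INFO1
*   buffer[11..14] PB8..PB11   -> AVI_INFO2
*   buffer[15..16] PB12..PB13  -> AVI_INFO3 bits 15:0, with buffer[1]
*                  (the version) landing in bits 31:24
*/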
static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
DBG("power up");
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
if (hdmi->hdmi_mode) {
msm_hdmi_config_avi_infoframe(hdmi);
msm_hdmi_audio_update(hdmi);
}
}
msm_hdmi_phy_powerup(phy, hdmi->pixclock);
msm_hdmi_set_mode(hdmi, true);
if (hdmi->hdcp_ctrl)
msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
if (hdmi->hdcp_ctrl)
msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
DBG("power down");
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_powerdown(phy);
if (hdmi->power_on) {
power_off(bridge);
hdmi->power_on = false;
if (hdmi->hdmi_mode)
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
}
static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
mode = adjusted_mode;
hdmi->pixclock = mode->clock * 1000;
hstart = mode->htotal - mode->hsync_start;
hend = mode->htotal - mode->hsync_start + mode->hdisplay;
vstart = mode->vtotal - mode->vsync_start - 1;
vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
hdmi_write(hdmi, REG_HDMI_TOTAL,
HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
HDMI_ACTIVE_HSYNC_START(hstart) |
HDMI_ACTIVE_HSYNC_END(hend));
hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
HDMI_ACTIVE_VSYNC_START(vstart) |
HDMI_ACTIVE_VSYNC_END(vend));
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
} else {
hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
HDMI_VSYNC_ACTIVE_F2_START(0) |
HDMI_VSYNC_ACTIVE_F2_END(0));
}
frame_ctrl = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
if (hdmi->hdmi_mode)
msm_hdmi_audio_update(hdmi);
}
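/*
* Worked example of the timing math above for a hypothetical 1080p-like
* mode (htotal = 2200, hsync_start = 2008, hdisplay = 1920,
* vtotal = 1125, vsync_start = 1084, vdisplay = 1080):
*
*   hstart = 2200 - 2008     = 192   (sync width plus back porch)
*   hend   = 192 + 1920      = 2112
*   vstart = 1125 - 1084 - 1 = 40
*   vend   = 40 + 1080       = 1120
*
* i.e. the active window is programmed relative to the start of the
* sync pulse rather than to the start of active video.
*/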
static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct edid *edid;
uint32_t hdmi_ctrl;
hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
edid = drm_get_edid(connector, hdmi->i2c);
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
return edid;
}
static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual, requested;
requested = 1000 * mode->clock;
/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
* mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
* instead):
*/
if (kms->funcs->round_pixclk)
actual = kms->funcs->round_pixclk(kms,
requested, hdmi_bridge->hdmi->encoder);
else if (config->pwr_clk_cnt > 0)
actual = clk_round_rate(hdmi->pwr_clks[0], requested);
else
actual = requested;
DBG("requested=%ld, actual=%ld", requested, actual);
if (actual != requested)
return MODE_CLOCK_RANGE;
return 0;
}
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.pre_enable = msm_hdmi_bridge_pre_enable,
.post_disable = msm_hdmi_bridge_post_disable,
.mode_set = msm_hdmi_bridge_mode_set,
.mode_valid = msm_hdmi_bridge_mode_valid,
.get_edid = msm_hdmi_bridge_get_edid,
.detect = msm_hdmi_bridge_detect,
};
static void
msm_hdmi_hotplug_work(struct work_struct *work)
{
struct hdmi_bridge *hdmi_bridge =
container_of(work, struct hdmi_bridge, hpd_work);
struct drm_bridge *bridge = &hdmi_bridge->base;
drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
}
/* initialize bridge */
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
{
struct drm_bridge *bridge = NULL;
struct hdmi_bridge *hdmi_bridge;
int ret;
hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
sizeof(*hdmi_bridge), GFP_KERNEL);
if (!hdmi_bridge) {
ret = -ENOMEM;
goto fail;
}
hdmi_bridge->hdmi = hdmi;
INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
drm_bridge_add(bridge);
ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto fail;
return bridge;
fail:
if (bridge)
msm_hdmi_bridge_destroy(bridge);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_bridge.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include "hdmi.h"
#define HDMI_VCO_MAX_FREQ 12000000000UL
#define HDMI_VCO_MIN_FREQ 8000000000UL
#define HDMI_PCLK_MAX_FREQ 600000000
#define HDMI_PCLK_MIN_FREQ 25000000
#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL
#define HDMI_CORECLK_DIV 5
#define HDMI_DEFAULT_REF_CLOCK 19200000
#define HDMI_PLL_CMP_CNT 1024
#define HDMI_PLL_POLL_MAX_READS 100
#define HDMI_PLL_POLL_TIMEOUT_US 150
#define HDMI_NUM_TX_CHANNEL 4
struct hdmi_pll_8996 {
struct platform_device *pdev;
struct clk_hw clk_hw;
/* pll mmio base */
void __iomem *mmio_qserdes_com;
/* tx channel base */
void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
};
#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8996, clk_hw)
struct hdmi_8996_phy_pll_reg_cfg {
u32 tx_lx_lane_mode[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
u32 com_svs_mode_clk_sel;
u32 com_hsclk_sel;
u32 com_pll_cctrl_mode0;
u32 com_pll_rctrl_mode0;
u32 com_cp_ctrl_mode0;
u32 com_dec_start_mode0;
u32 com_div_frac_start1_mode0;
u32 com_div_frac_start2_mode0;
u32 com_div_frac_start3_mode0;
u32 com_integloop_gain0_mode0;
u32 com_integloop_gain1_mode0;
u32 com_lock_cmp_en;
u32 com_lock_cmp1_mode0;
u32 com_lock_cmp2_mode0;
u32 com_lock_cmp3_mode0;
u32 com_core_clk_en;
u32 com_coreclk_div;
u32 com_vco_tune_ctrl;
u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_vmode_ctrl1[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_vmode_ctrl2[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_res_code_lane_tx[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_hp_pd_enables[HDMI_NUM_TX_CHANNEL];
u32 phy_mode;
};
struct hdmi_8996_post_divider {
u64 vco_freq;
int hsclk_divsel;
int vco_ratio;
int tx_band_sel;
int half_rate_mode;
};
static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll)
{
return platform_get_drvdata(pll->pdev);
}
static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset,
u32 data)
{
msm_writel(data, pll->mmio_qserdes_com + offset);
}
static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset)
{
return msm_readl(pll->mmio_qserdes_com + offset);
}
static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel,
int offset, int data)
{
msm_writel(data, pll->mmio_qserdes_tx[channel] + offset);
}
static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
bool gen_ssc)
{
if ((frac_start != 0) || gen_ssc)
return (11000000 / (ref_clk / 20));
return 0x23;
}
static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
{
if ((frac_start != 0) || gen_ssc)
return 0x16;
return 0x10;
}
static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
{
if ((frac_start != 0) || gen_ssc)
return 0x28;
return 0x1;
}
static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
bool gen_ssc)
{
int digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
u64 base;
if ((frac_start != 0) || gen_ssc)
base = (64 * ref_clk) / HDMI_DEFAULT_REF_CLOCK;
else
base = (1022 * ref_clk) / 100;
base <<= digclk_divsel;
return (base <= 2046 ? base : 2046);
}
static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
{
u64 dividend = HDMI_PLL_CMP_CNT * fdata;
u32 divisor = ref_clk * 10;
u32 rem;
rem = do_div(dividend, divisor);
if (rem > (divisor >> 1))
dividend++;
return dividend - 1;
}
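/*
* Worked example of the rounding above, assuming the default 19.2 MHz
* reference clock: for fdata = 1485000000 Hz (a 148.5 MHz pixel clock
* at the 10x bit ratio),
*
*   pll_cmp = round(1024 * 1485000000 / (19200000 * 10)) - 1
*           = 7920 - 1 = 7919
*
* pll_cmp_to_fdata() below is the inverse of this, modulo the rounding.
*/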
static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
{
u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
do_div(fdata, HDMI_PLL_CMP_CNT);
return fdata;
}
static int pll_get_post_div(struct hdmi_8996_post_divider *pd, u64 bclk)
{
int ratio[] = { 2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35 };
int hs_divsel[] = { 0, 4, 8, 12, 1, 5, 2, 9, 3, 13, 10, 7, 14, 11, 15 };
int tx_band_sel[] = { 0, 1, 2, 3 };
u64 vco_freq[60];
u64 vco, vco_optimal;
int half_rate_mode = 0;
int vco_optimal_index, vco_freq_index;
int i, j;
retry:
vco_optimal = HDMI_VCO_MAX_FREQ;
vco_optimal_index = -1;
vco_freq_index = 0;
for (i = 0; i < 15; i++) {
for (j = 0; j < 4; j++) {
u32 ratio_mult = ratio[i] << tx_band_sel[j];
vco = bclk >> half_rate_mode;
vco *= ratio_mult;
vco_freq[vco_freq_index++] = vco;
}
}
for (i = 0; i < 60; i++) {
u64 vco_tmp = vco_freq[i];
if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
(vco_tmp <= vco_optimal)) {
vco_optimal = vco_tmp;
vco_optimal_index = i;
}
}
if (vco_optimal_index == -1) {
if (!half_rate_mode) {
half_rate_mode = 1;
goto retry;
}
} else {
pd->vco_freq = vco_optimal;
pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
pd->vco_ratio = ratio[vco_optimal_index / 4];
pd->hsclk_divsel = hs_divsel[vco_optimal_index / 4];
return 0;
}
return -EINVAL;
}
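/*
* Sketch of the search above: 15 divider ratios x 4 TX bands give 60
* candidate VCO frequencies (bclk * ratio << band), and the loop keeps
* the smallest candidate that still reaches HDMI_VCO_MIN_FREQ. For
* example, with bclk = 1.485 GHz the smallest multiplier clearing the
* 8 GHz floor is 6 (1.485 * 6 = 8.91 GHz), so the VCO would run at
* 8.91 GHz. If no multiplier reaches the floor, the search is retried
* once with bclk halved (half_rate_mode).
*/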
static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
struct hdmi_8996_phy_pll_reg_cfg *cfg)
{
struct hdmi_8996_post_divider pd;
u64 bclk;
u64 tmds_clk;
u64 dec_start;
u64 frac_start;
u64 fdata;
u32 pll_divisor;
u32 rem;
u32 cpctrl;
u32 rctrl;
u32 cctrl;
u32 integloop_gain;
u32 pll_cmp;
int i, ret;
/* bit clk = 10 * pix_clk */
bclk = ((u64)pix_clk) * 10;
if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
tmds_clk = pix_clk >> 2;
else
tmds_clk = pix_clk;
ret = pll_get_post_div(&pd, bclk);
if (ret)
return ret;
dec_start = pd.vco_freq;
pll_divisor = 4 * ref_clk;
do_div(dec_start, pll_divisor);
frac_start = pd.vco_freq * (1 << 20);
rem = do_div(frac_start, pll_divisor);
frac_start -= dec_start * (1 << 20);
if (rem > (pll_divisor >> 1))
frac_start++;
cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
rctrl = pll_get_rctrl(frac_start, false);
cctrl = pll_get_cctrl(frac_start, false);
integloop_gain = pll_get_integloop_gain(frac_start, bclk,
ref_clk, false);
fdata = pd.vco_freq;
do_div(fdata, pd.vco_ratio);
pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
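/*
* Worked example of the 20-bit fixed-point split above, assuming a
* 19.2 MHz reference and the 8.91 GHz VCO from the earlier example:
* vco / (4 * ref) = 8910000000 / 76800000 = 116.015625, so
*
*   dec_start  = 116
*   frac_start = 0.015625 * 2^20 = 16384
*
* with half-up rounding applied to the remainder.
*/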
DBG("VCO freq: %llu", pd.vco_freq);
DBG("fdata: %llu", fdata);
DBG("pix_clk: %lu", pix_clk);
DBG("tmds clk: %llu", tmds_clk);
DBG("HSCLK_SEL: %d", pd.hsclk_divsel);
DBG("DEC_START: %llu", dec_start);
DBG("DIV_FRAC_START: %llu", frac_start);
DBG("PLL_CPCTRL: %u", cpctrl);
DBG("PLL_RCTRL: %u", rctrl);
DBG("PLL_CCTRL: %u", cctrl);
DBG("INTEGLOOP_GAIN: %u", integloop_gain);
DBG("TX_BAND: %d", pd.tx_band_sel);
DBG("PLL_CMP: %u", pll_cmp);
/* Convert these values to register specific values */
if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
cfg->com_svs_mode_clk_sel = 1;
else
cfg->com_svs_mode_clk_sel = 2;
cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
cfg->com_pll_cctrl_mode0 = cctrl;
cfg->com_pll_rctrl_mode0 = rctrl;
cfg->com_cp_ctrl_mode0 = cpctrl;
cfg->com_dec_start_mode0 = dec_start;
cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
cfg->com_lock_cmp_en = 0x0;
cfg->com_core_clk_en = 0x2c;
cfg->com_coreclk_div = HDMI_CORECLK_DIV;
cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
cfg->com_vco_tune_ctrl = 0x0;
cfg->tx_lx_lane_mode[0] =
cfg->tx_lx_lane_mode[2] = 0x43;
cfg->tx_lx_hp_pd_enables[0] =
cfg->tx_lx_hp_pd_enables[1] =
cfg->tx_lx_hp_pd_enables[2] = 0x0c;
cfg->tx_lx_hp_pd_enables[3] = 0x3;
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
cfg->tx_lx_tx_band[i] = pd.tx_band_sel + 4;
if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
cfg->tx_lx_tx_drv_lvl[0] =
cfg->tx_lx_tx_drv_lvl[1] =
cfg->tx_lx_tx_drv_lvl[2] = 0x25;
cfg->tx_lx_tx_drv_lvl[3] = 0x22;
cfg->tx_lx_tx_emp_post1_lvl[0] =
cfg->tx_lx_tx_emp_post1_lvl[1] =
cfg->tx_lx_tx_emp_post1_lvl[2] = 0x23;
cfg->tx_lx_tx_emp_post1_lvl[3] = 0x27;
cfg->tx_lx_vmode_ctrl1[0] =
cfg->tx_lx_vmode_ctrl1[1] =
cfg->tx_lx_vmode_ctrl1[2] =
cfg->tx_lx_vmode_ctrl1[3] = 0x00;
cfg->tx_lx_vmode_ctrl2[0] =
cfg->tx_lx_vmode_ctrl2[1] =
cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
cfg->tx_lx_vmode_ctrl2[3] = 0x00;
} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
cfg->tx_lx_tx_drv_lvl[i] = 0x25;
cfg->tx_lx_tx_emp_post1_lvl[i] = 0x23;
cfg->tx_lx_vmode_ctrl1[i] = 0x00;
}
cfg->tx_lx_vmode_ctrl2[0] =
cfg->tx_lx_vmode_ctrl2[1] =
cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
cfg->tx_lx_vmode_ctrl2[3] = 0x00;
} else {
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
cfg->tx_lx_tx_drv_lvl[i] = 0x20;
cfg->tx_lx_tx_emp_post1_lvl[i] = 0x20;
cfg->tx_lx_vmode_ctrl1[i] = 0x00;
cfg->tx_lx_vmode_ctrl2[i] = 0x0E;
}
}
DBG("com_svs_mode_clk_sel = 0x%x", cfg->com_svs_mode_clk_sel);
DBG("com_hsclk_sel = 0x%x", cfg->com_hsclk_sel);
DBG("com_lock_cmp_en = 0x%x", cfg->com_lock_cmp_en);
DBG("com_pll_cctrl_mode0 = 0x%x", cfg->com_pll_cctrl_mode0);
DBG("com_pll_rctrl_mode0 = 0x%x", cfg->com_pll_rctrl_mode0);
DBG("com_cp_ctrl_mode0 = 0x%x", cfg->com_cp_ctrl_mode0);
DBG("com_dec_start_mode0 = 0x%x", cfg->com_dec_start_mode0);
DBG("com_div_frac_start1_mode0 = 0x%x", cfg->com_div_frac_start1_mode0);
DBG("com_div_frac_start2_mode0 = 0x%x", cfg->com_div_frac_start2_mode0);
DBG("com_div_frac_start3_mode0 = 0x%x", cfg->com_div_frac_start3_mode0);
DBG("com_integloop_gain0_mode0 = 0x%x", cfg->com_integloop_gain0_mode0);
DBG("com_integloop_gain1_mode0 = 0x%x", cfg->com_integloop_gain1_mode0);
DBG("com_lock_cmp1_mode0 = 0x%x", cfg->com_lock_cmp1_mode0);
DBG("com_lock_cmp2_mode0 = 0x%x", cfg->com_lock_cmp2_mode0);
DBG("com_lock_cmp3_mode0 = 0x%x", cfg->com_lock_cmp3_mode0);
DBG("com_core_clk_en = 0x%x", cfg->com_core_clk_en);
DBG("com_coreclk_div = 0x%x", cfg->com_coreclk_div);
DBG("phy_mode = 0x%x", cfg->phy_mode);
DBG("tx_l0_lane_mode = 0x%x", cfg->tx_lx_lane_mode[0]);
DBG("tx_l2_lane_mode = 0x%x", cfg->tx_lx_lane_mode[2]);
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
DBG("tx_l%d_tx_band = 0x%x", i, cfg->tx_lx_tx_band[i]);
DBG("tx_l%d_tx_drv_lvl = 0x%x", i, cfg->tx_lx_tx_drv_lvl[i]);
DBG("tx_l%d_tx_emp_post1_lvl = 0x%x", i,
cfg->tx_lx_tx_emp_post1_lvl[i]);
DBG("tx_l%d_vmode_ctrl1 = 0x%x", i, cfg->tx_lx_vmode_ctrl1[i]);
DBG("tx_l%d_vmode_ctrl2 = 0x%x", i, cfg->tx_lx_vmode_ctrl2[i]);
}
return 0;
}
static int hdmi_8996_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
struct hdmi_phy *phy = pll_get_phy(pll);
struct hdmi_8996_phy_pll_reg_cfg cfg;
int i, ret;
memset(&cfg, 0x00, sizeof(cfg));
ret = pll_calculate(rate, parent_rate, &cfg);
if (ret) {
DRM_ERROR("PLL calculation failed\n");
return ret;
}
/* Initially shut down PHY */
DBG("Disabling PHY");
hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x0);
udelay(500);
/* Power up sequence */
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x04);
hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL, 0x0F);
hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL, 0x0F);
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE,
0x03);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND,
cfg.tx_lx_tx_band[i]);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN,
0x03);
}
hdmi_tx_chan_write(pll, 0, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
cfg.tx_lx_lane_mode[0]);
hdmi_tx_chan_write(pll, 2, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
cfg.tx_lx_lane_mode[2]);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);
/* Bypass VCO calibration */
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
cfg.com_svs_mode_clk_sel);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_TRIM, 0x0F);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_IVCO, 0x0F);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL,
cfg.com_vco_tune_ctrl);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x06);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_SELECT, 0x30);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL,
cfg.com_hsclk_sel);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN,
cfg.com_lock_cmp_en);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
cfg.com_pll_cctrl_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
cfg.com_pll_rctrl_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0,
cfg.com_cp_ctrl_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0,
cfg.com_dec_start_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
cfg.com_div_frac_start1_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
cfg.com_div_frac_start2_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
cfg.com_div_frac_start3_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
cfg.com_integloop_gain0_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
cfg.com_integloop_gain1_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
cfg.com_lock_cmp1_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
cfg.com_lock_cmp2_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
cfg.com_lock_cmp3_mode0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN,
cfg.com_core_clk_en);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV,
cfg.com_coreclk_div);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG, 0x02);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM, 0x15);
/* TX lanes setup (TX 0/1/2/3) */
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL,
cfg.tx_lx_tx_drv_lvl[i]);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL,
cfg.tx_lx_tx_emp_post1_lvl[i]);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1,
cfg.tx_lx_vmode_ctrl1[i]);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2,
cfg.tx_lx_vmode_ctrl2[i]);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET,
0x00);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET,
0x00);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN,
0x03);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN,
0x40);
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES,
cfg.tx_lx_hp_pd_enables[i]);
}
hdmi_phy_write(phy, REG_HDMI_8996_PHY_MODE, cfg.phy_mode);
hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1F);
/*
* Ensure that vco configuration gets flushed to hardware before
* enabling the PLL
*/
wmb();
return 0;
}
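/*
 * Poll the PHY status register until the ready bit (BIT(0)) is set,
 * checking up to HDMI_PLL_POLL_MAX_READS times with a delay of
 * HDMI_PLL_POLL_TIMEOUT_US between reads. Returns nonzero once the PHY
 * reports ready, zero on timeout.
 */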
static int hdmi_8996_phy_ready_status(struct hdmi_phy *phy)
{
u32 nb_tries = HDMI_PLL_POLL_MAX_READS;
unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
u32 status;
int phy_ready = 0;
DBG("Waiting for PHY ready");
while (nb_tries--) {
status = hdmi_phy_read(phy, REG_HDMI_8996_PHY_STATUS);
phy_ready = status & BIT(0);
if (phy_ready)
break;
udelay(timeout);
}
DBG("PHY is %sready", phy_ready ? "" : "*not* ");
return phy_ready;
}
static int hdmi_8996_pll_lock_status(struct hdmi_pll_8996 *pll)
{
u32 status;
int nb_tries = HDMI_PLL_POLL_MAX_READS;
unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
int pll_locked = 0;
DBG("Waiting for PLL lock");
while (nb_tries--) {
status = hdmi_pll_read(pll,
REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
pll_locked = status & BIT(0);
if (pll_locked)
break;
udelay(timeout);
}
DBG("HDMI PLL is %slocked", pll_locked ? "" : "*not* ");
return pll_locked;
}
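/*
 * clk_ops .prepare: start the PHY/PLL using the configuration written by
 * .set_rate, wait for PLL lock, enable the TX lane drivers, disable SSC
 * and wait for the PHY ready bit before restarting the retiming buffer.
 * Note that the lock/ready helpers above return a boolean (nonzero on
 * success), not a negative errno.
 */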
static int hdmi_8996_pll_prepare(struct clk_hw *hw)
{
struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
struct hdmi_phy *phy = pll_get_phy(pll);
int i, ret = 0;
hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x1);
udelay(100);
hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
udelay(100);
ret = hdmi_8996_pll_lock_status(pll);
if (!ret)
return ret;
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
hdmi_tx_chan_write(pll, i,
REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
0x6F);
/* Disable SSC */
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER1, 0x0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER2, 0x0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1, 0x0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2, 0x0);
hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER, 0x2);
ret = hdmi_8996_phy_ready_status(phy);
if (!ret)
return ret;
/* Restart the retiming buffer */
hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x18);
udelay(1);
hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
return 0;
}
static long hdmi_8996_pll_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
{
if (rate < HDMI_PCLK_MIN_FREQ)
return HDMI_PCLK_MIN_FREQ;
else if (rate > HDMI_PCLK_MAX_FREQ)
return HDMI_PCLK_MAX_FREQ;
else
return rate;
}
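/*
 * Reverse the lock-comparator programming done at set_rate time:
 * reassemble the comparator value from the three LOCK_CMP registers,
 * convert it back to the TMDS bit-clock frequency and divide by 10 (the
 * bit clock is 10x the pixel clock) to recover the current rate.
 */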
static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
u64 fdata;
u32 cmp1, cmp2, cmp3, pll_cmp;
cmp1 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0);
cmp2 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0);
cmp3 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0);
pll_cmp = cmp1 | (cmp2 << 8) | (cmp3 << 16);
fdata = pll_cmp_to_fdata(pll_cmp + 1, parent_rate);
do_div(fdata, 10);
return fdata;
}
static void hdmi_8996_pll_unprepare(struct clk_hw *hw)
{
struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
struct hdmi_phy *phy = pll_get_phy(pll);
hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x6);
usleep_range(100, 150);
}
static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
{
struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
u32 status;
int pll_locked;
status = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
pll_locked = status & BIT(0);
return pll_locked;
}
static const struct clk_ops hdmi_8996_pll_ops = {
.set_rate = hdmi_8996_pll_set_clk_rate,
.round_rate = hdmi_8996_pll_round_rate,
.recalc_rate = hdmi_8996_pll_recalc_rate,
.prepare = hdmi_8996_pll_prepare,
.unprepare = hdmi_8996_pll_unprepare,
.is_enabled = hdmi_8996_pll_is_enabled,
};
static const struct clk_init_data pll_init = {
.name = "hdmipll",
.ops = &hdmi_8996_pll_ops,
.parent_data = (const struct clk_parent_data[]){
{ .fw_name = "xo", .name = "xo_board" },
},
.num_parents = 1,
.flags = CLK_IGNORE_UNUSED,
};
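/*
 * Map the QSERDES COM (PLL) region and the four per-lane TX regions,
 * then register the "hdmipll" clock and expose it through a simple OF
 * clock provider.
 */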
int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_pll_8996 *pll;
int i, ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
return -ENOMEM;
pll->pdev = pdev;
pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll");
if (IS_ERR(pll->mmio_qserdes_com)) {
DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
char name[32];
snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name);
if (IS_ERR(pll->mmio_qserdes_tx[i])) {
DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
}
pll->clk_hw.init = &pll_init;
ret = devm_clk_hw_register(dev, &pll->clk_hw);
if (ret) {
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return ret;
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
if (ret) {
DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
return ret;
}
return 0;
}
static const char * const hdmi_phy_8996_reg_names[] = {
"vddio",
"vcca",
};
static const char * const hdmi_phy_8996_clk_names[] = {
"iface", "ref",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
.type = MSM_HDMI_PHY_8996,
.reg_names = hdmi_phy_8996_reg_names,
.num_regs = ARRAY_SIZE(hdmi_phy_8996_reg_names),
.clk_names = hdmi_phy_8996_clk_names,
.num_clks = ARRAY_SIZE(hdmi_phy_8996_clk_names),
};
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include "msm_kms.h"
#include "hdmi.h"
static void msm_hdmi_phy_reset(struct hdmi *hdmi)
{
unsigned int val;
val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET);
} else {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET);
}
if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
} else {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET_PLL);
}
msleep(100);
if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET);
} else {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET);
}
if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET_PLL);
} else {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
}
}
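/*
 * Enable or disable the clocks needed for HPD detection, optionally
 * setting each clock's rate first. Clocks are disabled in the reverse
 * order of enabling.
 */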
static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
{
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
int i, ret;
if (enable) {
for (i = 0; i < config->hpd_clk_cnt; i++) {
if (config->hpd_freq && config->hpd_freq[i]) {
ret = clk_set_rate(hdmi->hpd_clks[i],
config->hpd_freq[i]);
if (ret)
dev_warn(dev,
"failed to set clk %s (%d)\n",
config->hpd_clk_names[i], ret);
}
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
DRM_DEV_ERROR(dev,
"failed to enable hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
}
}
} else {
for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
clk_disable_unprepare(hdmi->hpd_clks[i]);
}
}
int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
int ret;
unsigned long flags;
ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
if (ret) {
DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
goto fail;
}
ret = pinctrl_pm_select_default_state(dev);
if (ret) {
DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
goto fail;
}
if (hdmi->hpd_gpiod)
gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1);
pm_runtime_get_sync(dev);
enable_hpd_clocks(hdmi, true);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
msm_hdmi_set_mode(hdmi, true);
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
/* enable HPD events: */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
HDMI_HPD_INT_CTRL_INT_CONNECT |
HDMI_HPD_INT_CTRL_INT_EN);
/* set timeout to 4.1ms (max) for hardware debounce */
spin_lock_irqsave(&hdmi->reg_lock, flags);
hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
/* Toggle HPD circuit to trigger HPD sense */
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
return 0;
fail:
return ret;
}
void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
{
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
int ret;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
msm_hdmi_set_mode(hdmi, false);
enable_hpd_clocks(hdmi, false);
pm_runtime_put(dev);
ret = pinctrl_pm_select_sleep_state(dev);
if (ret)
dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
if (ret)
dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
/* Process HPD: */
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
/* ack & disable (temporarily) HPD events: */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
HDMI_HPD_INT_CTRL_INT_ACK);
DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
		/* detect disconnect if we are connected, or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
if (!detected)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
queue_work(hdmi->workq, &hdmi_bridge->hpd_work);
}
}
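/*
 * Read the cable-detect bit from HDMI_HPD_INT_STATUS, briefly powering
 * the block and enabling the HPD clocks around the register access.
 */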
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
uint32_t hpd_int_status;
pm_runtime_get_sync(&hdmi->pdev->dev);
enable_hpd_clocks(hdmi, true);
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
enable_hpd_clocks(hdmi, false);
pm_runtime_put(&hdmi->pdev->dev);
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
connector_status_connected : connector_status_disconnected;
}
#define HPD_GPIO_INDEX 2
static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
{
return gpiod_get_value(hdmi->hpd_gpiod) ?
connector_status_connected :
connector_status_disconnected;
}
enum drm_connector_status msm_hdmi_bridge_detect(
struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
enum drm_connector_status stat_gpio, stat_reg;
int retry = 20;
/*
* some platforms may not have hpd gpio. Rely only on the status
* provided by REG_HDMI_HPD_INT_STATUS in this case.
*/
if (!hdmi->hpd_gpiod)
return detect_reg(hdmi);
do {
stat_gpio = detect_gpio(hdmi);
stat_reg = detect_reg(hdmi);
if (stat_gpio == stat_reg)
break;
mdelay(10);
} while (--retry);
	/* the status we get from reading gpio seems to be more reliable,
	 * so trust it the most if we didn't manage to get the hdmi and
	 * gpio status to agree:
	 */
if (stat_gpio != stat_reg) {
DBG("HDMI_HPD_INT_STATUS tells us: %d", stat_reg);
DBG("hpd gpio tells us: %d", stat_gpio);
}
return stat_gpio;
}
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_hpd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
#include <linux/platform_device.h>
#include "hdmi.h"
static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
{
struct hdmi_phy_cfg *cfg = phy->cfg;
struct device *dev = &phy->pdev->dev;
int i, ret;
phy->regs = devm_kcalloc(dev, cfg->num_regs, sizeof(phy->regs[0]),
GFP_KERNEL);
if (!phy->regs)
return -ENOMEM;
phy->clks = devm_kcalloc(dev, cfg->num_clks, sizeof(phy->clks[0]),
GFP_KERNEL);
if (!phy->clks)
return -ENOMEM;
for (i = 0; i < cfg->num_regs; i++)
phy->regs[i].supply = cfg->reg_names[i];
ret = devm_regulator_bulk_get(dev, cfg->num_regs, phy->regs);
if (ret) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "failed to get phy regulators: %d\n", ret);
return ret;
}
for (i = 0; i < cfg->num_clks; i++) {
struct clk *clk;
clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
cfg->clk_names[i], ret);
return ret;
}
phy->clks[i] = clk;
}
return 0;
}
int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
{
struct hdmi_phy_cfg *cfg = phy->cfg;
struct device *dev = &phy->pdev->dev;
int i, ret = 0;
pm_runtime_get_sync(dev);
ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
if (ret) {
DRM_DEV_ERROR(dev, "failed to enable regulators: (%d)\n", ret);
return ret;
}
for (i = 0; i < cfg->num_clks; i++) {
ret = clk_prepare_enable(phy->clks[i]);
if (ret)
DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
cfg->clk_names[i], ret);
}
return ret;
}
void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy)
{
struct hdmi_phy_cfg *cfg = phy->cfg;
struct device *dev = &phy->pdev->dev;
int i;
for (i = cfg->num_clks - 1; i >= 0; i--)
clk_disable_unprepare(phy->clks[i]);
regulator_bulk_disable(cfg->num_regs, phy->regs);
pm_runtime_put_sync(dev);
}
void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock)
{
if (!phy || !phy->cfg->powerup)
return;
phy->cfg->powerup(phy, pixclock);
}
void msm_hdmi_phy_powerdown(struct hdmi_phy *phy)
{
if (!phy || !phy->cfg->powerdown)
return;
phy->cfg->powerdown(phy);
}
static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
enum hdmi_phy_type type)
{
int ret;
switch (type) {
case MSM_HDMI_PHY_8960:
ret = msm_hdmi_pll_8960_init(pdev);
break;
case MSM_HDMI_PHY_8996:
ret = msm_hdmi_pll_8996_init(pdev);
break;
/*
* we don't have PLL support for these, don't report an error for now
*/
case MSM_HDMI_PHY_8x60:
case MSM_HDMI_PHY_8x74:
default:
ret = 0;
break;
}
return ret;
}
static int msm_hdmi_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_phy *phy;
int ret;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
		return -ENOMEM;
phy->cfg = (struct hdmi_phy_cfg *)of_device_get_match_data(dev);
if (!phy->cfg)
return -ENODEV;
phy->mmio = msm_ioremap(pdev, "hdmi_phy");
if (IS_ERR(phy->mmio)) {
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
return -ENOMEM;
}
phy->pdev = pdev;
ret = msm_hdmi_phy_resource_init(phy);
if (ret)
return ret;
pm_runtime_enable(&pdev->dev);
ret = msm_hdmi_phy_resource_enable(phy);
if (ret)
return ret;
ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
if (ret) {
DRM_DEV_ERROR(dev, "couldn't init PLL\n");
msm_hdmi_phy_resource_disable(phy);
return ret;
}
msm_hdmi_phy_resource_disable(phy);
platform_set_drvdata(pdev, phy);
return 0;
}
static int msm_hdmi_phy_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
return 0;
}
static const struct of_device_id msm_hdmi_phy_dt_match[] = {
{ .compatible = "qcom,hdmi-phy-8660",
.data = &msm_hdmi_phy_8x60_cfg },
{ .compatible = "qcom,hdmi-phy-8960",
.data = &msm_hdmi_phy_8960_cfg },
{ .compatible = "qcom,hdmi-phy-8974",
.data = &msm_hdmi_phy_8x74_cfg },
{ .compatible = "qcom,hdmi-phy-8084",
.data = &msm_hdmi_phy_8x74_cfg },
{ .compatible = "qcom,hdmi-phy-8996",
.data = &msm_hdmi_phy_8996_cfg },
{}
};
static struct platform_driver msm_hdmi_phy_platform_driver = {
.probe = msm_hdmi_phy_probe,
.remove = msm_hdmi_phy_remove,
.driver = {
.name = "msm_hdmi_phy",
.of_match_table = msm_hdmi_phy_dt_match,
},
};
void __init msm_hdmi_phy_driver_register(void)
{
platform_driver_register(&msm_hdmi_phy_platform_driver);
}
void __exit msm_hdmi_phy_driver_unregister(void)
{
platform_driver_unregister(&msm_hdmi_phy_platform_driver);
}
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_phy.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
*/
#include "hdmi.h"
#include <linux/firmware/qcom/qcom_scm.h>
#define HDCP_REG_ENABLE 0x01
#define HDCP_REG_DISABLE 0x00
#define HDCP_PORT_ADDR 0x74
#define HDCP_INT_STATUS_MASK ( \
HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
#define AUTH_WORK_RETRIES_TIME 100
#define AUTH_RETRIES_TIME 30
/* QFPROM Registers for HDMI/HDCP */
#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB 0x000000F8
#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB 0x000000FC
#define HDCP_KSV_LSB 0x000060D8
#define HDCP_KSV_MSB 0x000060DC
enum DS_TYPE { /* type of downstream device */
DS_UNKNOWN,
DS_RECEIVER,
DS_REPEATER,
};
enum hdmi_hdcp_state {
HDCP_STATE_NO_AKSV,
HDCP_STATE_INACTIVE,
HDCP_STATE_AUTHENTICATING,
HDCP_STATE_AUTHENTICATED,
HDCP_STATE_AUTH_FAILED
};
struct hdmi_hdcp_reg_data {
u32 reg_id;
u32 off;
char *name;
u32 reg_val;
};
struct hdmi_hdcp_ctrl {
struct hdmi *hdmi;
u32 auth_retries;
bool tz_hdcp;
enum hdmi_hdcp_state hdcp_state;
struct work_struct hdcp_auth_work;
struct work_struct hdcp_reauth_work;
#define AUTH_ABORT_EV 1
#define AUTH_RESULT_RDY_EV 2
unsigned long auth_event;
wait_queue_head_t auth_event_queue;
u32 ksv_fifo_w_index;
/*
* store aksv from qfprom
*/
u32 aksv_lsb;
u32 aksv_msb;
bool aksv_valid;
u32 ds_type;
u32 bksv_lsb;
u32 bksv_msb;
u8 dev_count;
u8 depth;
u8 ksv_list[5 * 127];
bool max_cascade_exceeded;
bool max_dev_exceeded;
};
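/*
 * Read data_len bytes from a DDC device. @addr is the 8-bit DDC port
 * address (e.g. HDCP_PORT_ADDR, 0x74), so it is shifted right once to
 * form the 7-bit i2c address. The transfer is a write of the register
 * offset followed by a repeated-start read, retried up to five times.
 */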
static int msm_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
u8 *data, u16 data_len)
{
int rc;
int retry = 5;
struct i2c_msg msgs[] = {
{
.addr = addr >> 1,
.flags = 0,
.len = 1,
.buf = &offset,
}, {
.addr = addr >> 1,
.flags = I2C_M_RD,
.len = data_len,
.buf = data,
}
};
DBG("Start DDC read");
retry:
rc = i2c_transfer(hdmi->i2c, msgs, 2);
retry--;
if (rc == 2)
rc = 0;
else if (retry > 0)
goto retry;
else
rc = -EIO;
DBG("End DDC read %d", rc);
return rc;
}
#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
static int msm_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
u8 *data, u16 data_len)
{
int rc;
int retry = 10;
u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
struct i2c_msg msgs[] = {
{
.addr = addr >> 1,
.flags = 0,
.len = 1,
}
};
DBG("Start DDC write");
if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
pr_err("%s: write size too big\n", __func__);
return -ERANGE;
}
buf[0] = offset;
memcpy(&buf[1], data, data_len);
msgs[0].buf = buf;
msgs[0].len = data_len + 1;
retry:
rc = i2c_transfer(hdmi->i2c, msgs, 1);
retry--;
if (rc == 1)
rc = 0;
else if (retry > 0)
goto retry;
else
rc = -EIO;
DBG("End DDC write %d", rc);
return rc;
}
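/*
 * Write a list of (register, value) pairs to the HDCP block. When
 * TrustZone owns the HDCP registers (tz_hdcp), the writes go through
 * qcom_scm_hdcp_req() in chunks of QCOM_SCM_HDCP_MAX_REQ_CNT;
 * otherwise they are plain MMIO writes.
 */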
static int msm_hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
u32 *pdata, u32 count)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
u32 resp, phy_addr, idx = 0;
int i, ret = 0;
WARN_ON(!pdata || !preg || (count == 0));
if (hdcp_ctrl->tz_hdcp) {
phy_addr = (u32)hdmi->mmio_phy_addr;
while (count) {
memset(scm_buf, 0, sizeof(scm_buf));
for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
i++) {
scm_buf[i].addr = phy_addr + preg[idx];
scm_buf[i].val = pdata[idx];
idx++;
}
ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
if (ret || resp) {
pr_err("%s: error: scm_call ret=%d resp=%u\n",
__func__, ret, resp);
ret = -EINVAL;
break;
}
count -= i;
}
} else {
for (i = 0; i < count; i++)
hdmi_write(hdmi, preg[i], pdata[i]);
}
return ret;
}
void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val, hdcp_int_status;
unsigned long flags;
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
if (!hdcp_int_status) {
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
return;
}
/* Clear Interrupts */
reg_val |= hdcp_int_status << 1;
/* Clear AUTH_FAIL_INFO as well */
if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
DBG("hdcp irq %x", hdcp_int_status);
if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
pr_info("%s:AUTH_SUCCESS_INT received\n", __func__);
if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
wake_up_all(&hdcp_ctrl->auth_event_queue);
}
}
if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
__func__, reg_val);
if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
else if (HDCP_STATE_AUTHENTICATING ==
hdcp_ctrl->hdcp_state) {
set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
wake_up_all(&hdcp_ctrl->auth_event_queue);
}
}
}
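/*
 * Abortable sleep: wait up to @ms milliseconds, but wake early if the
 * given event bit is set (e.g. AUTH_ABORT_EV). Returns -ECANCELED when
 * woken by the event, 0 when the full timeout elapsed.
 */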
static int msm_hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
{
int rc;
rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
!!test_bit(ev, &hdcp_ctrl->auth_event),
msecs_to_jiffies(ms));
if (rc) {
pr_info("%s: msleep is canceled by event %d\n",
__func__, ev);
clear_bit(ev, &hdcp_ctrl->auth_event);
return -ECANCELED;
}
return 0;
}
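/*
 * A KSV is valid only if its 40 bits contain exactly 20 ones (and thus
 * 20 zeros), so hweight32() of the two halves must sum to 20.
 */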
static int msm_hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
/* Fetch aksv from QFPROM, this info should be public. */
hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
/* check there are 20 ones in AKSV */
if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
!= 20) {
pr_err("%s: AKSV QFPROM doesn't have 20 1's, 20 0's\n",
__func__);
pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
__func__, hdcp_ctrl->aksv_msb,
hdcp_ctrl->aksv_lsb);
return -EINVAL;
}
DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
return 0;
}
static int msm_reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val, failure, nack0;
int rc = 0;
/* Check for any DDC transfer failures */
reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
reg_val, failure, nack0);
if (failure) {
/*
* Indicates that the last HDCP HW DDC transfer failed.
* This occurs when a transfer is attempted with HDCP DDC
* disabled (HDCP_DDC_DISABLE=1) or the number of retries
* matches HDCP_DDC_RETRY_CNT.
* Failure occurred, let's clear it.
*/
DBG("DDC failure detected");
/* First, Disable DDC */
hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
HDMI_HDCP_DDC_CTRL_0_DISABLE);
/* ACK the Failure to Clear it */
reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
/* Check if the FAILURE got Cleared */
reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
pr_info("%s: Unable to clear HDCP DDC Failure\n",
__func__);
/* Re-Enable HDCP DDC */
hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
}
if (nack0) {
DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
/* Reset HDMI DDC software status */
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
/* Reset HDMI DDC Controller */
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
/* If previous msleep is aborted, skip this msleep */
if (!rc)
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
}
return rc;
}
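/*
 * Wait for the hardware DDC engine to go idle: XFER_DONE set, XFER_REQ
 * clear and the HW DONE bit set. Polls up to 100 times with abortable
 * 20ms sleeps in between.
 */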
static int msm_hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
u32 hdcp_ddc_status, ddc_hw_status;
u32 xfer_done, xfer_req, hw_done;
bool hw_not_ready;
u32 timeout_count;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
return 0;
/* Wait to be clean on DDC HW engine */
timeout_count = 100;
do {
hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
		hw_not_ready = !xfer_done || xfer_req || !hw_done;
		if (!hw_not_ready)
			break;
timeout_count--;
if (!timeout_count) {
pr_warn("%s: hw_ddc_clean failed\n", __func__);
return -ETIMEDOUT;
}
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
return 0;
}
static void msm_hdmi_hdcp_reauth_work(struct work_struct *work)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
struct hdmi_hdcp_ctrl, hdcp_reauth_work);
struct hdmi *hdmi = hdcp_ctrl->hdmi;
unsigned long flags;
u32 reg_val;
DBG("HDCP REAUTH WORK");
/*
* Disable HPD circuitry.
* This is needed to reset the HDCP cipher engine so that when we
* attempt a re-authentication, HW would clear the AN0_READY and
* AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
*/
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
reg_val &= ~HDMI_HPD_CTRL_ENABLE;
hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
/* Disable HDCP interrupts */
hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
/* Wait to be clean on DDC HW engine */
if (msm_hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
pr_info("%s: reauth work aborted\n", __func__);
return;
}
/* Disable encryption and disable the HDCP block */
hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
/* Enable HPD circuitry */
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
reg_val |= HDMI_HPD_CTRL_ENABLE;
hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
/*
* Only retry defined times then abort current authenticating process
*/
if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
hdcp_ctrl->auth_retries = 0;
pr_info("%s: abort reauthentication!\n", __func__);
return;
}
DBG("Queue AUTH WORK");
hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
}
static int msm_hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 link0_status;
u32 reg_val;
unsigned long flags;
int rc;
if (!hdcp_ctrl->aksv_valid) {
rc = msm_hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
if (rc) {
pr_err("%s: ASKV validation failed\n", __func__);
hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
return -ENOTSUPP;
}
hdcp_ctrl->aksv_valid = true;
}
spin_lock_irqsave(&hdmi->reg_lock, flags);
/* disable HDMI Encrypt */
reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
reg_val &= ~HDMI_CTRL_ENCRYPTED;
hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
/* Enabling Software DDC */
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
/*
* Write AKSV read from QFPROM to the HDCP registers.
* This step is needed for HDCP authentication and must be
* written before enabling HDCP.
*/
hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
/*
* HDCP setup prior to enabling HDCP_CTRL.
* Setup seed values for random number An.
*/
hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
/* Disable the RngCipher state */
reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
DBG("HDCP_DEBUG_CTRL=0x%08x",
hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
/*
* Ensure that all register writes are completed before
* enabling HDCP cipher
*/
wmb();
/*
* Enable HDCP
* This needs to be done as early as possible in order for the
* hardware to make An available to read
*/
hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
/*
* If we had stale values for the An ready bit, it should most
* likely be cleared now after enabling HDCP cipher
*/
link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
if (!(link0_status &
(HDMI_HDCP_LINK0_STATUS_AN_0_READY |
HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
DBG("An not ready after enabling HDCP");
/* Clear any DDC failures from previous tries before enable HDCP*/
rc = msm_reset_hdcp_ddc_failures(hdcp_ctrl);
return rc;
}
static void msm_hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
unsigned long flags;
DBG("hdcp auth failed, queue reauth work");
/* clear HDMI Encrypt */
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
reg_val &= ~HDMI_CTRL_ENCRYPTED;
hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
}
static void msm_hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
unsigned long flags;
/*
* Disable software DDC before going into part3 to make sure
* there is no Arbitration between software and hardware for DDC
*/
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
/* enable HDMI Encrypt */
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
reg_val |= HDMI_CTRL_ENCRYPTED;
hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
hdcp_ctrl->auth_retries = 0;
}
/*
* hdcp authenticating part 1
* Wait Key/An ready
* Read BCAPS from sink
* Write BCAPS and AKSV into HDCP engine
* Write An and AKSV to sink
* Read BKSV from sink and write into HDCP engine
*/
static int msm_hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 link0_status, keys_state;
u32 timeout_count;
bool an_ready;
/* Wait for HDCP keys to be checked and validated */
timeout_count = 100;
do {
link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
keys_state = (link0_status >> 28) & 0x7;
if (keys_state == HDCP_KEYS_STATE_VALID)
break;
DBG("Keys not ready(%d). s=%d, l0=%0x08x",
timeout_count, keys_state, link0_status);
timeout_count--;
if (!timeout_count) {
pr_err("%s: Wait key state timedout", __func__);
return -ETIMEDOUT;
}
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
timeout_count = 100;
do {
link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
&& (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
if (an_ready)
break;
DBG("An not ready(%d). l0_status=0x%08x",
timeout_count, link0_status);
timeout_count--;
if (!timeout_count) {
pr_err("%s: Wait An timedout", __func__);
return -ETIMEDOUT;
}
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
return 0;
}
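/*
 * Read An (8 bytes, from RCVPORT_DATA5/6) and our AKSV (5 bytes, from
 * RCVPORT_DATA3/4) out of the HDCP engine and send them to the sink
 * over DDC: An at offset 0x18, AKSV at offset 0x10.
 */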
static int msm_hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 link0_aksv_0, link0_aksv_1;
u32 link0_an[2];
u8 aksv[5];
/* Read An0 and An1 */
link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
/* Read AKSV */
link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
DBG("Link ASKV=%08x%08x", link0_aksv_0, link0_aksv_1);
/* Copy An and AKSV to byte arrays for transmission */
aksv[0] = link0_aksv_0 & 0xFF;
aksv[1] = (link0_aksv_0 >> 8) & 0xFF;
aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
aksv[4] = link0_aksv_1 & 0xFF;
/* Write An to offset 0x18 */
rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
(u16)sizeof(link0_an));
if (rc) {
pr_err("%s:An write failed\n", __func__);
return rc;
}
DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
/* Write AKSV to offset 0x10 */
rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
if (rc) {
pr_err("%s:AKSV write failed\n", __func__);
return rc;
}
DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
return 0;
}
static int msm_hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u8 bksv[5];
u32 reg[2], data[2];
/* Read BKSV at offset 0x00 */
rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
if (rc) {
pr_err("%s:BKSV read failed\n", __func__);
return rc;
}
hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
(bksv[2] << 16) | (bksv[3] << 24);
hdcp_ctrl->bksv_msb = bksv[4];
DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
/* check there are 20 ones in BKSV */
if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
!= 20) {
pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
return -EINVAL;
}
/* Write BKSV read from sink to HDCP registers */
reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
data[0] = hdcp_ctrl->bksv_lsb;
reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
data[1] = hdcp_ctrl->bksv_msb;
rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
return rc;
}
static int msm_hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg, data;
u8 bcaps;
rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
if (rc) {
pr_err("%s:BCAPS read failed\n", __func__);
return rc;
}
DBG("BCAPS=%02x", bcaps);
/* receiver (0), repeater (1) */
hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
/* Write BCAPS to the hardware */
reg = REG_HDMI_HDCP_RCVPORT_DATA12;
data = (u32)bcaps;
	rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
return rc;
}
static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
unsigned long flags;
int rc;
/* Wait for AKSV key and An ready */
rc = msm_hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
if (rc) {
pr_err("%s: wait key and an ready failed\n", __func__);
return rc;
}
/* Read BCAPS and send to HDCP engine */
rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl);
if (rc) {
pr_err("%s: read bcaps error, abort\n", __func__);
return rc;
}
/*
* 1.1_Features turned off by default.
* No need to write AInfo since 1.1_Features is disabled.
*/
hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
/* Send AKSV and An to sink */
rc = msm_hdmi_hdcp_send_aksv_an(hdcp_ctrl);
if (rc) {
pr_err("%s:An/Aksv write failed\n", __func__);
return rc;
}
/* Read BKSV and send to HDCP engine*/
rc = msm_hdmi_hdcp_recv_bksv(hdcp_ctrl);
if (rc) {
pr_err("%s:BKSV Process failed\n", __func__);
return rc;
}
/* Enable HDCP interrupts and ack/clear any stale interrupts */
spin_lock_irqsave(&hdmi->reg_lock, flags);
hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
return 0;
}
/* read R0' from sink and pass it to HDCP engine */
static int msm_hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
int rc = 0;
u8 buf[2];
/*
* HDCP Compliance Test case 1A-01:
* Wait here at least 100ms before reading R0'
*/
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
if (rc)
return rc;
/* Read R0' at offset 0x08 */
rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
if (rc) {
pr_err("%s:R0' read failed\n", __func__);
return rc;
}
DBG("R0'=%02x%02x", buf[1], buf[0]);
/* Write R0' to HDCP registers and check to see if it is a match */
hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
(((u32)buf[1]) << 8) | buf[0]);
return 0;
}
/* Wait for authenticating result: R0/R0' are matched or not */
static int msm_hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 link0_status;
int rc;
/* wait for hdcp irq, 10 sec should be long enough */
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
if (!rc) {
pr_err("%s: Wait Auth IRQ timeout\n", __func__);
return -ETIMEDOUT;
}
link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
pr_err("%s: Authentication Part I failed\n", __func__);
return -EINVAL;
}
/* Enable HDCP Encryption */
hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
HDMI_HDCP_CTRL_ENABLE |
HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
return 0;
}
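/*
 * Read and sanity-check BSTATUS. Per the layout decoded below: bits 6:0
 * are DEVICE_COUNT, bit 7 is MAX_DEVS_EXCEEDED, bits 10:8 are the
 * repeater cascade DEPTH and bit 11 is MAX_CASCADE_EXCEEDED.
 */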
static int msm_hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
u16 *pbstatus)
{
int rc;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
bool max_devs_exceeded = false, max_cascade_exceeded = false;
u32 repeater_cascade_depth = 0, down_stream_devices = 0;
u16 bstatus;
u8 buf[2];
/* Read BSTATUS at offset 0x41 */
rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
if (rc) {
pr_err("%s: BSTATUS read failed\n", __func__);
goto error;
}
*pbstatus = bstatus = (buf[1] << 8) | buf[0];
down_stream_devices = bstatus & 0x7F;
repeater_cascade_depth = (bstatus >> 8) & 0x7;
max_devs_exceeded = (bstatus & BIT(7)) ? true : false;
max_cascade_exceeded = (bstatus & BIT(11)) ? true : false;
if (down_stream_devices == 0) {
/*
* If no downstream devices are attached to the repeater
* then part II fails.
* todo: The other approach would be to continue PART II.
*/
pr_err("%s: No downstream devices\n", __func__);
rc = -EINVAL;
goto error;
}
/*
* HDCP Compliance 1B-05:
* Check if no. of devices connected to repeater
* exceed max_devices_connected from bit 7 of Bstatus.
*/
if (max_devs_exceeded) {
pr_err("%s: no. of devs connected exceeds max allowed",
__func__);
rc = -EINVAL;
goto error;
}
/*
* HDCP Compliance 1B-06:
* Check if no. of cascade connected to repeater
* exceed max_cascade_connected from bit 11 of Bstatus.
*/
if (max_cascade_exceeded) {
pr_err("%s: no. of cascade conn exceeds max allowed",
__func__);
rc = -EINVAL;
goto error;
}
error:
hdcp_ctrl->dev_count = down_stream_devices;
hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
hdcp_ctrl->depth = repeater_cascade_depth;
return rc;
}
static int msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg, data;
u32 timeout_count;
u16 bstatus;
u8 bcaps;
/*
* Wait until READY bit is set in BCAPS, as per HDCP specifications
* maximum permitted time to check for READY bit is five seconds.
*/
timeout_count = 100;
do {
/* Read BCAPS at offset 0x40 */
rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
if (rc) {
pr_err("%s: BCAPS read failed\n", __func__);
return rc;
}
if (bcaps & BIT(5))
break;
timeout_count--;
if (!timeout_count) {
pr_err("%s: Wait KSV fifo ready timedout", __func__);
return -ETIMEDOUT;
}
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
rc = msm_hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
if (rc) {
pr_err("%s: bstatus error\n", __func__);
return rc;
}
/* Write BSTATUS and BCAPS to HDCP registers */
reg = REG_HDMI_HDCP_RCVPORT_DATA12;
data = bcaps | (bstatus << 8);
	rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
if (rc) {
pr_err("%s: BSTATUS write failed\n", __func__);
return rc;
}
return 0;
}
/*
* hdcp authenticating part 2: 2nd
* read ksv fifo from sink
* transfer V' from sink to HDCP engine
* reset SHA engine
*/
static int msm_hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
int rc = 0;
struct hdmi_hdcp_reg_data reg_data[] = {
{REG_HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"},
{REG_HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"},
{REG_HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"},
{REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
{REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
};
struct hdmi_hdcp_reg_data *rd;
u32 size = ARRAY_SIZE(reg_data);
u32 reg[ARRAY_SIZE(reg_data)];
u32 data[ARRAY_SIZE(reg_data)];
int i;
for (i = 0; i < size; i++) {
		rd = &reg_data[i];
rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
if (rc) {
pr_err("%s: Read %s failed\n", __func__, rd->name);
goto error;
}
DBG("%s =%x", rd->name, data[i]);
reg[i] = reg_data[i].reg_id;
}
rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
error:
return rc;
}
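/*
 * Read the downstream KSV list (5 bytes per device) from the sink's
 * KSV FIFO at DDC offset 0x43 in one shot.
 */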
static int msm_hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 ksv_bytes;
ksv_bytes = 5 * hdcp_ctrl->dev_count;
rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
hdcp_ctrl->ksv_list, ksv_bytes);
if (rc)
pr_err("%s: KSV FIFO read failed\n", __func__);
return rc;
}
static int msm_hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
u32 reg[2], data[2];
	int rc;
reg[0] = REG_HDMI_HDCP_SHA_CTRL;
data[0] = HDCP_REG_ENABLE;
reg[1] = REG_HDMI_HDCP_SHA_CTRL;
data[1] = HDCP_REG_DISABLE;
rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
return rc;
}
static int msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(
struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
u32 timeout_count;
	/*
	 * Read the KSV FIFO over DDC.
	 * The Key Selection Vector FIFO is used to pull downstream KSVs
	 * from HDCP repeaters.
	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
	 * auto-incrementing access.
	 * All bytes read as 0x00 for HDCP receivers that are not
	 * HDCP repeaters (REPEATER == 0).
	 */
timeout_count = 100;
do {
rc = msm_hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
if (!rc)
break;
timeout_count--;
if (!timeout_count) {
pr_err("%s: Recv ksv fifo timedout", __func__);
return -ETIMEDOUT;
}
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
rc = msm_hdmi_hdcp_transfer_v_h(hdcp_ctrl);
if (rc) {
pr_err("%s: transfer V failed\n", __func__);
return rc;
}
/* reset SHA engine before write ksv fifo */
rc = msm_hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
if (rc) {
pr_err("%s: fail to reset sha engine\n", __func__);
return rc;
}
return 0;
}
/*
 * Write the KSV FIFO to HDCP_SHA_DATA.
 * This is done one byte at a time, starting with the LSB.
 * Once 64 bytes have been written, we need to poll for
 * HDCP_SHA_BLOCK_DONE before writing any further.
 * Once the last byte has been written, we need to poll for
 * HDCP_SHA_COMP_DONE to wait until the HW finishes.
 */
static int msm_hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int i;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 ksv_bytes, last_byte = 0;
u8 *ksv_fifo = NULL;
u32 reg_val, data, reg;
	int rc = 0;
ksv_bytes = 5 * hdcp_ctrl->dev_count;
/* Check if need to wait for HW completion */
if (hdcp_ctrl->ksv_fifo_w_index) {
reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
DBG("HDCP_SHA_STATUS=%08x", reg_val);
if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
/* check COMP_DONE if last write */
if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
DBG("COMP_DONE");
return 0;
} else {
return -EAGAIN;
}
} else {
/* check BLOCK_DONE if not last write */
if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
return -EAGAIN;
DBG("BLOCK_DONE");
}
}
ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index;
if (ksv_bytes <= 64)
last_byte = 1;
else
ksv_bytes = 64;
ksv_fifo = hdcp_ctrl->ksv_list;
ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
for (i = 0; i < ksv_bytes; i++) {
/* Write KSV byte and set DONE bit[0] for last byte*/
reg_val = ksv_fifo[i] << 16;
if ((i == (ksv_bytes - 1)) && last_byte)
reg_val |= HDMI_HDCP_SHA_DATA_DONE;
reg = REG_HDMI_HDCP_SHA_DATA;
data = reg_val;
		rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
if (rc)
return rc;
}
hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
	/*
	 * Return -EAGAIN to notify the caller to wait for COMP_DONE or
	 * BLOCK_DONE.
	 */
return -EAGAIN;
}
/* write ksv fifo into HDCP engine */
static int msm_hdmi_hdcp_auth_part2_write_ksv_fifo(
struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
u32 timeout_count;
hdcp_ctrl->ksv_fifo_w_index = 0;
timeout_count = 100;
do {
rc = msm_hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
if (!rc)
break;
if (rc != -EAGAIN)
return rc;
timeout_count--;
if (!timeout_count) {
pr_err("%s: Write KSV fifo timedout", __func__);
return -ETIMEDOUT;
}
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
return 0;
}
static int msm_hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 link0_status;
u32 timeout_count = 100;
do {
link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
break;
timeout_count--;
if (!timeout_count) {
pr_err("%s: HDCP V Match timedout", __func__);
return -ETIMEDOUT;
}
rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
return 0;
}
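/*
 * Top-level authentication worker: prepare the HDCP engine, run part I
 * (key exchange plus R0/R0' verification) and, for repeaters, part II
 * (KSV FIFO readback and V/V' comparison). On failure the reauth work
 * is scheduled; on success encryption is switched on.
 */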
static void msm_hdmi_hdcp_auth_work(struct work_struct *work)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
struct hdmi_hdcp_ctrl, hdcp_auth_work);
int rc;
rc = msm_hdmi_hdcp_auth_prepare(hdcp_ctrl);
if (rc) {
pr_err("%s: auth prepare failed %d\n", __func__, rc);
goto end;
}
/* HDCP PartI */
rc = msm_hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
if (rc) {
pr_err("%s: key exchange failed %d\n", __func__, rc);
goto end;
}
rc = msm_hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
if (rc) {
pr_err("%s: receive r0 failed %d\n", __func__, rc);
goto end;
}
rc = msm_hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
if (rc) {
pr_err("%s: verify r0 failed %d\n", __func__, rc);
goto end;
}
pr_info("%s: Authentication Part I successful\n", __func__);
if (hdcp_ctrl->ds_type == DS_RECEIVER)
goto end;
/* HDCP PartII */
rc = msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
if (rc) {
pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
goto end;
}
rc = msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
if (rc) {
pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
goto end;
}
rc = msm_hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
if (rc) {
pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
goto end;
}
rc = msm_hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
if (rc)
pr_err("%s: check v match failed %d\n", __func__, rc);
end:
if (rc == -ECANCELED) {
pr_info("%s: hdcp authentication canceled\n", __func__);
} else if (rc == -ENOTSUPP) {
pr_info("%s: hdcp is not supported\n", __func__);
} else if (rc) {
pr_err("%s: hdcp authentication failed\n", __func__);
msm_hdmi_hdcp_auth_fail(hdcp_ctrl);
} else {
msm_hdmi_hdcp_auth_done(hdcp_ctrl);
}
}
void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
unsigned long flags;
if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
DBG("still active or activating or no askv. returning");
return;
}
/* clear HDMI Encrypt */
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
reg_val &= ~HDMI_CTRL_ENCRYPTED;
hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
hdcp_ctrl->auth_event = 0;
hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
hdcp_ctrl->auth_retries = 0;
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
}
void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
unsigned long flags;
u32 reg_val;
if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
DBG("hdcp inactive or no aksv. returning");
return;
}
/*
* Disable HPD circuitry.
* This is needed to reset the HDCP cipher engine so that when we
* attempt a re-authentication, HW would clear the AN0_READY and
* AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
*/
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
reg_val &= ~HDMI_HPD_CTRL_ENABLE;
hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
/*
* Disable HDCP interrupts.
* Also, need to set the state to inactive here so that any ongoing
* reauth works will know that the HDCP session has been turned off.
*/
hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
/*
* Cancel any pending auth/reauth attempts.
* If one is ongoing, this will wait for it to finish.
* No more reauthentication attempts will be scheduled since we
* set the current state to inactive.
*/
set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
wake_up_all(&hdcp_ctrl->auth_event_queue);
cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);
hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
/* Disable encryption and disable the HDCP block */
hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
spin_lock_irqsave(&hdmi->reg_lock, flags);
reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
reg_val &= ~HDMI_CTRL_ENCRYPTED;
hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
/* Enable HPD circuitry */
reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
reg_val |= HDMI_HPD_CTRL_ENABLE;
hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
DBG("HDCP: Off");
}
struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
if (!hdmi->qfprom_mmio) {
pr_err("%s: HDCP is not supported without qfprom\n",
__func__);
return ERR_PTR(-EINVAL);
}
hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
if (!hdcp_ctrl)
return ERR_PTR(-ENOMEM);
INIT_WORK(&hdcp_ctrl->hdcp_auth_work, msm_hdmi_hdcp_auth_work);
INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, msm_hdmi_hdcp_reauth_work);
init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
hdcp_ctrl->hdmi = hdmi;
hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
hdcp_ctrl->aksv_valid = false;
if (qcom_scm_hdcp_available())
hdcp_ctrl->tz_hdcp = true;
else
hdcp_ctrl->tz_hdcp = false;
return hdcp_ctrl;
}
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
{
if (hdmi) {
kfree(hdmi->hdcp_ctrl);
hdmi->hdcp_ctrl = NULL;
}
}
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "hdmi.h"
struct hdmi_i2c_adapter {
struct i2c_adapter base;
struct hdmi *hdmi;
bool sw_done;
wait_queue_head_t ddc_event;
};
#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
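/*
 * Reset the DDC controller and program its clock speed, timeout and
 * reference timer before each transfer.
 */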
static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
{
struct hdmi *hdmi = hdmi_i2c->hdmi;
hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
HDMI_DDC_CTRL_SW_STATUS_RESET);
hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
HDMI_DDC_CTRL_SOFT_RESET);
hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
HDMI_DDC_SPEED_THRESHOLD(2) |
HDMI_DDC_SPEED_PRESCALE(10));
hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
HDMI_DDC_SETUP_TIMEOUT(0xff));
/* enable reference timer for 27us */
hdmi_write(hdmi, REG_HDMI_DDC_REF,
HDMI_DDC_REF_REFTIMER_ENABLE |
HDMI_DDC_REF_REFTIMER(27));
}
static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
{
struct hdmi *hdmi = hdmi_i2c->hdmi;
struct drm_device *dev = hdmi->dev;
uint32_t retry = 0xffff;
uint32_t ddc_int_ctrl;
do {
--retry;
hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
HDMI_DDC_INT_CTRL_SW_DONE_ACK |
HDMI_DDC_INT_CTRL_SW_DONE_MASK);
ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
if (!retry) {
DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
return -ETIMEDOUT;
}
hdmi_i2c->sw_done = false;
return 0;
}
#define MAX_TRANSACTIONS 4
static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
{
struct hdmi *hdmi = hdmi_i2c->hdmi;
if (!hdmi_i2c->sw_done) {
uint32_t ddc_int_ctrl;
ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
(ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
hdmi_i2c->sw_done = true;
hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
HDMI_DDC_INT_CTRL_SW_DONE_ACK);
}
}
return hdmi_i2c->sw_done;
}
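/*
 * i2c master_xfer over the hardware DDC engine: for each message (up to
 * MAX_TRANSACTIONS) the target address and any write payload are pushed
 * into the DDC_DATA FIFO, a per-message transaction register is
 * programmed, and the whole batch is kicked off with the GO bit. After
 * the SW_DONE interrupt fires, read messages are pulled back out of the
 * FIFO by index.
 */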
static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
struct i2c_msg *msgs, int num)
{
struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
struct hdmi *hdmi = hdmi_i2c->hdmi;
struct drm_device *dev = hdmi->dev;
static const uint32_t nack[] = {
HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
};
int indices[MAX_TRANSACTIONS];
int ret, i, j, index = 0;
uint32_t ddc_status, ddc_data, i2c_trans;
num = min(num, MAX_TRANSACTIONS);
WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
if (num == 0)
return num;
init_ddc(hdmi_i2c);
ret = ddc_clear_irq(hdmi_i2c);
if (ret)
return ret;
for (i = 0; i < num; i++) {
struct i2c_msg *p = &msgs[i];
uint32_t raw_addr = p->addr << 1;
if (p->flags & I2C_M_RD)
raw_addr |= 1;
ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
if (i == 0) {
ddc_data |= HDMI_DDC_DATA_INDEX(0) |
HDMI_DDC_DATA_INDEX_WRITE;
}
hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
index++;
indices[i] = index;
if (p->flags & I2C_M_RD) {
index += p->len;
} else {
for (j = 0; j < p->len; j++) {
ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
index++;
}
}
i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
HDMI_I2C_TRANSACTION_REG_RW(
(p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
HDMI_I2C_TRANSACTION_REG_START;
if (i == (num - 1))
i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
}
/* trigger the transfer: */
hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
HDMI_DDC_CTRL_GO);
ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
if (ret <= 0) {
if (ret == 0)
ret = -ETIMEDOUT;
dev_warn(dev->dev, "DDC timeout: %d\n", ret);
DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
return ret;
}
ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
/* read back results of any read transactions: */
for (i = 0; i < num; i++) {
struct i2c_msg *p = &msgs[i];
if (!(p->flags & I2C_M_RD))
continue;
/* check for NACK: */
if (ddc_status & nack[i]) {
DBG("ddc_status=%08x", ddc_status);
break;
}
ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
HDMI_DDC_DATA_INDEX(indices[i]) |
HDMI_DDC_DATA_INDEX_WRITE;
hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
/* discard first byte: */
hdmi_read(hdmi, REG_HDMI_DDC_DATA);
for (j = 0; j < p->len; j++) {
ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
}
}
return i;
}
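/*
 * Illustrative usage sketch (not part of the driver): a consumer such as
 * the EDID code exercises this adapter through the generic i2c core.
 * Reading one EDID block at DDC address 0x50 would look roughly like the
 * following; the buffer names are hypothetical:
 *
 *   u8 offset = 0, edid[128];
 *   struct i2c_msg msgs[] = {
 *       { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *       { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *   };
 *   if (i2c_transfer(i2c, msgs, 2) == 2)
 *       ... parse edid ...
 *
 * msm_hdmi_i2c_xfer() maps each message onto one hardware transaction
 * slot, which is why 'num' is clamped to MAX_TRANSACTIONS above.
 */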
static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm msm_hdmi_i2c_algorithm = {
.master_xfer = msm_hdmi_i2c_xfer,
.functionality = msm_hdmi_i2c_func,
};
void msm_hdmi_i2c_irq(struct i2c_adapter *i2c)
{
struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
if (sw_done(hdmi_i2c))
wake_up_all(&hdmi_i2c->ddc_event);
}
void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c)
{
struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
i2c_del_adapter(i2c);
kfree(hdmi_i2c);
}
struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
{
struct hdmi_i2c_adapter *hdmi_i2c;
struct i2c_adapter *i2c = NULL;
int ret;
hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
if (!hdmi_i2c) {
ret = -ENOMEM;
goto fail;
}
i2c = &hdmi_i2c->base;
hdmi_i2c->hdmi = hdmi;
init_waitqueue_head(&hdmi_i2c->ddc_event);
i2c->owner = THIS_MODULE;
i2c->class = I2C_CLASS_DDC;
snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
i2c->dev.parent = &hdmi->pdev->dev;
i2c->algo = &msm_hdmi_i2c_algorithm;
ret = i2c_add_adapter(i2c);
if (ret)
goto fail;
return i2c;
fail:
if (i2c)
msm_hdmi_i2c_destroy(i2c);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "hdmi.h"
static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
DBG("pixclock: %lu", pixclock);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG0, 0x1b);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG1, 0xf2);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG4, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG5, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG6, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG7, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG8, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG9, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG10, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG11, 0x00);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG3, 0x20);
}
static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
{
DBG("");
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x7f);
}
static const char * const hdmi_phy_8960_reg_names[] = {
"core-vdda",
};
static const char * const hdmi_phy_8960_clk_names[] = {
"slave_iface",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
.type = MSM_HDMI_PHY_8960,
.powerup = hdmi_phy_8960_powerup,
.powerdown = hdmi_phy_8960_powerdown,
.reg_names = hdmi_phy_8960_reg_names,
.num_regs = ARRAY_SIZE(hdmi_phy_8960_reg_names),
.clk_names = hdmi_phy_8960_clk_names,
.num_clks = ARRAY_SIZE(hdmi_phy_8960_clk_names),
};
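/*
 * Judging by the cfg fields, the shared MSM HDMI PHY code is expected to
 * bulk-manage the named regulators and clocks and to call ->powerup()
 * with the pixel clock before the link is enabled, and ->powerdown()
 * when it is torn down; the 8x60 and 8x74 variants below fill in the
 * same template with their own register sequences.
 */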
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include "hdmi.h"
static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
/* De-serializer delay D/C for non-lbk mode: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG0,
HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
if (pixclock == 27000000) {
/* video_format == HDMI_VFRMT_720x480p60_16_9 */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
} else {
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
}
/* No matter what, start from the power down mode: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
HDMI_8x60_PHY_REG2_PD_PWRGEN |
HDMI_8x60_PHY_REG2_PD_PLL |
HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
HDMI_8x60_PHY_REG2_PD_DESER);
/* Turn PowerGen on: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
HDMI_8x60_PHY_REG2_PD_PLL |
HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
HDMI_8x60_PHY_REG2_PD_DESER);
/* Turn PLL power on: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
HDMI_8x60_PHY_REG2_PD_DESER);
/* Write to HIGH after PLL power down de-assert: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3,
HDMI_8x60_PHY_REG3_PLL_ENABLE);
/* ASIC power on; PHY REG9 = 0 */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
/* Enable PLL lock detect, PLL lock det will go high after lock
* Enable the re-time logic
*/
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
HDMI_8x60_PHY_REG12_RETIMING_EN |
HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
/* Drivers are on: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
HDMI_8x60_PHY_REG2_PD_DESER);
/* If the RX detector is needed: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
HDMI_8x60_PHY_REG2_PD_DESER);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG4, 0);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG5, 0);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG6, 0);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG7, 0);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG8, 0);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG10, 0);
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG11, 0);
/* If we want to use lock enable based on counting: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
HDMI_8x60_PHY_REG12_RETIMING_EN |
HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
HDMI_8x60_PHY_REG12_FORCE_LOCK);
}
static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
{
/* Assert RESET PHY from controller */
hdmi_phy_write(phy, REG_HDMI_PHY_CTRL,
HDMI_PHY_CTRL_SW_RESET);
udelay(10);
/* De-assert RESET PHY from controller */
hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, 0);
/* Turn off Driver */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
HDMI_8x60_PHY_REG2_PD_DESER);
udelay(10);
/* Disable PLL */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, 0);
/* Power down PHY, but keep RX-sense: */
hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
HDMI_8x60_PHY_REG2_PD_PWRGEN |
HDMI_8x60_PHY_REG2_PD_PLL |
HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
HDMI_8x60_PHY_REG2_PD_DESER);
}
static const char * const hdmi_phy_8x60_reg_names[] = {
"core-vdda",
};
static const char * const hdmi_phy_8x60_clk_names[] = {
"slave_iface",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg = {
.type = MSM_HDMI_PHY_8x60,
.powerup = hdmi_phy_8x60_powerup,
.powerdown = hdmi_phy_8x60_powerdown,
.reg_names = hdmi_phy_8x60_reg_names,
.num_regs = ARRAY_SIZE(hdmi_phy_8x60_reg_names),
.clk_names = hdmi_phy_8x60_clk_names,
.num_clks = ARRAY_SIZE(hdmi_phy_8x60_clk_names),
};
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "hdmi.h"
static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG0, 0x1b);
hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG1, 0xf2);
hdmi_phy_write(phy, REG_HDMI_8x74_BIST_CFG0, 0x0);
hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN0, 0x0);
hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN1, 0x0);
hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN2, 0x0);
hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN3, 0x0);
hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL1, 0x20);
}
static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
{
hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL0, 0x7f);
}
static const char * const hdmi_phy_8x74_reg_names[] = {
"core-vdda",
"vddio",
};
static const char * const hdmi_phy_8x74_clk_names[] = {
"iface", "alt_iface"
};
const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
.type = MSM_HDMI_PHY_8x74,
.powerup = hdmi_phy_8x74_powerup,
.powerdown = hdmi_phy_8x74_powerdown,
.reg_names = hdmi_phy_8x74_reg_names,
.num_regs = ARRAY_SIZE(hdmi_phy_8x74_reg_names),
.clk_names = hdmi_phy_8x74_clk_names,
.num_clks = ARRAY_SIZE(hdmi_phy_8x74_clk_names),
};
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include "hdmi.h"
struct hdmi_pll_8960 {
struct platform_device *pdev;
struct clk_hw clk_hw;
void __iomem *mmio;
unsigned long pixclk;
};
#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8960, clk_hw)
/*
* HDMI PLL:
*
* To get the parent clock setup properly, we need to plug in hdmi pll
* configuration into common-clock-framework.
*/
struct pll_rate {
unsigned long rate;
int num_reg;
struct {
u32 val;
u32 reg;
} conf[32];
};
/* NOTE: keep sorted highest freq to lowest: */
static const struct pll_rate freqtbl[] = {
{ 154000000, 14, {
{ 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
}
},
/* 1080p60/1080p50 case */
{ 148500000, 27, {
{ 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
{ 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
{ 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
}
},
{ 108000000, 13, {
{ 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
}
},
/* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
{ 74250000, 8, {
{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
{ 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
}
},
{ 74176000, 14, {
{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
}
},
{ 65000000, 14, {
{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
}
},
/* 480p60/480i60 */
{ 27030000, 18, {
{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
{ 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
{ 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
}
},
/* 576p50/576i50 */
{ 27000000, 27, {
{ 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
{ 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
{ 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
}
},
/* 640x480p60 */
{ 25200000, 27, {
{ 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
{ 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
{ 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
{ 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
}
},
};
static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data)
{
msm_writel(data, pll->mmio + reg);
}
static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg)
{
return msm_readl(pll->mmio + reg);
}
static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll)
{
return platform_get_drvdata(pll->pdev);
}
static int hdmi_pll_enable(struct clk_hw *hw)
{
struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
struct hdmi_phy *phy = pll_get_phy(pll);
int timeout_count, pll_lock_retry = 10;
unsigned int val;
DBG("");
/* Assert PLL S/W reset */
pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
/* Wait for a short time before de-asserting
* to allow the hardware to complete its job.
* This much delay should be fine for the hardware
* to assert and de-assert.
*/
udelay(10);
/* De-assert PLL S/W reset */
pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
val |= HDMI_8960_PHY_REG12_SW_RESET;
/* Assert PHY S/W reset */
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
val &= ~HDMI_8960_PHY_REG12_SW_RESET;
/*
* Wait for a short time before de-asserting to allow the hardware to
* complete its job. This much delay should be fine for the hardware to
* assert and de-assert.
*/
udelay(10);
/* De-assert PHY S/W reset */
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x3f);
val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
val |= HDMI_8960_PHY_REG12_PWRDN_B;
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
/* Wait 10 us for enabling global power for PHY */
mb();
udelay(10);
val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x80);
timeout_count = 1000;
while (--pll_lock_retry > 0) {
/* are we there yet? */
val = pll_read(pll, REG_HDMI_8960_PHY_PLL_STATUS0);
if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
break;
udelay(1);
if (--timeout_count > 0)
continue;
/*
* PLL has still not locked.
* Do a software reset and try again
* Assert PLL S/W reset first
*/
pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
udelay(10);
pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
/*
* Wait for a short duration for the PLL calibration
* before checking if the PLL gets locked
*/
udelay(350);
timeout_count = 1000;
}
return 0;
}
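/*
 * Rough worst-case estimate for the lock loop above (assuming the
 * udelay() values are honoured exactly): the while loop makes up to nine
 * passes, each polling the lock bit for up to 1000 x 1us and, on
 * failure, spending another 10us + 350us on the software reset and
 * calibration wait. A PLL that never locks therefore costs roughly
 * 9 * (1000 + 360)us ~= 12ms, after which the function still returns 0.
 */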
static void hdmi_pll_disable(struct clk_hw *hw)
{
struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
struct hdmi_phy *phy = pll_get_phy(pll);
unsigned int val;
DBG("");
val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
val |= HDMI_8960_PHY_REG12_SW_RESET;
val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
/* Make sure HDMI PHY/PLL are powered down */
mb();
}
static const struct pll_rate *find_rate(unsigned long rate)
{
int i;
for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
if (rate > freqtbl[i].rate)
return &freqtbl[i - 1];
return &freqtbl[i - 1];
}
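/*
 * Worked example for find_rate() (illustrative): freqtbl is sorted from
 * highest to lowest rate, so the loop returns the smallest table entry
 * that is >= the requested rate. For rate = 100000000 the scan passes
 * 148500000 and 108000000 (100MHz is not greater than either), stops at
 * 74250000 and returns the 108000000 entry. Any request above 148500000
 * clamps to the 154000000 entry, and requests at or below 25200000 fall
 * through to the last entry.
 */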
static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
return pll->pixclk;
}
static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
const struct pll_rate *pll_rate = find_rate(rate);
return pll_rate->rate;
}
static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
const struct pll_rate *pll_rate = find_rate(rate);
int i;
DBG("rate=%lu", rate);
for (i = 0; i < pll_rate->num_reg; i++)
pll_write(pll, pll_rate->conf[i].reg, pll_rate->conf[i].val);
pll->pixclk = rate;
return 0;
}
static const struct clk_ops hdmi_pll_ops = {
.enable = hdmi_pll_enable,
.disable = hdmi_pll_disable,
.recalc_rate = hdmi_pll_recalc_rate,
.round_rate = hdmi_pll_round_rate,
.set_rate = hdmi_pll_set_rate,
};
static const struct clk_parent_data hdmi_pll_parents[] = {
{ .fw_name = "pxo", .name = "pxo_board" },
};
static struct clk_init_data pll_init = {
.name = "hdmi_pll",
.ops = &hdmi_pll_ops,
.parent_data = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
.flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_pll_8960 *pll;
int i, ret;
/* sanity check: */
for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
if (WARN_ON(freqtbl[i].rate < freqtbl[i + 1].rate))
return -EINVAL;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
return -ENOMEM;
pll->mmio = msm_ioremap(pdev, "hdmi_pll");
if (IS_ERR(pll->mmio)) {
DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
pll->pdev = pdev;
pll->clk_hw.init = &pll_init;
ret = devm_clk_hw_register(dev, &pll->clk_hw);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return ret;
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
if (ret) {
DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
return ret;
}
return 0;
}
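/*
 * With the hw provider registered above, the PLL becomes visible to
 * other nodes through the common clock framework; a hypothetical DT
 * consumer would reference it along the lines of
 *
 *   clocks = <&hdmi_phy>;
 *
 * (the exact phandle name depends on the board DTS). The devm_*
 * variants tie the clock and provider lifetimes to the platform device,
 * so no explicit teardown path is needed here.
 */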
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/hdmi.h>
#include "hdmi.h"
/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
static int nchannels[] = { 2, 4, 6, 8 };
/* Supported HDMI Audio sample rates */
#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
#define MSM_HDMI_SAMPLE_RATE_MAX 7
struct hdmi_msm_audio_acr {
uint32_t n; /* N parameter for clock regeneration */
uint32_t cts; /* CTS parameter for clock regeneration */
};
struct hdmi_msm_audio_arcs {
unsigned long int pixclock;
struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
};
#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
static const struct hdmi_msm_audio_arcs acr_lut[] = {
/* 25.200MHz */
HDMI_MSM_AUDIO_ARCS(25200, {
{4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
{12288, 25200}, {25088, 28000}, {24576, 25200} }),
/* 27.000MHz */
HDMI_MSM_AUDIO_ARCS(27000, {
{4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
{12288, 27000}, {25088, 30000}, {24576, 27000} }),
/* 27.027MHz */
HDMI_MSM_AUDIO_ARCS(27030, {
{4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
{12288, 27027}, {25088, 30030}, {24576, 27027} }),
/* 74.250MHz */
HDMI_MSM_AUDIO_ARCS(74250, {
{4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
{12288, 74250}, {25088, 82500}, {24576, 74250} }),
/* 148.500MHz */
HDMI_MSM_AUDIO_ARCS(148500, {
{4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
{12288, 148500}, {25088, 165000}, {24576, 148500} }),
};
static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
{
int i;
for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
if (arcs->pixclock == pixclock)
return arcs;
}
return NULL;
}
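/*
 * Background on the N/CTS pairs above (per the HDMI audio clock
 * regeneration scheme): the sink recovers the audio clock from
 * 128 * fs = f_tmds * N / CTS. As a worked check against the table:
 * for 48kHz at a 148.5MHz pixel clock, N = 6144 and CTS = 148500, and
 * 148500000 * 6144 / 148500 = 6144000 = 128 * 48000. The lut[] index is
 * the MSM_HDMI_SAMPLE_RATE_* value defined above.
 */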
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
struct hdmi_audio_infoframe *info = &audio->infoframe;
const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
uint32_t infofrm_ctrl, audio_config;
DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
"level_shift_value=%d, downmix_inhibit=%d, rate=%d",
audio->enabled, info->channels, info->channel_allocation,
info->level_shift_value, info->downmix_inhibit, audio->rate);
DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
DBG("disabling audio: no video");
enabled = false;
}
if (enabled) {
arcs = get_arcs(hdmi->pixclock);
if (!arcs) {
DBG("disabling audio: unsupported pixclock: %lu",
hdmi->pixclock);
enabled = false;
}
}
/* Read first before writing */
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
/* Clear N/CTS selection bits */
acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SELECT__MASK;
if (enabled) {
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
uint8_t buf[14];
n = arcs->lut[audio->rate].n;
cts = arcs->lut[audio->rate].cts;
if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
(MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
multiplier = 4;
n >>= 2; /* divide N by 4 and use multiplier */
} else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
(MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
multiplier = 2;
n >>= 1; /* divide N by 2 and use multiplier */
} else {
multiplier = 1;
}
DBG("n=%u, cts=%u, multiplier=%u", n, cts, multiplier);
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SOURCE;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
(MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
(MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
select = ACR_48;
else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
(MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
(MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
select = ACR_44;
else /* default to 32k */
select = ACR_32;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SELECT(select);
hdmi_write(hdmi, REG_HDMI_ACR_0(select - 1),
HDMI_ACR_0_CTS(cts));
hdmi_write(hdmi, REG_HDMI_ACR_1(select - 1),
HDMI_ACR_1_N(n));
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
/* configure infoframe: */
hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
(buf[3] << 0) | (buf[4] << 8) |
(buf[5] << 16) | (buf[6] << 24));
hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
(buf[7] << 0) | (buf[8] << 8));
hdmi_write(hdmi, REG_HDMI_GC, 0);
vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
} else {
acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
}
hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
hdmi_write(hdmi, REG_HDMI_AUD_INT,
COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
COND(enabled, HDMI_AUD_INT_AUD_SAM_DROP_INT));
hdmi_write(hdmi, REG_HDMI_AUDIO_CFG, audio_config);
DBG("audio %sabled", enabled ? "en" : "dis");
return 0;
}
int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
uint32_t num_of_channels, uint32_t channel_allocation,
uint32_t level_shift, bool down_mix)
{
struct hdmi_audio *audio;
if (!hdmi)
return -ENXIO;
audio = &hdmi->audio;
if (num_of_channels >= ARRAY_SIZE(nchannels))
return -EINVAL;
audio->enabled = enabled;
audio->infoframe.channels = nchannels[num_of_channels];
audio->infoframe.channel_allocation = channel_allocation;
audio->infoframe.level_shift_value = level_shift;
audio->infoframe.downmix_inhibit = down_mix;
return msm_hdmi_audio_update(hdmi);
}
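/*
 * Illustrative call sequence (not part of the driver): the audio glue
 * would configure 2-channel, 48kHz output roughly as follows, where
 * MSM_HDMI_AUDIO_CHANNEL_2 stands for the 2-channel constant mentioned
 * in the nchannels[] comment above (its exact name lives in the audio
 * header):
 *
 *   msm_hdmi_audio_set_sample_rate(hdmi, MSM_HDMI_SAMPLE_RATE_48KHZ);
 *   msm_hdmi_audio_info_setup(hdmi, true, MSM_HDMI_AUDIO_CHANNEL_2,
 *                             0, 0, false);
 *
 * Both paths funnel into msm_hdmi_audio_update(), which only actually
 * enables audio once video is up and the pixel clock is in acr_lut[].
 */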
void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
{
struct hdmi_audio *audio;
if (!hdmi)
return;
audio = &hdmi->audio;
if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
return;
audio->rate = rate;
msm_hdmi_audio_update(hdmi);
}
| linux-master | drivers/gpu/drm/msm/hdmi/hdmi_audio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include "drm/drm_bridge_connector.h"
#include "msm_kms.h"
#include "dsi.h"
#define DSI_CLOCK_MASTER DSI_0
#define DSI_CLOCK_SLAVE DSI_1
#define DSI_LEFT DSI_0
#define DSI_RIGHT DSI_1
/* According to the current drm framework sequence, take the encoder of
* DSI_1 as master encoder
*/
#define DSI_ENCODER_MASTER DSI_1
#define DSI_ENCODER_SLAVE DSI_0
struct msm_dsi_manager {
struct msm_dsi *dsi[DSI_MAX];
bool is_bonded_dsi;
bool is_sync_needed;
int master_dsi_link_id;
};
static struct msm_dsi_manager msm_dsim_glb;
#define IS_BONDED_DSI() (msm_dsim_glb.is_bonded_dsi)
#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id)
static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
{
return msm_dsim_glb.dsi[id];
}
static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
{
return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
}
static int dsi_mgr_parse_of(struct device_node *np, int id)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
/* We assume the two DSI nodes carry the same bonded-DSI and sync-mode
* information, and that exactly one node specifies the master in bonded
* mode.
*/
if (!msm_dsim->is_bonded_dsi)
msm_dsim->is_bonded_dsi = of_property_read_bool(np, "qcom,dual-dsi-mode");
if (msm_dsim->is_bonded_dsi) {
if (of_property_read_bool(np, "qcom,master-dsi"))
msm_dsim->master_dsi_link_id = id;
if (!msm_dsim->is_sync_needed)
msm_dsim->is_sync_needed = of_property_read_bool(
np, "qcom,sync-dual-dsi");
}
return 0;
}
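/*
 * For reference, a bonded-DSI pair would carry these properties in DT
 * (a hypothetical sketch; node names vary by board):
 *
 *   &dsi0 {
 *       qcom,dual-dsi-mode;
 *       qcom,master-dsi;
 *       qcom,sync-dual-dsi;
 *   };
 *   &dsi1 {
 *       qcom,dual-dsi-mode;
 *       qcom,sync-dual-dsi;
 *   };
 *
 * i.e. both nodes set the bonded and sync flags, and exactly one of
 * them is marked master, matching the assumption documented above.
 */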
static int dsi_mgr_setup_components(int id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int ret;
if (!IS_BONDED_DSI()) {
ret = msm_dsi_host_register(msm_dsi->host);
if (ret)
return ret;
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
} else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi;
/* Register the slave host first, so that the slave DSI device
* has a chance to probe without blocking the master DSI
* device's probe.
* Also, do not check probe deferral for the slave host, because
* only the master DSI device adds the panel to the global panel
* list; the panel's device is the master DSI device.
*/
ret = msm_dsi_host_register(slave_link_dsi->host);
if (ret)
return ret;
ret = msm_dsi_host_register(master_link_dsi->host);
if (ret)
return ret;
/* PLL0 drives both DSI link clocks in bonded DSI mode. */
msm_dsi_phy_set_usecase(clk_master_dsi->phy,
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
return 0;
}
static int enable_phy(struct msm_dsi *msm_dsi,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct msm_dsi_phy_clk_request clk_req;
bool is_bonded_dsi = IS_BONDED_DSI();
msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
}
static int
dsi_mgr_phy_enable(int id,
struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX])
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int ret;
/* In case of bonded DSI, some registers in PHY1 have been programmed
* during PLL0 clock's set_rate. The PHY1 reset called by host1 here
* will silently reset those PHY1 registers. Therefore we need to reset
* and enable both PHYs before any PLL clock operation.
*/
if (IS_BONDED_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_host_reset_phy(mdsi->host);
msm_dsi_host_reset_phy(sdsi->host);
ret = enable_phy(mdsi,
&shared_timings[DSI_CLOCK_MASTER]);
if (ret)
return ret;
ret = enable_phy(sdsi,
&shared_timings[DSI_CLOCK_SLAVE]);
if (ret) {
msm_dsi_phy_disable(mdsi->phy);
return ret;
}
}
} else {
msm_dsi_host_reset_phy(msm_dsi->host);
ret = enable_phy(msm_dsi, &shared_timings[id]);
if (ret)
return ret;
}
msm_dsi->phy_enabled = true;
return 0;
}
static void dsi_mgr_phy_disable(int id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
/* Disable the DSI PHY.
* In a bonded DSI configuration, the PHY should be disabled for the
* first controller only once the second controller is disabled.
*/
msm_dsi->phy_enabled = false;
if (IS_BONDED_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_phy_disable(sdsi->phy);
msm_dsi_phy_disable(mdsi->phy);
}
} else {
msm_dsi_phy_disable(msm_dsi->phy);
}
}
struct dsi_bridge {
struct drm_bridge base;
int id;
};
#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
{
struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
return dsi_bridge->id;
}
static void msm_dsi_manager_set_split_display(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_drm_private *priv = msm_dsi->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_dsi *master_dsi, *slave_dsi;
if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
slave_dsi = msm_dsi;
} else {
master_dsi = msm_dsi;
slave_dsi = other_dsi;
}
if (!msm_dsi->external_bridge || !IS_BONDED_DSI())
return;
/*
* Set split display info to kms once bonded DSI panel is connected to
* both hosts.
*/
if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
msm_dsi_is_cmd_mode(msm_dsi));
}
}
static int dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
ret = dsi_mgr_phy_enable(id, phy_shared_timings);
if (ret)
goto phy_en_fail;
ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_bonded_dsi, msm_dsi->phy);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
}
if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
&phy_shared_timings[DSI_1], is_bonded_dsi, msm_dsi1->phy);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
goto host1_on_fail;
}
}
/*
* Enable before preparing the panel, disable after unpreparing, so
* that the panel can communicate over the DSI link.
*/
msm_dsi_host_enable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_enable_irq(msm_dsi1->host);
return 0;
host1_on_fail:
msm_dsi_host_power_off(host);
host_on_fail:
dsi_mgr_phy_disable(id);
phy_en_fail:
return ret;
}
static void dsi_mgr_bridge_power_off(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
bool is_bonded_dsi = IS_BONDED_DSI();
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1) {
msm_dsi_host_disable_irq(msm_dsi1->host);
msm_dsi_host_power_off(msm_dsi1->host);
}
msm_dsi_host_power_off(host);
dsi_mgr_phy_disable(id);
}
static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
/* Do nothing with the host if it is slave-DSI in case of bonded DSI */
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
ret = dsi_mgr_bridge_power_on(bridge);
if (ret) {
dev_err(&msm_dsi->pdev->dev, "Power on failed: %d\n", ret);
return;
}
ret = msm_dsi_host_enable(host);
if (ret) {
pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
goto host_en_fail;
}
if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_enable(msm_dsi1->host);
if (ret) {
pr_err("%s: enable host1 failed, %d\n", __func__, ret);
goto host1_en_fail;
}
}
return;
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
dsi_mgr_bridge_power_off(bridge);
}
void msm_dsi_manager_tpg_enable(void)
{
struct msm_dsi *m_dsi = dsi_mgr_get_dsi(DSI_0);
struct msm_dsi *s_dsi = dsi_mgr_get_dsi(DSI_1);
/* if bonded DSI, trigger TPG on the master first, then the slave */
if (m_dsi) {
msm_dsi_host_test_pattern_en(m_dsi->host);
if (IS_BONDED_DSI() && s_dsi)
msm_dsi_host_test_pattern_en(s_dsi->host);
}
}
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
/*
* Do nothing with the host if it is slave-DSI in case of bonded DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
* won't be disabled until both PHYs request disable.
*/
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_disable(msm_dsi1->host);
if (ret)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
}
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_disable_irq(msm_dsi1->host);
/* Save PHY status if it is a clock source */
msm_dsi_phy_pll_save_state(msm_dsi->phy);
ret = msm_dsi_host_power_off(host);
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_power_off(msm_dsi1->host);
if (ret)
pr_err("%s: host1 power off failed, %d\n",
__func__, ret);
}
disable_phy:
dsi_mgr_phy_disable(id);
}
static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct mipi_dsi_host *host = msm_dsi->host;
bool is_bonded_dsi = IS_BONDED_DSI();
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
if (is_bonded_dsi && other_dsi)
msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
}
static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct mipi_dsi_host *host = msm_dsi->host;
struct platform_device *pdev = msm_dsi->pdev;
struct dev_pm_opp *opp;
unsigned long byte_clk_rate;
byte_clk_rate = dsi_byte_clk_get_rate(host, IS_BONDED_DSI(), mode);
opp = dev_pm_opp_find_freq_ceil(&pdev->dev, &byte_clk_rate);
if (!IS_ERR(opp)) {
dev_pm_opp_put(opp);
} else if (PTR_ERR(opp) == -ERANGE) {
/*
* An empty OPP table is created by devm_pm_opp_set_clkname() even
* if the DT provides none, so find_freq_ceil() will still return
* -ERANGE in that case.
*/
if (dev_pm_opp_get_opp_count(&pdev->dev) != 0)
return MODE_CLOCK_RANGE;
} else {
return MODE_ERROR;
}
return msm_dsi_host_check_dsc(host, mode);
}
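/*
 * Note on the OPP lookup above: dev_pm_opp_find_freq_ceil() rounds
 * byte_clk_rate up to the nearest OPP table entry and returns -ERANGE
 * when the rate exceeds every entry, which is why a non-empty table
 * plus -ERANGE is reported as MODE_CLOCK_RANGE while any other error
 * maps to MODE_ERROR.
 */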
static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.pre_enable = dsi_mgr_bridge_pre_enable,
.post_disable = dsi_mgr_bridge_post_disable,
.mode_set = dsi_mgr_bridge_mode_set,
.mode_valid = dsi_mgr_bridge_mode_valid,
};
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_bridge *bridge = NULL;
struct dsi_bridge *dsi_bridge;
struct drm_encoder *encoder;
int ret;
dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
sizeof(*dsi_bridge), GFP_KERNEL);
if (!dsi_bridge) {
ret = -ENOMEM;
goto fail;
}
dsi_bridge->id = id;
encoder = msm_dsi->encoder;
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
drm_bridge_add(bridge);
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto fail;
return bridge;
fail:
if (bridge)
msm_dsi_manager_bridge_destroy(bridge);
return ERR_PTR(ret);
}
int msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
int ret;
int_bridge = msm_dsi->bridge;
ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
msm_dsi->pdev->dev.of_node, 1, 0);
if (IS_ERR(ext_bridge))
return PTR_ERR(ext_bridge);
msm_dsi->external_bridge = ext_bridge;
encoder = msm_dsi->encoder;
/*
* Try first to create the bridge without it creating its own
* connector.. currently some bridges support this, and others
* do not (and some support both modes)
*/
ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret == -EINVAL) {
/*
* link the internal dsi bridge to the external bridge,
* connector is created by the next bridge.
*/
ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
if (ret < 0)
return ret;
} else {
struct drm_connector *connector;
/* We are in charge of the connector, create one now. */
connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_ERROR("Unable to create bridge connector\n");
return PTR_ERR(connector);
}
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
}
/* The pipeline is ready, ping encoders if necessary */
msm_dsi_manager_set_split_display(id);
return 0;
}
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{
drm_bridge_remove(bridge);
}
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
struct mipi_dsi_host *host = msm_dsi->host;
bool is_read = (msg->rx_buf && msg->rx_len);
bool need_sync = (IS_SYNC_NEEDED() && !is_read);
int ret;
if (!msg->tx_buf || !msg->tx_len)
return 0;
/* In the bonded master case, the panel requires the same commands to be
* sent to both DSI links. The host issues the command trigger to both
* links when DSI_1 calls the cmd transfer function, regardless of
* whether that happens before or after the DSI_0 cmd transfer.
*/
if (need_sync && (id == DSI_0))
return is_read ? msg->rx_len : msg->tx_len;
if (need_sync && msm_dsi0) {
ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
if (ret) {
pr_err("%s: failed to prepare non-trigger host, %d\n",
__func__, ret);
return ret;
}
}
ret = msm_dsi_host_xfer_prepare(host, msg);
if (ret) {
pr_err("%s: failed to prepare host, %d\n", __func__, ret);
goto restore_host0;
}
ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
msm_dsi_host_cmd_tx(host, msg);
msm_dsi_host_xfer_restore(host, msg);
restore_host0:
if (need_sync && msm_dsi0)
msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
return ret;
}
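/*
 * Sketch of the synced transfer flow described above, assuming bonded
 * mode with sync enabled (reads skip the sync path):
 *
 *   DSI_0 caller: msm_dsi_manager_cmd_xfer(DSI_0, msg)
 *                   -> returns msg->tx_len immediately (no trigger)
 *   DSI_1 caller: msm_dsi_manager_cmd_xfer(DSI_1, msg)
 *                   -> prepares the DSI_0 host, prepares its own host,
 *                      transfers (the trigger hits both links), then
 *                      restores both hosts
 *
 * so the same command reaches both links exactly once, whichever host
 * the panel driver happens to address first.
 */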
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
struct mipi_dsi_host *host = msm_dsi->host;
if (IS_SYNC_NEEDED() && (id == DSI_0))
return false;
if (IS_SYNC_NEEDED() && msm_dsi0)
msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len);
msm_dsi_host_cmd_xfer_commit(host, dma_base, len);
return true;
}
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
int id = msm_dsi->id;
int ret;
if (id >= DSI_MAX) {
pr_err("%s: invalid id %d\n", __func__, id);
return -EINVAL;
}
if (msm_dsim->dsi[id]) {
pr_err("%s: dsi%d already registered\n", __func__, id);
return -EBUSY;
}
msm_dsim->dsi[id] = msm_dsi;
ret = dsi_mgr_parse_of(msm_dsi->pdev->dev.of_node, id);
if (ret) {
pr_err("%s: failed to parse OF DSI info\n", __func__);
goto fail;
}
ret = dsi_mgr_setup_components(id);
if (ret) {
pr_err("%s: failed to register mipi dsi host for DSI %d: %d\n",
__func__, id, ret);
goto fail;
}
return 0;
fail:
msm_dsim->dsi[id] = NULL;
return ret;
}
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
if (msm_dsi->host)
msm_dsi_host_unregister(msm_dsi->host);
if (msm_dsi->id >= 0)
msm_dsim->dsi[msm_dsi->id] = NULL;
}
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
{
return IS_BONDED_DSI();
}
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
return IS_MASTER_DSI_LINK(msm_dsi->id);
}
| linux-master | drivers/gpu/drm/msm/dsi/dsi_manager.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include "dsi.h"
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return msm_dsi_host_get_dsc_config(msm_dsi->host);
}
static int dsi_get_phy(struct msm_dsi *msm_dsi)
{
struct platform_device *pdev = msm_dsi->pdev;
struct platform_device *phy_pdev;
struct device_node *phy_node;
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
phy_pdev = of_find_device_by_node(phy_node);
if (phy_pdev) {
msm_dsi->phy = platform_get_drvdata(phy_pdev);
msm_dsi->phy_dev = &phy_pdev->dev;
}
of_node_put(phy_node);
if (!phy_pdev) {
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
if (!msm_dsi->phy) {
put_device(&phy_pdev->dev);
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
return 0;
}
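/*
 * dsi_get_phy() resolves the controller's PHY through a standard "phys"
 * phandle; the corresponding DT wiring is simply (node name
 * hypothetical):
 *
 *   &dsi0 {
 *       phys = <&dsi0_phy>;
 *   };
 *
 * Note the asymmetric cleanup: of_node_put() is done here, but the
 * device reference taken by of_find_device_by_node() is dropped later,
 * in dsi_destroy() via put_device().
 */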
static void dsi_destroy(struct msm_dsi *msm_dsi)
{
if (!msm_dsi)
return;
msm_dsi_manager_unregister(msm_dsi);
if (msm_dsi->phy_dev) {
put_device(msm_dsi->phy_dev);
msm_dsi->phy = NULL;
msm_dsi->phy_dev = NULL;
}
if (msm_dsi->host) {
msm_dsi_host_destroy(msm_dsi->host);
msm_dsi->host = NULL;
}
platform_set_drvdata(msm_dsi->pdev, NULL);
}
static struct msm_dsi *dsi_init(struct platform_device *pdev)
{
struct msm_dsi *msm_dsi;
int ret;
if (!pdev)
return ERR_PTR(-ENXIO);
msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
if (!msm_dsi)
return ERR_PTR(-ENOMEM);
DBG("dsi probed=%p", msm_dsi);
msm_dsi->id = -1;
msm_dsi->pdev = pdev;
platform_set_drvdata(pdev, msm_dsi);
/* Init dsi host */
ret = msm_dsi_host_init(msm_dsi);
if (ret)
goto destroy_dsi;
/* GET dsi PHY */
ret = dsi_get_phy(msm_dsi);
if (ret)
goto destroy_dsi;
/* Register to dsi manager */
ret = msm_dsi_manager_register(msm_dsi);
if (ret)
goto destroy_dsi;
return msm_dsi;
destroy_dsi:
dsi_destroy(msm_dsi);
return ERR_PTR(ret);
}
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
priv->dsi[msm_dsi->id] = msm_dsi;
return 0;
}
static void dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
priv->dsi[msm_dsi->id] = NULL;
}
static const struct component_ops dsi_ops = {
.bind = dsi_bind,
.unbind = dsi_unbind,
};
int dsi_dev_attach(struct platform_device *pdev)
{
return component_add(&pdev->dev, &dsi_ops);
}
void dsi_dev_detach(struct platform_device *pdev)
{
component_del(&pdev->dev, &dsi_ops);
}
static int dsi_dev_probe(struct platform_device *pdev)
{
struct msm_dsi *msm_dsi;
DBG("");
msm_dsi = dsi_init(pdev);
if (IS_ERR(msm_dsi)) {
/* Don't fail the bind if the dsi port is not connected */
if (PTR_ERR(msm_dsi) == -ENODEV)
return 0;
else
return PTR_ERR(msm_dsi);
}
return 0;
}
static int dsi_dev_remove(struct platform_device *pdev)
{
struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
DBG("");
dsi_destroy(msm_dsi);
return 0;
}
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdss-dsi-ctrl" },
/* Deprecated, don't use */
{ .compatible = "qcom,dsi-ctrl-6g-qcm2290" },
{}
};
static const struct dev_pm_ops dsi_pm_ops = {
SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static struct platform_driver dsi_driver = {
.probe = dsi_dev_probe,
.remove = dsi_dev_remove,
.driver = {
.name = "msm_dsi",
.of_match_table = dt_match,
.pm = &dsi_pm_ops,
},
};
void __init msm_dsi_register(void)
{
DBG("");
msm_dsi_phy_driver_register();
platform_driver_register(&dsi_driver);
}
void __exit msm_dsi_unregister(void)
{
DBG("");
msm_dsi_phy_driver_unregister();
platform_driver_unregister(&dsi_driver);
}
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
goto fail;
}
if (msm_dsi_is_bonded_dsi(msm_dsi) &&
!msm_dsi_is_master_dsi(msm_dsi)) {
/*
* Do not return an error here;
* just skip creating the encoder/connector for the slave-DSI.
*/
return 0;
}
msm_dsi->encoder = encoder;
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
msm_dsi->bridge = NULL;
goto fail;
}
ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
goto fail;
}
priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
return 0;
fail:
/* bridge/connector are normally destroyed by drm: */
if (msm_dsi->bridge) {
msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
msm_dsi->bridge = NULL;
}
return ret;
}
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
msm_dsi_host_snapshot(disp_state, msm_dsi->host);
msm_dsi_phy_snapshot(disp_state, msm_dsi->phy);
}
| linux-master | drivers/gpu/drm/msm/dsi/dsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include "dsi_cfg.h"
static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss", "iface", "bus",
};
static const struct regulator_bulk_data apq8064_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
{ .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config apq8064_dsi_cfg = {
.io_offset = 0,
.regulator_data = apq8064_dsi_regulators,
.num_regulators = ARRAY_SIZE(apq8064_dsi_regulators),
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
.io_start = {
{ 0x4700000, 0x5800000 },
},
};
static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
static const struct regulator_bulk_data msm8974_apq8084_regulators[] = {
{ .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = msm8974_apq8084_regulators,
.num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = {
{ 0xfd922800, 0xfd922b00 },
},
};
static const char * const dsi_v1_3_1_clk_names[] = {
"mdp_core", "iface", "bus",
};
static const struct regulator_bulk_data dsi_v1_3_1_regulators[] = {
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = dsi_v1_3_1_regulators,
.num_regulators = ARRAY_SIZE(dsi_v1_3_1_regulators),
.bus_clk_names = dsi_v1_3_1_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v1_3_1_clk_names),
.io_start = {
{ 0x1a98000 },
},
};
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = dsi_v1_3_1_regulators,
.num_regulators = ARRAY_SIZE(dsi_v1_3_1_regulators),
.bus_clk_names = dsi_v1_3_1_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v1_3_1_clk_names),
.io_start = {
{ 0x1a94000, 0x1a96000 },
},
};
static const struct regulator_bulk_data msm8994_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
{ .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
{ .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */
{ .supply = "lab_reg", .init_load_uA = -1 },
{ .supply = "ibb_reg", .init_load_uA = -1 },
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = msm8994_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8994_dsi_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = {
{ 0xfd998000, 0xfd9a0000 },
},
};
static const struct regulator_bulk_data msm8996_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */
{ .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = msm8996_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8996_dsi_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = {
{ 0x994000, 0x996000 },
},
};
static const char * const dsi_msm8998_bus_clk_names[] = {
"iface", "bus", "core",
};
static const struct regulator_bulk_data msm8998_dsi_regulators[] = {
{ .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */
{ .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */
};
static const struct msm_dsi_config msm8998_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = msm8998_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8998_dsi_regulators),
.bus_clk_names = dsi_msm8998_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
.io_start = {
{ 0xc994000, 0xc996000 },
},
};
static const char * const dsi_sdm660_bus_clk_names[] = {
"iface", "bus", "core", "core_mmss",
};
static const struct regulator_bulk_data sdm660_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */
};
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = sdm660_dsi_regulators,
.num_regulators = ARRAY_SIZE(sdm660_dsi_regulators),
.bus_clk_names = dsi_sdm660_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
.io_start = {
{ 0xc994000, 0xc996000 },
},
};
static const char * const dsi_v2_4_clk_names[] = {
"iface", "bus",
};
static const struct regulator_bulk_data dsi_v2_4_regulators[] = {
{ .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
{ .supply = "refgen" },
};
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = dsi_v2_4_regulators,
.num_regulators = ARRAY_SIZE(dsi_v2_4_regulators),
.bus_clk_names = dsi_v2_4_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
.io_start = {
{ 0xae94000, 0xae96000 }, /* SDM845 / SDM670 */
{ 0x5e94000 }, /* QCM2290 / SM6115 / SM6125 / SM6375 */
},
};
static const struct regulator_bulk_data sm8550_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 16800 }, /* 1.2 V */
};
static const struct msm_dsi_config sm8550_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = sm8550_dsi_regulators,
.num_regulators = ARRAY_SIZE(sm8550_dsi_regulators),
.bus_clk_names = dsi_v2_4_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
.io_start = {
{ 0xae94000, 0xae96000 },
},
};
static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
{ .supply = "refgen" },
};
static const struct msm_dsi_config sc7280_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.regulator_data = sc7280_dsi_regulators,
.num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
.bus_clk_names = dsi_v2_4_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
.io_start = {
{ 0xae94000, 0xae96000 },
},
};
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
.link_clk_disable = dsi_link_clk_disable_v2,
.clk_init_ver = dsi_clk_init_v2,
.tx_buf_alloc = dsi_tx_buf_alloc_v2,
.tx_buf_get = dsi_tx_buf_get_v2,
.tx_buf_put = NULL,
.dma_base_get = dsi_dma_base_get_v2,
.calc_clk_rate = dsi_calc_clk_rate_v2,
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = NULL,
.tx_buf_alloc = dsi_tx_buf_alloc_6g,
.tx_buf_get = dsi_tx_buf_get_6g,
.tx_buf_put = dsi_tx_buf_put_6g,
.dma_base_get = dsi_dma_base_get_6g,
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = dsi_clk_init_6g_v2,
.tx_buf_alloc = dsi_tx_buf_alloc_6g,
.tx_buf_get = dsi_tx_buf_get_6g,
.tx_buf_put = dsi_tx_buf_put_6g,
.dma_base_get = dsi_dma_base_get_6g,
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
&apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0_2,
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
&msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
&msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
&msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_1_0,
&sdm660_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
&sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
&sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
int i;
for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
if ((dsi_cfg_handlers[i].major == major) &&
(dsi_cfg_handlers[i].minor == minor)) {
cfg_hnd = &dsi_cfg_handlers[i];
break;
}
}
return cfg_hnd;
}
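/*
* Usage sketch of the lookup above (illustrative, values taken from the
* table in this file): the host driver reads the hardware version
* registers and resolves the per-SoC configuration, e.g.:
*
*	cfg_hnd = msm_dsi_cfg_get(MSM_DSI_VER_MAJOR_6G,
*				  MSM_DSI_6G_VER_MINOR_V2_2_1);
*	-> &sdm845_dsi_cfg paired with &msm_dsi_6g_v2_host_ops
*
* The table is scanned from the last entry backwards, and the lookup
* returns NULL when no exact major/minor match exists.
*/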
| linux-master | drivers/gpu/drm/msm/dsi/dsi_cfg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <video/mipi_display.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_of.h>
#include "dsi.h"
#include "dsi.xml.h"
#include "sfpb.xml.h"
#include "dsi_cfg.h"
#include "msm_dsc_helper.h"
#include "msm_kms.h"
#include "msm_gem.h"
#include "phy/dsi_phy.h"
#define DSI_RESET_TOGGLE_DELAY_MS 20
static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc);
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
if (!major || !minor)
return -EINVAL;
/*
* From DSI6G(v3) onwards, the addition of a 6G_HW_VERSION register at
* offset 0 shifts all other registers down by 4 bytes.
*
* To distinguish DSI6G(v3) and newer from DSIv2 and older, we read the
* DSI_VERSION register without any shift (offset 0x1f0). For DSIv2,
* this has to be a non-zero value. For DSI6G, this has to be zero (the
* offset points to a scratch register which we never touch).
*/
ver = msm_readl(base + REG_DSI_VERSION);
if (ver) {
/* older dsi host, there is no register shift */
ver = FIELD(ver, DSI_VERSION_MAJOR);
if (ver <= MSM_DSI_VER_MAJOR_V2) {
/* old versions */
*major = ver;
*minor = 0;
return 0;
} else {
return -EINVAL;
}
} else {
/*
* newer host, offset 0 has 6G_HW_VERSION, the rest of the
* registers are shifted down, read DSI_VERSION again with
* the shifted offset
*/
ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
ver = FIELD(ver, DSI_VERSION_MAJOR);
if (ver == MSM_DSI_VER_MAJOR_6G) {
/* 6G version */
*major = ver;
*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
return 0;
} else {
return -EINVAL;
}
}
}
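/*
* Worked example of the detection above (a sketch; the register values
* are illustrative): on a DSI6G host the unshifted read at 0x1f0 hits
* the scratch register and returns 0, so the function re-reads
* DSI_VERSION at 0x1f0 + DSI_6G_REG_SHIFT, takes
* major = MSM_DSI_VER_MAJOR_6G from it, and reads the minor from
* REG_DSI_6G_HW_VERSION. On a DSIv2 host the first read is already
* non-zero and is used directly, with minor = 0.
*/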
#define DSI_ERR_STATE_ACK 0x0000
#define DSI_ERR_STATE_TIMEOUT 0x0001
#define DSI_ERR_STATE_DLN0_PHY 0x0002
#define DSI_ERR_STATE_FIFO 0x0004
#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
#define DSI_CLK_CTRL_ENABLE_CLKS \
(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
struct msm_dsi_host {
struct mipi_dsi_host base;
struct platform_device *pdev;
struct drm_device *dev;
int id;
void __iomem *ctrl_base;
phys_addr_t ctrl_size;
struct regulator_bulk_data *supplies;
int num_bus_clks;
struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
struct clk *byte_clk;
struct clk *esc_clk;
struct clk *pixel_clk;
struct clk *byte_intf_clk;
unsigned long byte_clk_rate;
unsigned long byte_intf_clk_rate;
unsigned long pixel_clk_rate;
unsigned long esc_clk_rate;
/* DSI v2 specific clocks */
struct clk *src_clk;
unsigned long src_clk_rate;
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
const struct msm_dsi_cfg_handler *cfg_hnd;
struct completion dma_comp;
struct completion video_comp;
struct mutex dev_mutex;
struct mutex cmd_mutex;
spinlock_t intr_lock; /* Protect interrupt ctrl register */
u32 err_work_state;
struct work_struct err_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer */
struct drm_gem_object *tx_gem_obj;
/* DSI v2 TX buffer */
void *tx_buf;
dma_addr_t tx_buf_paddr;
int tx_size;
u8 *rx_buf;
struct regmap *sfpb;
struct drm_display_mode *mode;
struct drm_dsc_config *dsc;
/* connected device info */
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
/* lane data parsed via DT */
int dlane_swap;
int num_data_lanes;
/* from phy DT */
bool cphy_mode;
u32 dma_cmd_ctrl_restore;
bool registered;
bool power_on;
bool enabled;
int irq;
};
static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
{
switch (fmt) {
case MIPI_DSI_FMT_RGB565: return 16;
case MIPI_DSI_FMT_RGB666_PACKED: return 18;
case MIPI_DSI_FMT_RGB666:
case MIPI_DSI_FMT_RGB888:
default: return 24;
}
}
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
return msm_readl(msm_host->ctrl_base + reg);
}
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
msm_writel(data, msm_host->ctrl_base + reg);
}
static const struct msm_dsi_cfg_handler *dsi_get_config(
struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
struct clk *ahb_clk;
int ret;
u32 major = 0, minor = 0;
ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);
goto exit;
}
pm_runtime_get_sync(dev);
ret = clk_prepare_enable(ahb_clk);
if (ret) {
pr_err("%s: unable to enable ahb_clk\n", __func__);
goto runtime_put;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
if (ret) {
pr_err("%s: Invalid version\n", __func__);
goto disable_clks;
}
cfg_hnd = msm_dsi_cfg_get(major, minor);
DBG("%s: Version %x:%x\n", __func__, major, minor);
disable_clks:
clk_disable_unprepare(ahb_clk);
runtime_put:
pm_runtime_put_sync(dev);
exit:
return cfg_hnd;
}
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
return container_of(host, struct msm_dsi_host, base);
}
int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
int ret = 0;
msm_host->src_clk = msm_clk_get(pdev, "src");
if (IS_ERR(msm_host->src_clk)) {
ret = PTR_ERR(msm_host->src_clk);
pr_err("%s: can't find src clock. ret=%d\n",
__func__, ret);
msm_host->src_clk = NULL;
return ret;
}
return ret;
}
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
int ret = 0;
msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
if (IS_ERR(msm_host->byte_intf_clk)) {
ret = PTR_ERR(msm_host->byte_intf_clk);
pr_err("%s: can't find byte_intf clock. ret=%d\n",
__func__, ret);
}
return ret;
}
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
const struct msm_dsi_config *cfg = cfg_hnd->cfg;
int i, ret = 0;
/* get bus clocks */
for (i = 0; i < cfg->num_bus_clks; i++)
msm_host->bus_clks[i].id = cfg->bus_clk_names[i];
msm_host->num_bus_clks = cfg->num_bus_clks;
ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
goto exit;
}
/* get link and source clocks */
msm_host->byte_clk = msm_clk_get(pdev, "byte");
if (IS_ERR(msm_host->byte_clk)) {
ret = PTR_ERR(msm_host->byte_clk);
pr_err("%s: can't find dsi_byte clock. ret=%d\n",
__func__, ret);
msm_host->byte_clk = NULL;
goto exit;
}
msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(msm_host->pixel_clk)) {
ret = PTR_ERR(msm_host->pixel_clk);
pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
__func__, ret);
msm_host->pixel_clk = NULL;
goto exit;
}
msm_host->esc_clk = msm_clk_get(pdev, "core");
if (IS_ERR(msm_host->esc_clk)) {
ret = PTR_ERR(msm_host->esc_clk);
pr_err("%s: can't find dsi_esc clock. ret=%d\n",
__func__, ret);
msm_host->esc_clk = NULL;
goto exit;
}
if (cfg_hnd->ops->clk_init_ver)
ret = cfg_hnd->ops->clk_init_ver(msm_host);
exit:
return ret;
}
int msm_dsi_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
struct mipi_dsi_host *host = msm_dsi->host;
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
if (!msm_host->cfg_hnd)
return 0;
clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks);
return 0;
}
int msm_dsi_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
struct mipi_dsi_host *host = msm_dsi->host;
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
if (!msm_host->cfg_hnd)
return 0;
return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks);
}
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
int ret;
DBG("Set clk rates: pclk=%d, byteclk=%lu",
msm_host->mode->clock, msm_host->byte_clk_rate);
ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret);
return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
return ret;
}
if (msm_host->byte_intf_clk) {
ret = clk_set_rate(msm_host->byte_intf_clk, msm_host->byte_intf_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
return ret;
}
}
return 0;
}
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
goto error;
}
ret = clk_prepare_enable(msm_host->byte_clk);
if (ret) {
pr_err("%s: Failed to enable dsi byte clk\n", __func__);
goto byte_clk_err;
}
ret = clk_prepare_enable(msm_host->pixel_clk);
if (ret) {
pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
goto pixel_clk_err;
}
ret = clk_prepare_enable(msm_host->byte_intf_clk);
if (ret) {
pr_err("%s: Failed to enable byte intf clk\n",
__func__);
goto byte_intf_clk_err;
}
return 0;
byte_intf_clk_err:
clk_disable_unprepare(msm_host->pixel_clk);
pixel_clk_err:
clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
clk_disable_unprepare(msm_host->esc_clk);
error:
return ret;
}
int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
msm_host->mode->clock, msm_host->byte_clk_rate,
msm_host->esc_clk_rate, msm_host->src_clk_rate);
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
return ret;
}
ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
return ret;
}
ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
return ret;
}
return 0;
}
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
ret = clk_prepare_enable(msm_host->byte_clk);
if (ret) {
pr_err("%s: Failed to enable dsi byte clk\n", __func__);
goto error;
}
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
goto esc_clk_err;
}
ret = clk_prepare_enable(msm_host->src_clk);
if (ret) {
pr_err("%s: Failed to enable dsi src clk\n", __func__);
goto src_clk_err;
}
ret = clk_prepare_enable(msm_host->pixel_clk);
if (ret) {
pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
goto pixel_clk_err;
}
return 0;
pixel_clk_err:
clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
clk_disable_unprepare(msm_host->byte_clk);
error:
return ret;
}
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
/* Drop the performance state vote */
dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->pixel_clk);
clk_disable_unprepare(msm_host->byte_intf_clk);
clk_disable_unprepare(msm_host->byte_clk);
}
void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
{
clk_disable_unprepare(msm_host->pixel_clk);
clk_disable_unprepare(msm_host->src_clk);
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->byte_clk);
}
static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
const struct drm_dsc_config *dsc)
{
int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc),
dsc->bits_per_component * 3);
int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
}
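/*
* Worked example for the compression adjustment above, assuming a
* hypothetical 1080x2340 panel (htotal = 1180, vtotal = 2400, 60 Hz)
* compressed at 8 bpp with 8 bits per component:
*
*	new_hdisplay = DIV_ROUND_UP(1080 * 8, 8 * 3) = 360
*	new_htotal   = 1180 - 1080 + 360 = 460
*	pclk         = 460 * 2400 * 60 = 66240000 Hz
*
* i.e. the pixel clock only has to carry the compressed stream plus the
* uncompressed blanking.
*/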
static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
const struct drm_dsc_config *dsc, bool is_bonded_dsi)
{
unsigned long pclk_rate;
pclk_rate = mode->clock * 1000;
if (dsc)
pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
/*
* For bonded DSI mode, the current DRM mode has the complete width of the
* panel. Since, the complete panel is driven by two DSI controllers,
* the clock rates have to be split between the two dsi controllers.
* Adjust the byte and pixel clock rates for each dsi host accordingly.
*/
if (is_bonded_dsi)
pclk_rate /= 2;
return pclk_rate;
}
unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi,
const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
unsigned long pclk_rate = dsi_get_pclk_rate(mode, msm_host->dsc, is_bonded_dsi);
unsigned long pclk_bpp;
if (lanes == 0) {
pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
lanes = 1;
}
/* CPHY "byte_clk" is in units of 16 bits */
if (msm_host->cphy_mode)
pclk_bpp = mult_frac(pclk_rate, bpp, 16 * lanes);
else
pclk_bpp = mult_frac(pclk_rate, bpp, 8 * lanes);
return pclk_bpp;
}
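/*
* Worked example for the byte clock math above (hypothetical rates):
* with pclk_rate = 148500000, bpp = 24 (RGB888) and 4 data lanes:
*
*	D-PHY: byte_clk = 148500000 * 24 / (8 * 4)  = 111375000 Hz
*	C-PHY: byte_clk = 148500000 * 24 / (16 * 4) =  55687500 Hz
*
* i.e. the byte clock is the per-lane bit rate divided by the 8-bit
* (D-PHY) or 16-bit (C-PHY) serialization unit.
*/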
static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
msm_host->pixel_clk_rate = dsi_get_pclk_rate(msm_host->mode, msm_host->dsc, is_bonded_dsi);
msm_host->byte_clk_rate = dsi_byte_clk_get_rate(&msm_host->base, is_bonded_dsi,
msm_host->mode);
DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
}
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
if (!msm_host->mode) {
pr_err("%s: mode not set\n", __func__);
return -EINVAL;
}
dsi_calc_pclk(msm_host, is_bonded_dsi);
msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
return 0;
}
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
u32 bpp = dsi_get_bpp(msm_host->format);
unsigned int esc_mhz, esc_div;
unsigned long byte_mhz;
dsi_calc_pclk(msm_host, is_bonded_dsi);
msm_host->src_clk_rate = mult_frac(msm_host->pixel_clk_rate, bpp, 8);
/*
* The esc clock is the byte clock followed by a 4-bit divider,
* so we need to find an escape clock frequency that is within
* the MIPI DSI spec range and within the maximum divider limit.
* We iterate over escape clock frequencies from 20 MHz down to
* 5 MHz and pick the first one our divider can support.
*/
byte_mhz = msm_host->byte_clk_rate / 1000000;
for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
/*
* TODO: Ideally, we shouldn't know what sort of divider
* is available in mmss_cc, we're just assuming that
* it'll always be a 4 bit divider. Need to come up with
* a better way here.
*/
if (esc_div >= 1 && esc_div <= 16)
break;
}
if (esc_mhz < 5)
return -EINVAL;
msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate,
msm_host->src_clk_rate);
return 0;
}
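/*
* Worked example of the esc divider search above (hypothetical rates):
* with byte_clk_rate = 111375000, byte_mhz = 111. The loop starts at
* esc_mhz = 20, giving esc_div = DIV_ROUND_UP(111, 20) = 6, which fits
* the 4-bit divider range (1..16), so:
*
*	esc_clk_rate = 111375000 / 6 = 18562500 Hz
*
* which lands inside the targeted 5-20 MHz escape clock window.
*/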
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{
u32 intr;
unsigned long flags;
spin_lock_irqsave(&msm_host->intr_lock, flags);
intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
if (enable)
intr |= mask;
else
intr &= ~mask;
DBG("intr=%x enable=%d", intr, enable);
dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
spin_unlock_irqrestore(&msm_host->intr_lock, flags);
}
static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
{
if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
return BURST_MODE;
else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
return NON_BURST_SYNCH_PULSE;
return NON_BURST_SYNCH_EVENT;
}
static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
default: return VID_DST_FORMAT_RGB888;
}
}
static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
case MIPI_DSI_FMT_RGB666_PACKED:
case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
default: return CMD_DST_FORMAT_RGB888;
}
}
static void dsi_ctrl_disable(struct msm_dsi_host *msm_host)
{
dsi_write(msm_host, REG_DSI_CTRL, 0);
}
static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy)
{
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u32 data = 0, lane_ctrl = 0;
if (flags & MIPI_DSI_MODE_VIDEO) {
if (flags & MIPI_DSI_MODE_VIDEO_HSE)
data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
if (flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
data |= DSI_VID_CFG0_HFP_POWER_STOP;
if (flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
data |= DSI_VID_CFG0_HBP_POWER_STOP;
if (flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
data |= DSI_VID_CFG0_HSA_POWER_STOP;
/* Always set low power stop mode for BLLP
* to let command engine send packets
*/
data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
DSI_VID_CFG0_BLLP_POWER_STOP;
data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
dsi_write(msm_host, REG_DSI_VID_CFG0, data);
/* Do not swap RGB colors */
data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
dsi_write(msm_host, REG_DSI_VID_CFG1, data);
} else {
/* Do not swap RGB colors */
data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
DSI_CMD_CFG1_WR_MEM_CONTINUE(
MIPI_DCS_WRITE_MEMORY_CONTINUE);
/* Always insert DCS command */
data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
if (msm_host->cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
msm_host->cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3) {
data = dsi_read(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2);
data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE;
dsi_write(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2, data);
}
}
dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
DSI_CMD_DMA_CTRL_LOW_POWER);
data = 0;
/* Always assume dedicated TE pin */
data |= DSI_TRIG_CTRL_TE;
data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
(cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
phy_shared_timings->clk_pre_inc_by_2)
dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
data = 0;
if (!(flags & MIPI_DSI_MODE_NO_EOT_PACKET))
data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
/* allow only ack-err-status to generate interrupt */
dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
data = DSI_CTRL_CLK_EN;
DBG("lane number=%d", msm_host->lanes);
data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
if (msm_dsi_phy_set_continuous_clock(phy, true))
lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY;
dsi_write(msm_host, REG_DSI_LANE_CTRL,
lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
}
data |= DSI_CTRL_ENABLE;
dsi_write(msm_host, REG_DSI_CTRL, data);
if (msm_host->cphy_mode)
dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
{
struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, reg_ctrl, reg_ctrl2;
u32 slice_per_intf, total_bytes_per_intf;
u32 pkt_per_line;
u32 eol_byte_num;
/* first calculate the dsc parameters and then program
* the compression mode registers
*/
slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
eol_byte_num = total_bytes_per_intf % 3;
/*
* Typically, pkt_per_line = slice_per_intf * slice_per_pkt.
*
* Since the current driver only supports slice_per_pkt = 1,
* pkt_per_line will be equal to slice per intf for now.
*/
pkt_per_line = slice_per_intf;
if (is_cmd_mode) /* packet data type */
reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
else
reg = DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(MIPI_DSI_COMPRESSED_PIXEL_STREAM);
/* The DSI_VIDEO_COMPRESSION_MODE and DSI_COMMAND_COMPRESSION_MODE
* registers have a similar layout, so the common code below uses the
* DSI_VIDEO_COMPRESSION_MODE_XXXX macros when setting bits.
*/
reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(pkt_per_line >> 1);
reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(eol_byte_num);
reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN;
if (is_cmd_mode) {
reg_ctrl = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL);
reg_ctrl2 = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2);
reg_ctrl &= ~0xffff;
reg_ctrl |= reg;
reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(dsc->slice_chunk_size);
dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
} else {
dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
}
}
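/*
* Worked example for the register math above, assuming a hypothetical
* 1080 px wide mode compressed at 8 bpp with two 540 px slices
* (slice_chunk_size = 540 bytes):
*
*	slice_per_intf       = 2
*	total_bytes_per_intf = 540 * 2 = 1080
*	eol_byte_num         = 1080 % 3 = 0
*	pkt_per_line         = 2, encoded as 2 >> 1 = 1 in the register
*/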
static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
u32 h_total = mode->htotal;
u32 v_total = mode->vtotal;
u32 hs_end = mode->hsync_end - mode->hsync_start;
u32 vs_end = mode->vsync_end - mode->vsync_start;
u32 ha_start = h_total - mode->hsync_start;
u32 ha_end = ha_start + mode->hdisplay;
u32 va_start = v_total - mode->vsync_start;
u32 va_end = va_start + mode->vdisplay;
u32 hdisplay = mode->hdisplay;
u32 wc;
int ret;
DBG("");
/*
* For bonded DSI mode, the current DRM mode has
* the complete width of the panel. Since, the complete
* panel is driven by two DSI controllers, the horizontal
* timings have to be split between the two dsi controllers.
* Adjust the DSI host timing values accordingly.
*/
if (is_bonded_dsi) {
h_total /= 2;
hs_end /= 2;
ha_start /= 2;
ha_end /= 2;
hdisplay /= 2;
}
if (msm_host->dsc) {
struct drm_dsc_config *dsc = msm_host->dsc;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
pr_err("DSI: invalid input: pic_width: %d pic_height: %d\n",
mode->hdisplay, mode->vdisplay);
return;
}
dsc->pic_width = mode->hdisplay;
dsc->pic_height = mode->vdisplay;
DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height);
/* we do the calculations for dsc parameters here so that
* panel can use these parameters
*/
ret = dsi_populate_dsc_params(msm_host, dsc);
if (ret)
return;
/* Divide the display width by 3 but keep the back/front porch
* and pulse width the same
*/
h_total -= hdisplay;
hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), 3);
h_total += hdisplay;
ha_end = ha_start + hdisplay;
}
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
if (msm_host->dsc)
dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
DSI_ACTIVE_H_END(ha_end));
dsi_write(msm_host, REG_DSI_ACTIVE_V,
DSI_ACTIVE_V_START(va_start) |
DSI_ACTIVE_V_END(va_end));
dsi_write(msm_host, REG_DSI_TOTAL,
DSI_TOTAL_H_TOTAL(h_total - 1) |
DSI_TOTAL_V_TOTAL(v_total - 1));
dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
DSI_ACTIVE_HSYNC_START(hs_start) |
DSI_ACTIVE_HSYNC_END(hs_end));
dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
if (msm_host->dsc)
dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
/* image data and 1 byte write_memory_start cmd */
if (!msm_host->dsc)
wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
else
/*
* When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
* Currently, the driver only supports default value of slice_per_pkt = 1
*
* TODO: Expand mipi_dsi_device struct to hold slice_per_pkt info
* and adjust DSC math to account for slice_per_pkt.
*/
wc = msm_host->dsc->slice_chunk_size + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(
msm_host->channel) |
DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(
MIPI_DSI_DCS_LONG_WRITE));
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL,
DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) |
DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay));
}
}
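/*
* Worked example of the timing split above (hypothetical bonded-DSI
* panel): for a 2160 px wide mode with h_total = 2400, each of the two
* controllers is programmed with hdisplay = 1080 and h_total = 1200,
* while all vertical timings stay untouched. With DSC enabled on top,
* hdisplay is further replaced by bytes_per_line / 3 (e.g. 1080 bytes
* of compressed data per controller -> 360), with the porches and
* pulse width kept as-is.
*/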
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
u32 ctrl;
ctrl = dsi_read(msm_host, REG_DSI_CTRL);
if (ctrl & DSI_CTRL_ENABLE) {
dsi_write(msm_host, REG_DSI_CTRL, ctrl & ~DSI_CTRL_ENABLE);
/*
* The dsi controller needs to be disabled before
* the clocks are turned on
*/
wmb();
}
dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
wmb(); /* clocks need to be enabled before reset */
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
if (ctrl & DSI_CTRL_ENABLE) {
dsi_write(msm_host, REG_DSI_CTRL, ctrl);
wmb(); /* make sure dsi controller enabled again */
}
}
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
bool video_mode, bool enable)
{
u32 dsi_ctrl;
dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
if (!enable) {
dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
DSI_CTRL_CMD_MODE_EN);
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
DSI_IRQ_MASK_VIDEO_DONE, 0);
} else {
if (video_mode) {
dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
} else { /* command mode */
dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
}
dsi_ctrl |= DSI_CTRL_ENABLE;
}
dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
}
static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
{
u32 data;
data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
if (mode == 0)
data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
else
data |= DSI_CMD_DMA_CTRL_LOW_POWER;
dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
}
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
u32 ret = 0;
struct device *dev = &msm_host->pdev->dev;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
reinit_completion(&msm_host->video_comp);
ret = wait_for_completion_timeout(&msm_host->video_comp,
msecs_to_jiffies(70));
if (ret == 0)
DRM_DEV_ERROR(dev, "wait for video done timed out\n");
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
return;
if (msm_host->power_on && msm_host->enabled) {
dsi_wait4video_done(msm_host);
/* delay 2-4 ms to skip the BLLP */
usleep_range(2000, 4000);
}
}
int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
uint64_t iova;
u8 *data;
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
priv->kms->aspace,
&msm_host->tx_gem_obj, &iova);
if (IS_ERR(data)) {
msm_host->tx_gem_obj = NULL;
return PTR_ERR(data);
}
msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
msm_host->tx_size = msm_host->tx_gem_obj->size;
return 0;
}
int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
&msm_host->tx_buf_paddr, GFP_KERNEL);
if (!msm_host->tx_buf)
return -ENOMEM;
msm_host->tx_size = size;
return 0;
}
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv;
/*
* This is possible if we're tearing down before we've had a chance to
* fully initialize. A very real possibility if our probe is deferred,
* in which case we'll hit msm_dsi_host_destroy() without having run
* through the dsi_tx_buf_alloc().
*/
if (!dev)
return;
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
if (msm_host->tx_buf)
dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
msm_host->tx_buf_paddr);
}
void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
{
return msm_gem_get_vaddr(msm_host->tx_gem_obj);
}
void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
{
return msm_host->tx_buf;
}
void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
{
msm_gem_put_vaddr(msm_host->tx_gem_obj);
}
/*
* prepare cmd buffer to be txed
*/
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
const struct mipi_dsi_msg *msg)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct mipi_dsi_packet packet;
int len;
int ret;
u8 *data;
ret = mipi_dsi_create_packet(&packet, msg);
if (ret) {
pr_err("%s: create packet failed, %d\n", __func__, ret);
return ret;
}
len = (packet.size + 3) & (~0x3);
if (len > msm_host->tx_size) {
pr_err("%s: packet size is too big\n", __func__);
return -EINVAL;
}
data = cfg_hnd->ops->tx_buf_get(msm_host);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
return ret;
}
/* MSM specific command format in memory */
data[0] = packet.header[1];
data[1] = packet.header[2];
data[2] = packet.header[0];
data[3] = BIT(7); /* Last packet */
if (mipi_dsi_packet_format_is_long(msg->type))
data[3] |= BIT(6);
if (msg->rx_buf && msg->rx_len)
data[3] |= BIT(5);
/* Long packet */
if (packet.payload && packet.payload_length)
memcpy(data + 4, packet.payload, packet.payload_length);
/* Append 0xff to the end */
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
if (cfg_hnd->ops->tx_buf_put)
cfg_hnd->ops->tx_buf_put(msm_host);
return len;
}
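/*
* Example of the MSM command layout built above (a sketch): for a DCS
* exit_sleep_mode short write (type 0x05, payload 0x11) on virtual
* channel 0, mipi_dsi_create_packet() yields header = {0x05, 0x11,
* 0x00}, so the tx buffer starts with:
*
*	data[0] = 0x11  (header[1])
*	data[1] = 0x00  (header[2])
*	data[2] = 0x05  (header[0])
*	data[3] = 0x80  (BIT(7): last packet; short write, no rx)
*
* and the length is rounded up to a 4-byte multiple.
*/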
/*
* dsi_short_read1_resp: 1 parameter
*/
static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
if (data && (msg->rx_len >= 1)) {
*data = buf[1]; /* strip out dcs type */
return 1;
} else {
pr_err("%s: read data does not match with rx_buf len %zu\n",
__func__, msg->rx_len);
return -EINVAL;
}
}
/*
* dsi_short_read2_resp: 2 parameter
*/
static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
if (data && (msg->rx_len >= 2)) {
data[0] = buf[1]; /* strip out dcs type */
data[1] = buf[2];
return 2;
} else {
pr_err("%s: read data does not match with rx_buf len %zu\n",
__func__, msg->rx_len);
return -EINVAL;
}
}
static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
/* strip out 4 byte dcs header */
if (msg->rx_buf && msg->rx_len)
memcpy(msg->rx_buf, buf + 4, msg->rx_len);
return msg->rx_len;
}
int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
if (!dma_base)
return -EINVAL;
return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
priv->kms->aspace, dma_base);
}
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
if (!dma_base)
return -EINVAL;
*dma_base = msm_host->tx_buf_paddr;
return 0;
}
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
uint64_t dma_base;
bool triggered;
ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
if (ret) {
pr_err("%s: failed to get iova: %d\n", __func__, ret);
return ret;
}
reinit_completion(&msm_host->dma_comp);
dsi_wait4video_eng_busy(msm_host);
triggered = msm_dsi_manager_cmd_xfer_trigger(
msm_host->id, dma_base, len);
if (triggered) {
ret = wait_for_completion_timeout(&msm_host->dma_comp,
msecs_to_jiffies(200));
DBG("ret=%d", ret);
if (ret == 0)
ret = -ETIMEDOUT;
else
ret = len;
} else
ret = len;
return ret;
}
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
u8 *buf, int rx_byte, int pkt_size)
{
u32 *temp, data;
int i, j = 0, cnt;
u32 read_cnt;
u8 reg[16];
int repeated_bytes = 0;
int buf_offset = buf - msm_host->rx_buf;
temp = (u32 *)reg;
cnt = (rx_byte + 3) >> 2;
if (cnt > 4)
cnt = 4; /* 4 x 32 bits registers only */
if (rx_byte == 4)
read_cnt = 4;
else
read_cnt = pkt_size + 6;
/*
* In the case of multiple reads from the panel, after the first read
* some bytes of the payload may repeat in the RDBK_DATA registers,
* because every pass reads all the parameters from the panel starting
* with the first byte. We need to skip the repeated bytes and then
* append only the new parameters to the rx buffer.
*/
if (read_cnt > 16) {
int bytes_shifted;
/* Any data more than 16 bytes will be shifted out.
* The temp read buffer should already contain these bytes.
* The remaining bytes in read buffer are the repeated bytes.
*/
bytes_shifted = read_cnt - 16;
repeated_bytes = buf_offset - bytes_shifted;
}
for (i = cnt - 1; i >= 0; i--) {
data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
*temp++ = ntohl(data); /* to host byte order */
DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
}
for (i = repeated_bytes; i < 16; i++)
buf[j++] = reg[i];
return j;
}
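/*
* Worked example of the repeat-skip logic above (values illustrative):
* for a follow-up read with pkt_size = 14 and the output pointer
* already advanced by buf_offset = 10, read_cnt = 14 + 6 = 20 exceeds
* the 16 byte RDBK window, so bytes_shifted = 20 - 16 = 4 and
* repeated_bytes = 10 - 4 = 6: reg[0..5] duplicate bytes the caller
* already has, and only reg[6..15] are appended to the rx buffer.
*/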
static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
const struct mipi_dsi_msg *msg)
{
int len, ret;
int bllp_len = msm_host->mode->hdisplay *
dsi_get_bpp(msm_host->format) / 8;
len = dsi_cmd_dma_add(msm_host, msg);
if (len < 0) {
pr_err("%s: failed to add cmd type = 0x%x\n",
__func__, msg->type);
return len;
}
/* For video mode, do not send commands longer than
* one pixel line, since they are only transmitted
* during the BLLP.
*/
/* TODO: if the command is sent in LP mode, the bit rate is only
* half of esc clk rate. In this case, if the video is already
* actively streaming, we need to check more carefully if the
* command can be fit into one BLLP.
*/
if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
__func__, len);
return -EINVAL;
}
ret = dsi_cmd_dma_tx(msm_host, len);
if (ret < 0) {
pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
__func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
return ret;
} else if (ret < len) {
pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
__func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
return -EIO;
}
return len;
}
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
container_of(work, struct msm_dsi_host, err_work);
u32 status = msm_host->err_work_state;
pr_err_ratelimited("%s: status=%x\n", __func__, status);
if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
dsi_sw_reset(msm_host);
/* It is safe to clear here because error irq is disabled. */
msm_host->err_work_state = 0;
/* enable dsi error interrupt */
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}
static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
{
u32 status;
status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
if (status) {
dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
/* Writing of an extra 0 needed to clear error bits */
dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
msm_host->err_work_state |= DSI_ERR_STATE_ACK;
}
}
static void dsi_timeout_status(struct msm_dsi_host *msm_host)
{
u32 status;
status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
if (status) {
dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
}
}
static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
{
u32 status;
status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
}
}
static void dsi_fifo_status(struct msm_dsi_host *msm_host)
{
u32 status;
status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
/* fifo underflow, overflow */
if (status) {
dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
msm_host->err_work_state |=
DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
}
}
static void dsi_status(struct msm_dsi_host *msm_host)
{
u32 status;
status = dsi_read(msm_host, REG_DSI_STATUS0);
if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
dsi_write(msm_host, REG_DSI_STATUS0, status);
msm_host->err_work_state |=
DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
}
}
static void dsi_clk_status(struct msm_dsi_host *msm_host)
{
u32 status;
status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
}
}
static void dsi_error(struct msm_dsi_host *msm_host)
{
/* disable dsi error interrupt */
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
dsi_clk_status(msm_host);
dsi_fifo_status(msm_host);
dsi_ack_err_status(msm_host);
dsi_timeout_status(msm_host);
dsi_status(msm_host);
dsi_dln0_phy_err(msm_host);
queue_work(msm_host->workqueue, &msm_host->err_work);
}
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
struct msm_dsi_host *msm_host = ptr;
u32 isr;
unsigned long flags;
if (!msm_host->ctrl_base)
return IRQ_HANDLED;
spin_lock_irqsave(&msm_host->intr_lock, flags);
isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
spin_unlock_irqrestore(&msm_host->intr_lock, flags);
DBG("isr=0x%x, id=%d", isr, msm_host->id);
if (isr & DSI_IRQ_ERROR)
dsi_error(msm_host);
if (isr & DSI_IRQ_VIDEO_DONE)
complete(&msm_host->video_comp);
if (isr & DSI_IRQ_CMD_DMA_DONE)
complete(&msm_host->dma_comp);
return IRQ_HANDLED;
}
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
struct device *panel_device)
{
msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
"disp-enable",
GPIOD_OUT_LOW);
if (IS_ERR(msm_host->disp_en_gpio)) {
DBG("cannot get disp-enable-gpios %ld",
PTR_ERR(msm_host->disp_en_gpio));
return PTR_ERR(msm_host->disp_en_gpio);
}
msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
GPIOD_IN);
if (IS_ERR(msm_host->te_gpio)) {
DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
return PTR_ERR(msm_host->te_gpio);
}
return 0;
}
static int dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
if (dsi->lanes > msm_host->num_data_lanes)
return -EINVAL;
msm_host->channel = dsi->channel;
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
if (dsi->dsc)
msm_host->dsc = dsi->dsc;
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
return ret;
ret = dsi_dev_attach(msm_host->pdev);
if (ret)
return ret;
DBG("id=%d", msm_host->id);
return 0;
}
static int dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
dsi_dev_detach(msm_host->pdev);
DBG("id=%d", msm_host->id);
return 0;
}
static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
if (!msg || !msm_host->power_on)
return -EINVAL;
mutex_lock(&msm_host->cmd_mutex);
ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
mutex_unlock(&msm_host->cmd_mutex);
return ret;
}
static const struct mipi_dsi_host_ops dsi_host_ops = {
.attach = dsi_host_attach,
.detach = dsi_host_detach,
.transfer = dsi_host_transfer,
};
/*
* List of supported physical to logical lane mappings.
* For example, the 2nd entry represents the following mapping:
*
* "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
*/
static const int supported_data_lane_swaps[][4] = {
{ 0, 1, 2, 3 },
{ 3, 0, 1, 2 },
{ 2, 3, 0, 1 },
{ 1, 2, 3, 0 },
{ 0, 3, 2, 1 },
{ 1, 0, 3, 2 },
{ 2, 1, 0, 3 },
{ 3, 2, 1, 0 },
};
static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
struct device_node *ep)
{
struct device *dev = &msm_host->pdev->dev;
struct property *prop;
u32 lane_map[4];
int ret, i, len, num_lanes;
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
/* Set the number of data lanes to 4 by default. */
msm_host->num_data_lanes = 4;
return 0;
}
num_lanes = drm_of_get_data_lanes_count(ep, 1, 4);
if (num_lanes < 0) {
DRM_DEV_ERROR(dev, "bad number of data lanes\n");
return num_lanes;
}
msm_host->num_data_lanes = num_lanes;
ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
DRM_DEV_ERROR(dev, "failed to read lane data\n");
return ret;
}
/*
* compare DT specified physical-logical lane mappings with the ones
* supported by hardware
*/
for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
const int *swap = supported_data_lane_swaps[i];
int j;
/*
* the data-lanes array we get from DT has a logical->physical
* mapping. The "data lane swap" register field represents
* supported configurations in a physical->logical mapping.
* Translate the DT mapping to what we understand and find a
* configuration that works.
*/
for (j = 0; j < num_lanes; j++) {
if (lane_map[j] < 0 || lane_map[j] > 3)
DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
lane_map[j]);
if (swap[lane_map[j]] != j)
break;
}
if (j == num_lanes) {
msm_host->dlane_swap = i;
return 0;
}
}
return -EINVAL;
}
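/*
* Example (hypothetical DT snippet): data-lanes = <1 2 3 0> places
* logical lane 0 on physical lane 1, lane 1 on physical 2, lane 2 on
* physical 3 and lane 3 on physical 0. Translated to the
* physical->logical view this is the "3012" entry above
* (supported_data_lane_swaps[1]), so dlane_swap is set to 1.
*/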
static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc)
{
int ret;
if (dsc->bits_per_pixel & 0xf) {
DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support fractional bits_per_pixel\n");
return -EINVAL;
}
if (dsc->bits_per_component != 8) {
DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
return -EOPNOTSUPP;
}
dsc->simple_422 = 0;
dsc->convert_rgb = 1;
dsc->vbr_enable = 0;
drm_dsc_set_const_params(dsc);
drm_dsc_set_rc_buf_thresh(dsc);
/* handle only bpp = bpc = 8, pre-SCR panels */
ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
if (ret) {
DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
return ret;
}
dsc->initial_scale_value = drm_dsc_initial_scale_value(dsc);
dsc->line_buf_depth = dsc->bits_per_component + 1;
return drm_dsc_compute_rc_parameters(dsc);
}
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *endpoint;
int ret = 0;
/*
* Get the endpoint of the output port of the DSI host. In our case,
* this is mapped to port number with reg = 1. Don't return an error if
* the remote endpoint isn't defined. It's possible that there is
* nothing connected to the dsi output.
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
return 0;
}
ret = dsi_host_parse_lane_data(msm_host, endpoint);
if (ret) {
DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
ret = -EINVAL;
goto err;
}
if (of_property_read_bool(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
__func__);
ret = PTR_ERR(msm_host->sfpb);
}
}
err:
of_node_put(endpoint);
return ret;
}
static int dsi_host_get_id(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
struct resource *res;
int i, j;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
if (!res)
return -EINVAL;
for (i = 0; i < VARIANTS_MAX; i++)
for (j = 0; j < DSI_MAX; j++)
if (cfg->io_start[i][j] == res->start)
return j;
return -EINVAL;
}
int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
struct platform_device *pdev = msm_dsi->pdev;
const struct msm_dsi_config *cfg;
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
if (!msm_host)
return -ENOMEM;
msm_host->pdev = pdev;
msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
if (ret) {
pr_err("%s: failed to parse dt\n", __func__);
return ret;
}
msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
if (IS_ERR(msm_host->ctrl_base)) {
pr_err("%s: unable to map Dsi ctrl base\n", __func__);
return PTR_ERR(msm_host->ctrl_base);
}
pm_runtime_enable(&pdev->dev);
msm_host->cfg_hnd = dsi_get_config(msm_host);
if (!msm_host->cfg_hnd) {
pr_err("%s: get config failed\n", __func__);
return -EINVAL;
}
cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
if (msm_host->id < 0) {
pr_err("%s: unable to identify DSI host index\n", __func__);
return msm_host->id;
}
/* fixup base address by io offset */
msm_host->ctrl_base += cfg->io_offset;
ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators,
cfg->regulator_data,
&msm_host->supplies);
if (ret)
return ret;
ret = dsi_clk_init(msm_host);
if (ret) {
pr_err("%s: unable to initialize dsi clks\n", __func__);
return ret;
}
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
if (!msm_host->rx_buf) {
pr_err("%s: alloc rx temp buf failed\n", __func__);
return -ENOMEM;
}
ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
return ret;
}
/* irq_of_parse_and_map() returns 0 on failure, never a negative value */
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!msm_host->irq) {
dev_err(&pdev->dev, "failed to get irq\n");
return -EINVAL;
}
/* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
msm_host->irq, ret);
return ret;
}
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
mutex_init(&msm_host->dev_mutex);
mutex_init(&msm_host->cmd_mutex);
spin_lock_init(&msm_host->intr_lock);
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
if (!msm_host->workqueue)
return -ENOMEM;
INIT_WORK(&msm_host->err_work, dsi_err_worker);
msm_dsi->id = msm_host->id;
DBG("Dsi Host %d initialized", msm_host->id);
return 0;
}
void msm_dsi_host_destroy(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
DBG("");
dsi_tx_buf_free(msm_host);
if (msm_host->workqueue) {
destroy_workqueue(msm_host->workqueue);
msm_host->workqueue = NULL;
}
mutex_destroy(&msm_host->cmd_mutex);
mutex_destroy(&msm_host->dev_mutex);
pm_runtime_disable(&msm_host->pdev->dev);
}
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
msm_host->dev = dev;
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
return ret;
}
return 0;
}
int msm_dsi_host_register(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
/* Register mipi dsi host */
if (!msm_host->registered) {
host->dev = &msm_host->pdev->dev;
host->ops = &dsi_host_ops;
ret = mipi_dsi_host_register(host);
if (ret)
return ret;
msm_host->registered = true;
}
return 0;
}
void msm_dsi_host_unregister(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
if (msm_host->registered) {
mipi_dsi_host_unregister(host);
host->dev = NULL;
host->ops = NULL;
msm_host->registered = false;
}
}
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
/* TODO: make sure dsi_cmd_mdp is idle.
* Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
* to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
* How to handle the old versions? Wait for mdp cmd done?
*/
/*
 * The MDSS interrupt is generated in the MDP core clock domain, so the
 * MDP clock needs to be enabled to receive DSI interrupts.
 */
pm_runtime_get_sync(&msm_host->pdev->dev);
cfg_hnd->ops->link_clk_set_rate(msm_host);
cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
dsi_set_tx_power_mode(0, msm_host);
msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
dsi_write(msm_host, REG_DSI_CTRL,
msm_host->dma_cmd_ctrl_restore |
DSI_CTRL_CMD_MODE_EN |
DSI_CTRL_ENABLE);
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
return 0;
}
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
dsi_set_tx_power_mode(1, msm_host);
/* TODO: unvote for bus bandwidth */
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
}
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
return dsi_cmds2buf_tx(msm_host, msg);
}
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int data_byte, rx_byte, dlen, end;
int short_response, diff, pkt_size, ret = 0;
char cmd;
int rlen = msg->rx_len;
u8 *buf;
if (rlen <= 2) {
short_response = 1;
pkt_size = rlen;
rx_byte = 4;
} else {
short_response = 0;
data_byte = 10; /* first read */
if (rlen < data_byte)
pkt_size = rlen;
else
pkt_size = data_byte;
rx_byte = data_byte + 6; /* 4 header + 2 crc */
}
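/*
 * Worked example (editorial illustration): for rlen = 18, the first pass
 * reads pkt_size = 10 payload bytes (rx_byte = 16 including the 4-byte
 * header and 2-byte CRC); the remaining 8 bytes arrive on the second
 * pass, where data_byte becomes 14 and pkt_size grows to 18.
 */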
buf = msm_host->rx_buf;
end = 0;
while (!end) {
u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
struct mipi_dsi_msg max_pkt_size_msg = {
.channel = msg->channel,
.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
.tx_len = 2,
.tx_buf = tx,
};
DBG("rlen=%d pkt_size=%d rx_byte=%d",
rlen, pkt_size, rx_byte);
ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
if (ret < 2) {
pr_err("%s: Set max pkt size failed, %d\n",
__func__, ret);
return -EINVAL;
}
if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
/* Clear the RDBK_DATA registers */
dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
DSI_RDBK_DATA_CTRL_CLR);
wmb(); /* make sure the RDBK registers are cleared */
dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
wmb(); /* release cleared status before transfer */
}
ret = dsi_cmds2buf_tx(msm_host, msg);
if (ret < 0) {
pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
return ret;
} else if (ret < msg->tx_len) {
pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
return -ECOMM;
}
/*
 * Once the cmd_dma_done interrupt is received, the return data from
 * the client is already stored in the RDBK_DATA registers. Since the
 * RX FIFO is 16 bytes, the DCS header is only kept on the first loop
 * iteration; after that it is lost as data shifts through the
 * registers.
 */
dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
if (dlen <= 0)
return 0;
if (short_response)
break;
if (rlen <= data_byte) {
diff = data_byte - rlen;
end = 1;
} else {
diff = 0;
rlen -= data_byte;
}
if (!end) {
dlen -= 2; /* 2 crc */
dlen -= diff;
buf += dlen; /* next start position */
data_byte = 14; /* NOT first read */
if (rlen < data_byte)
pkt_size += rlen;
else
pkt_size += data_byte;
DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
}
}
/*
 * For a single long read with requested rlen < 10, shift the start
 * position of the RX data buffer to skip the bytes that were not
 * updated.
 */
if (pkt_size < 10 && !short_response)
buf = msm_host->rx_buf + (10 - rlen);
else
buf = msm_host->rx_buf;
cmd = buf[0];
switch (cmd) {
case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
ret = 0;
break;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
ret = dsi_short_read1_resp(buf, msg);
break;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
ret = dsi_short_read2_resp(buf, msg);
break;
case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
ret = dsi_long_read_resp(buf, msg);
break;
default:
pr_warn("%s:Invalid response cmd\n", __func__);
ret = 0;
}
return ret;
}
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
/* Make sure trigger happens */
wmb();
}
void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
struct msm_dsi_phy *src_phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
msm_host->cphy_mode = src_phy->cphy_mode;
}
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
DBG("");
dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
/* Make sure fully reset */
wmb();
udelay(1000);
dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
udelay(100);
}
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req,
bool is_bonded_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
}
/* CPHY transmits 16 bits over 7 clock cycles.
 * "byte_clk" is in units of 16 bits (see dsi_calc_pclk), so multiply
 * by 7 to get the bit clock rate.
 */
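/* For example (editorial illustration): a 100 MHz byte clock yields a
 * 700 MHz bit clock in C-PHY mode and an 800 MHz bit clock in D-PHY
 * mode (8 bits per byte clock).
 */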
if (msm_host->cphy_mode)
clk_req->bitclk_rate = msm_host->byte_clk_rate * 7;
else
clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
}
void msm_dsi_host_enable_irq(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
enable_irq(msm_host->irq);
}
void msm_dsi_host_disable_irq(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
disable_irq(msm_host->irq);
}
int msm_dsi_host_enable(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
dsi_op_mode_config(msm_host,
!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
/* TODO: clock should be turned off for command mode,
* and only turned on before MDP START.
* This part of code should be enabled once mdp driver support it.
*/
/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
* dsi_link_clk_disable(msm_host);
* pm_runtime_put(&msm_host->pdev->dev);
* }
*/
msm_host->enabled = true;
return 0;
}
int msm_dsi_host_disable(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
msm_host->enabled = false;
dsi_op_mode_config(msm_host,
!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
/* Since we have disabled INTF, the video engine won't stop on its own,
 * which leaves the cmd engine blocked.
 * Reset to disable the video engine so that we can send commands.
 */
dsi_sw_reset(msm_host);
return 0;
}
static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
{
enum sfpb_ahb_arb_master_port_en en;
if (!msm_host->sfpb)
return;
en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
SFPB_GPREG_MASTER_PORT_EN__MASK,
SFPB_GPREG_MASTER_PORT_EN(en));
}
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings,
bool is_bonded_dsi, struct msm_dsi_phy *phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret = 0;
mutex_lock(&msm_host->dev_mutex);
if (msm_host->power_on) {
DBG("dsi host already on");
goto unlock_ret;
}
msm_host->byte_intf_clk_rate = msm_host->byte_clk_rate;
if (phy_shared_timings->byte_intf_clk_div_2)
msm_host->byte_intf_clk_rate /= 2;
msm_dsi_sfpb_config(msm_host, true);
ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
msm_host->supplies);
if (ret) {
pr_err("%s:Failed to enable vregs.ret=%d\n",
__func__, ret);
goto unlock_ret;
}
pm_runtime_get_sync(&msm_host->pdev->dev);
ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
if (!ret)
ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
goto fail_disable_reg;
}
ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
if (ret) {
pr_err("%s: failed to set pinctrl default state, %d\n",
__func__, ret);
goto fail_disable_clk;
}
dsi_timing_setup(msm_host, is_bonded_dsi);
dsi_sw_reset(msm_host);
dsi_ctrl_enable(msm_host, phy_shared_timings, phy);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 1);
msm_host->power_on = true;
mutex_unlock(&msm_host->dev_mutex);
return 0;
fail_disable_clk:
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
msm_host->supplies);
unlock_ret:
mutex_unlock(&msm_host->dev_mutex);
return ret;
}
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
mutex_lock(&msm_host->dev_mutex);
if (!msm_host->power_on) {
DBG("dsi host already off");
goto unlock_ret;
}
dsi_ctrl_disable(msm_host);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0);
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
msm_host->supplies);
msm_dsi_sfpb_config(msm_host, false);
DBG("-");
msm_host->power_on = false;
unlock_ret:
mutex_unlock(&msm_host->dev_mutex);
return 0;
}
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode);
msm_host->mode = NULL;
}
msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
if (!msm_host->mode) {
pr_err("%s: cannot duplicate mode\n", __func__);
return -ENOMEM;
}
return 0;
}
enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct drm_dsc_config *dsc = msm_host->dsc;
int pic_width = mode->hdisplay;
int pic_height = mode->vdisplay;
if (!msm_host->dsc)
return MODE_OK;
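/*
 * DSC slices must tile the frame exactly: e.g. a 1080-pixel-wide mode
 * is accepted with slice_width = 540 (two slices per line) but rejected
 * with slice_width = 500 (editorial illustration).
 */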
if (pic_width % dsc->slice_width) {
pr_err("DSI: pic_width %d has to be multiple of slice %d\n",
pic_width, dsc->slice_width);
return MODE_H_ILLEGAL;
}
if (pic_height % dsc->slice_height) {
pr_err("DSI: pic_height %d has to be multiple of slice %d\n",
pic_height, dsc->slice_height);
return MODE_V_ILLEGAL;
}
return MODE_OK;
}
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
{
return to_msm_dsi_host(host)->mode_flags;
}
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
pm_runtime_get_sync(&msm_host->pdev->dev);
msm_disp_snapshot_add_block(disp_state, msm_host->ctrl_size,
msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id);
pm_runtime_put_sync(&msm_host->pdev->dev);
}
static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host)
{
u32 reg;
reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff);
/* draw checkered rectangle pattern */
dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL,
DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN);
/* use 24-bit RGB test pattern */
dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG,
DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) |
DSI_TPG_VIDEO_CONFIG_RGB);
reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN);
dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
DBG("Video test pattern setup done\n");
}
static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host)
{
u32 reg;
reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
/* initial value for test pattern */
dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff);
reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN);
dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
/* draw checkered rectangle pattern */
dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2,
DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN);
DBG("Cmd test pattern setup done\n");
}
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO);
u32 reg;
if (is_video_mode)
msm_dsi_host_video_test_pattern_setup(msm_host);
else
msm_dsi_host_cmd_test_pattern_setup(msm_host);
reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
/* enable the test pattern generator */
dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN));
/* for command mode need to trigger one frame from tpg */
if (!is_video_mode)
dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER,
DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
}
struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
return msm_host->dsc;
}
| linux-master | drivers/gpu/drm/msm/dsi/dsi_host.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_28nm_8960.xml.h"
/*
* DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
*
*
* +------+
* dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
* F * byte_clk | +------+
* | bit clock divider (F / 8)
* |
* | +------+
* o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
* | +------+ | (sets parent rate)
* | byte clock divider (F) |
* | |
* | o---> To esc RCG
* | (doesn't set parent rate)
* |
* | +------+
* o-----| DIV3 |----dsi0pll------o---> To dsi RCG
* +------+ | (sets parent rate)
* dsi clock divider (F * magic) |
* |
* o---> To pixel rcg
* (doesn't set parent rate)
*/
#define POLL_MAX_READS 8000
#define POLL_TIMEOUT_US 1
#define VCO_REF_CLK_RATE 27000000
#define VCO_MIN_RATE 600000000
#define VCO_MAX_RATE 1200000000
#define VCO_PREF_DIV_RATIO 27
struct pll_28nm_cached_state {
unsigned long vco_rate;
u8 postdiv3;
u8 postdiv2;
u8 postdiv1;
};
struct clk_bytediv {
struct clk_hw hw;
void __iomem *reg;
};
struct dsi_pll_28nm {
struct clk_hw clk_hw;
struct msm_dsi_phy *phy;
struct pll_28nm_cached_state cached_state;
};
#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
int nb_tries, int timeout_us)
{
bool pll_locked = false;
u32 val;
while (nb_tries--) {
val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
if (pll_locked)
break;
udelay(timeout_us);
}
DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
return pll_locked;
}
/*
* Clock Callbacks
*/
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
void __iomem *base = pll_28nm->phy->pll_base;
u32 val, temp, fb_divider;
DBG("rate=%lu, parent's=%lu", rate, parent_rate);
temp = rate / 10;
val = VCO_REF_CLK_RATE / 10;
fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
fb_divider = fb_divider / 2 - 1;
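/*
 * Worked example (editorial illustration): for a 600 MHz VCO target,
 * fb_divider = ((60000000 * 27) / 2700000) / 2 - 1 = 299. The
 * recalc_rate path below inverts this:
 * (27 MHz / 27) * (299 + 1) * 2 = 600 MHz.
 */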
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
fb_divider & 0xff);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val |= (fb_divider >> 8) & 0x07;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
val);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
val);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
0xf);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val |= 0x7 << 4;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
val);
return 0;
}
static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
POLL_TIMEOUT_US);
}
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
void __iomem *base = pll_28nm->phy->pll_base;
unsigned long vco_rate;
u32 status, fb_divider, temp, ref_divider;
VERB("parent_rate=%lu", parent_rate);
status = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
fb_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
fb_divider &= 0xff;
temp = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
fb_divider = (temp << 8) | fb_divider;
fb_divider += 1;
ref_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
ref_divider &= 0x3f;
ref_divider += 1;
/* multiply by 2 */
vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
} else {
vco_rate = 0;
}
DBG("returning vco rate = %lu", vco_rate);
return vco_rate;
}
static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
struct device *dev = &pll_28nm->phy->pdev->dev;
void __iomem *base = pll_28nm->phy->pll_base;
bool locked;
unsigned int bit_div, byte_div;
int max_reads = 1000, timeout_us = 100;
u32 val;
DBG("id=%d", pll_28nm->phy->id);
if (unlikely(pll_28nm->phy->pll_on))
return 0;
/*
* before enabling the PLL, configure the bit clock divider since we
* don't expose it as a clock to the outside world
* 1: read back the byte clock divider that should already be set
* 2: divide by 8 to get bit clock divider
* 3: write it to POSTDIV1
*/
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
byte_div = val + 1;
bit_div = byte_div / 8;
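/*
 * E.g. a byte clock divider of 8 (register value 7) gives bit_div = 1,
 * i.e. the bit clock runs at the VCO rate (editorial illustration).
 */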
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val &= ~0xf;
val |= (bit_div - 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
/* enable the PLL */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked)) {
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
return -EINVAL;
}
DBG("DSI PLL lock success");
pll_28nm->phy->pll_on = true;
return 0;
}
static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
DBG("id=%d", pll_28nm->phy->id);
if (unlikely(!pll_28nm->phy->pll_on))
return;
dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
pll_28nm->phy->pll_on = false;
}
static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
if (rate < pll_28nm->phy->cfg->min_pll_rate)
return pll_28nm->phy->cfg->min_pll_rate;
else if (rate > pll_28nm->phy->cfg->max_pll_rate)
return pll_28nm->phy->cfg->max_pll_rate;
else
return rate;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
.round_rate = dsi_pll_28nm_clk_round_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare,
.unprepare = dsi_pll_28nm_vco_unprepare,
.is_enabled = dsi_pll_28nm_clk_is_enabled,
};
/*
 * Custom byte clock divider clk_ops
 *
 * This clock is the entry point to configuring the PLL. The user (DSI host)
 * will set this clock's rate to the desired byte clock rate. The VCO lock
 * frequency is a multiple of the byte clock rate; the multiplication factor
 * (shown as F in the diagram above) is a function of the byte clock rate.
 *
 * This custom divider clock ensures that its parent (VCO) is set to the
 * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
 * accordingly.
 */
#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_bytediv *bytediv = to_clk_bytediv(hw);
unsigned int div;
div = dsi_phy_read(bytediv->reg) & 0xff;
return parent_rate / (div + 1);
}
/* find the multiplication factor (w.r.t. the byte clock) for the VCO rate */
static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
{
unsigned long bit_mhz;
/* convert to bit clock in MHz */
bit_mhz = (byte_clk_rate * 8) / 1000000;
if (bit_mhz < 125)
return 64;
else if (bit_mhz < 250)
return 32;
else if (bit_mhz < 600)
return 16;
else
return 8;
}
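/*
 * Worked example (editorial illustration): a 26 MHz byte clock is a
 * 208 MHz bit clock, so the factor is 32 and the VCO is set to
 * 26 MHz * 32 = 832 MHz, within the 600-1200 MHz VCO range.
 */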
static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
unsigned long best_parent;
unsigned int factor;
factor = get_vco_mul_factor(rate);
best_parent = rate * factor;
*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
return *prate / factor;
}
static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_bytediv *bytediv = to_clk_bytediv(hw);
u32 val;
unsigned int factor;
factor = get_vco_mul_factor(rate);
val = dsi_phy_read(bytediv->reg);
val |= (factor - 1) & 0xff;
dsi_phy_write(bytediv->reg, val);
return 0;
}
/* Our special byte clock divider ops */
static const struct clk_ops clk_bytediv_ops = {
.round_rate = clk_bytediv_round_rate,
.set_rate = clk_bytediv_set_rate,
.recalc_rate = clk_bytediv_recalc_rate,
};
/*
* PLL Callbacks
*/
static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
void __iomem *base = pll_28nm->phy->pll_base;
cached_state->postdiv3 =
dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
cached_state->postdiv2 =
dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
cached_state->postdiv1 =
dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
}
static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
void __iomem *base = pll_28nm->phy->pll_base;
int ret;
ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
cached_state->postdiv2);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
cached_state->postdiv1);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->phy->pdev->dev;
struct clk_hw *hw;
struct clk_bytediv *bytediv;
struct clk_init_data bytediv_init = { };
int ret;
DBG("%d", pll_28nm->phy->id);
bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
if (!bytediv)
return -ENOMEM;
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
vco_init.name = clk_name;
pll_28nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
if (ret)
return ret;
/* prepare and register bytediv */
bytediv->hw.init = &bytediv_init;
bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
bytediv_init.flags = CLK_SET_RATE_PARENT;
bytediv_init.parent_hws = (const struct clk_hw*[]){
&pll_28nm->clk_hw,
};
bytediv_init.num_parents = 1;
/* DIV2 */
ret = devm_clk_hw_register(dev, &bytediv->hw);
if (ret)
return ret;
provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
/* DIV3 */
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
}
static int dsi_pll_28nm_8960_init(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
struct dsi_pll_28nm *pll_28nm;
int ret;
if (!pdev)
return -ENODEV;
pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
if (!pll_28nm)
return -ENOMEM;
pll_28nm->phy = phy;
ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ret;
}
phy->vco_hw = &pll_28nm->clk_hw;
return 0;
}
static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing)
{
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
0x100);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
}
static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
u32 status;
int i = 5000;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
usleep_range(5000, 6000);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
do {
status = dsi_phy_read(base +
REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
break;
udelay(1);
} while (--i > 0);
}
static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
int i;
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
0x01);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
0x66);
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n",
__func__);
return -EINVAL;
}
dsi_28nm_phy_regulator_init(phy);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
/* strength control */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
/* phy ctrl */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
dsi_28nm_phy_regulator_ctrl(phy);
dsi_28nm_phy_calibration(phy);
dsi_28nm_phy_lane_config(phy);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
dsi_28nm_dphy_set_timing(phy, timing);
return 0;
}
static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
/*
* Wait for the registers writes to complete in order to
* ensure that the phy is completely disabled
*/
wmb();
}
static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_8960_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
.pll_init = dsi_pll_28nm_8960_init,
.save_pll_state = dsi_28nm_pll_save_state,
.restore_pll_state = dsi_28nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0x4700300, 0x5800300 },
.num_dsi_phy = 2,
};
| linux-master | drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h"
#define S_DIV_ROUND_UP(n, d) \
(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
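/*
 * Editorial note: like DIV_ROUND_UP(), but also rounds away from zero
 * for negative dividends: S_DIV_ROUND_UP(7, 2) == 4 and
 * S_DIV_ROUND_UP(-7, 2) == -4.
 */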
static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
s32 min_result, bool even)
{
s32 v;
v = (tmax - tmin) * percent;
v = S_DIV_ROUND_UP(v, 100) + tmin;
if (even && (v & 0x1))
return max_t(s32, min_result, v - 1);
else
return max_t(s32, min_result, v);
}
static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
s32 ui, s32 coeff, s32 pcnt)
{
s32 tmax, tmin, clk_z;
s32 temp;
/* reset */
temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
tmin = S_DIV_ROUND_UP(temp, ui) - 2;
if (tmin > 255) {
tmax = 511;
clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
} else {
tmax = 255;
clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
}
/* adjust */
temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
timing->clk_zero = clk_z + 8 - temp;
}
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, lpx;
s32 tmax, tmin;
s32 pcnt0 = 10;
s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
s32 pcnt2 = 10;
s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
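/*
 * Worked example (editorial illustration): at an 800 MHz bit clock,
 * ui = 10^9 / 800000 = 1250, i.e. 1.25 ns scaled by coeff = 1000; with
 * a 19.2 MHz escape clock, lpx = 10^9 / 19200 = 52083.
 */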
tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
temp = lpx / ui;
if (temp & 0x1)
timing->hs_rqst = temp;
else
timing->hs_rqst = max_t(s32, 0, temp - 2);
/* Calculate clk_zero after clk_prepare and hs_rqst */
dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = S_DIV_ROUND_UP(temp, ui) - 2;
tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
temp = 85 * coeff + 6 * ui;
tmax = S_DIV_ROUND_UP(temp, ui) - 2;
temp = 40 * coeff + 4 * ui;
tmin = S_DIV_ROUND_UP(temp, ui) - 2;
timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
tmax = 255;
temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
temp = 145 * coeff + 10 * ui - temp;
tmin = S_DIV_ROUND_UP(temp, ui) - 2;
timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = S_DIV_ROUND_UP(temp, ui) - 2;
temp = 60 * coeff + 4 * ui;
tmin = DIV_ROUND_UP(temp, ui) - 2;
timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
tmax = 255;
tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
tmax = 63;
temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
temp = 60 * coeff + 52 * ui - 24 * ui - temp;
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
false);
tmax = 63;
temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
temp += 8 * ui + lpx;
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
if (tmin > tmax) {
temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre = temp >> 1;
timing->shared_timings.clk_pre_inc_by_2 = true;
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre_inc_by_2 = false;
}
timing->ta_go = 3;
timing->ta_sure = 0;
timing->ta_get = 4;
DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail,
timing->hs_rqst);
return 0;
}
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
s32 pcnt2 = 10;
s32 pcnt3 = 30;
s32 pcnt4 = 10;
s32 pcnt5 = 2;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 hb_en, hb_en_ckln, pd_ckln, pd;
s32 val, val_ckln;
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
timing->hs_halfbyte_en = 0;
hb_en = 0;
timing->hs_halfbyte_en_ckln = 0;
hb_en_ckln = 0;
timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
pd_ckln = timing->hs_prep_dly_ckln;
timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
pd = timing->hs_prep_dly;
val = (hb_en << 2) + (pd << 1);
val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (95 * coeff - val_ckln * ui) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
tmax = (tmin > 255) ? 511 : 255;
timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp + 3 * ui) / ui_x8;
timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
tmax = 255;
timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp + 3 * ui) / ui_x8;
timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
tmax = 255;
timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
temp = 60 * coeff + 52 * ui - 43 * ui;
tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 63;
timing->shared_timings.clk_post =
linear_inter(tmax, tmin, pcnt2, 0, false);
temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
(((timing->hs_rqst_ckln << 3) + 8) * ui);
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 63;
if (tmin > tmax) {
temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre = temp >> 1;
timing->shared_timings.clk_pre_inc_by_2 = 1;
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
timing->ta_sure = 0;
timing->ta_get = 4;
DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail,
timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
timing->hs_prep_dly_ckln);
return 0;
}
int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
s32 pcnt2 = 10;
s32 pcnt3 = 30;
s32 pcnt4 = 10;
s32 pcnt5 = 2;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 hb_en, hb_en_ckln;
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
timing->hs_halfbyte_en = 0;
hb_en = 0;
timing->hs_halfbyte_en_ckln = 0;
hb_en_ckln = 0;
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (95 * coeff) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = (tmin > 255) ? 511 : 255;
timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp + 3 * ui) / ui_x8;
timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (85 * coeff + 6 * ui) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 255;
timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp / ui_x8) - 1;
timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
tmax = 255;
timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
temp = 60 * coeff + 52 * ui - 43 * ui;
tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 63;
timing->shared_timings.clk_post =
linear_inter(tmax, tmin, pcnt2, 0, false);
temp = 8 * ui + (timing->clk_prepare << 3) * ui;
temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
(((timing->hs_rqst_ckln << 3) + 8) * ui);
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 63;
if (tmin > tmax) {
temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre = temp >> 1;
timing->shared_timings.clk_pre_inc_by_2 = 1;
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->shared_timings.byte_intf_clk_div_2 = true;
timing->ta_go = 3;
timing->ta_sure = 0;
timing->ta_get = 4;
DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail,
timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
timing->hs_prep_dly_ckln);
return 0;
}
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt_clk_prep = 50;
s32 pcnt_clk_zero = 2;
s32 pcnt_clk_trail = 30;
s32 pcnt_hs_prep = 50;
s32 pcnt_hs_zero = 10;
s32 pcnt_hs_trail = 30;
s32 pcnt_hs_exit = 10;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 hb_en;
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
hb_en = 0;
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
/* TODO: verify these calculations against latest downstream driver
* everything except clk_post/clk_pre uses calculations from v3 based
* on the downstream driver having the same calculations for v3 and v4
*/
temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (95 * coeff) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = (tmin > 255) ? 511 : 255;
timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp + 3 * ui) / ui_x8;
timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (85 * coeff + 6 * ui) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 255;
timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp / ui_x8) - 1;
timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
tmax = 255;
timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
/* recommended min
* = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
*/
temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
tmax = 255;
timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
/* recommended min
* val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
* val2 = (16 * bit_clk_ns)
* final = roundup(val1/val2, 0) - 1
*/
temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
tmax = 255;
timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
timing->shared_timings.byte_intf_clk_div_2 = true;
DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
return 0;
}
int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x7;
s32 tmax, tmin;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x7 = ui * 7;
temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
tmin = max_t(s32, temp, 0);
temp = (95 * coeff) / ui_x7;
tmax = max_t(s32, temp, 0);
timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);
tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
tmax = 255;
timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);
tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
tmax = 255;
timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);
tmin = 1;
tmax = 32;
timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);
tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
tmax = 64;
timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);
DBG("%d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->clk_prepare, timing->hs_exit, timing->hs_rqst);
return 0;
}
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
int ret;
pm_runtime_get_sync(dev);
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
return ret;
}
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
clk_disable_unprepare(phy->ahb_clk);
pm_runtime_put(&phy->pdev->dev);
}
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
.data = &dsi_phy_28nm_hpm_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
.data = &dsi_phy_28nm_hpm_famb_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-lp",
.data = &dsi_phy_28nm_lp_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-8226",
.data = &dsi_phy_28nm_8226_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
{ .compatible = "qcom,dsi-phy-20nm",
.data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
{ .compatible = "qcom,dsi-phy-28nm-8960",
.data = &dsi_phy_28nm_8960_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
{ .compatible = "qcom,dsi-phy-14nm",
.data = &dsi_phy_14nm_cfgs },
{ .compatible = "qcom,dsi-phy-14nm-2290",
.data = &dsi_phy_14nm_2290_cfgs },
{ .compatible = "qcom,dsi-phy-14nm-660",
.data = &dsi_phy_14nm_660_cfgs },
{ .compatible = "qcom,dsi-phy-14nm-8953",
.data = &dsi_phy_14nm_8953_cfgs },
{ .compatible = "qcom,sm6125-dsi-phy-14nm",
.data = &dsi_phy_14nm_2290_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
.data = &dsi_phy_10nm_cfgs },
{ .compatible = "qcom,dsi-phy-10nm-8998",
.data = &dsi_phy_10nm_8998_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
{ .compatible = "qcom,dsi-phy-7nm",
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
{ .compatible = "qcom,sc7280-dsi-phy-7nm",
.data = &dsi_phy_7nm_7280_cfgs },
{ .compatible = "qcom,sm6375-dsi-phy-7nm",
.data = &dsi_phy_7nm_6375_cfgs },
{ .compatible = "qcom,sm8350-dsi-phy-5nm",
.data = &dsi_phy_5nm_8350_cfgs },
{ .compatible = "qcom,sm8450-dsi-phy-5nm",
.data = &dsi_phy_5nm_8450_cfgs },
{ .compatible = "qcom,sm8550-dsi-phy-4nm",
.data = &dsi_phy_4nm_8550_cfgs },
#endif
{}
};
/*
* Currently, we only support one SoC for each PHY type. When we have multiple
* SoCs for the same PHY, we can try to make the index searching a bit more
* clever.
*/
static int dsi_phy_get_id(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
const struct msm_dsi_phy_cfg *cfg = phy->cfg;
struct resource *res;
int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
if (!res)
return -EINVAL;
for (i = 0; i < cfg->num_dsi_phy; i++) {
if (cfg->io_start[i] == res->start)
return i;
}
return -EINVAL;
}
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
struct device *dev = &pdev->dev;
u32 phy_type;
int ret;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->provided_clocks = devm_kzalloc(dev,
struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
GFP_KERNEL);
if (!phy->provided_clocks)
return -ENOMEM;
phy->provided_clocks->num = NUM_PROVIDED_CLKS;
phy->cfg = of_device_get_match_data(&pdev->dev);
if (!phy->cfg)
return -ENODEV;
phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0)
return dev_err_probe(dev, phy->id,
"Couldn't identify PHY index\n");
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
if (IS_ERR(phy->base))
return dev_err_probe(dev, PTR_ERR(phy->base),
"Failed to map phy base\n");
phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
if (IS_ERR(phy->pll_base))
return dev_err_probe(dev, PTR_ERR(phy->pll_base),
"Failed to map pll base\n");
if (phy->cfg->has_phy_lane) {
phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
if (IS_ERR(phy->lane_base))
return dev_err_probe(dev, PTR_ERR(phy->lane_base),
"Failed to map phy lane base\n");
}
if (phy->cfg->has_phy_regulator) {
phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
if (IS_ERR(phy->reg_base))
return dev_err_probe(dev, PTR_ERR(phy->reg_base),
"Failed to map phy regulator base\n");
}
if (phy->cfg->ops.parse_dt_properties) {
ret = phy->cfg->ops.parse_dt_properties(phy);
if (ret)
return ret;
}
ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
phy->cfg->regulator_data,
&phy->supplies);
if (ret)
return ret;
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk))
return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
"Unable to get ahb clk\n");
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
ret = dsi_phy_enable_resource(phy);
if (ret)
return ret;
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
if (ret)
return dev_err_probe(dev, ret,
"PLL init failed; need separate clk driver\n");
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
phy->provided_clocks);
if (ret)
return dev_err_probe(dev, ret,
"Failed to register clk provider\n");
dsi_phy_disable_resource(phy);
platform_set_drvdata(pdev, phy);
return 0;
}
static struct platform_driver dsi_phy_platform_driver = {
.probe = dsi_phy_driver_probe,
.driver = {
.name = "msm_dsi_phy",
.of_match_table = dsi_phy_dt_match,
},
};
void __init msm_dsi_phy_driver_register(void)
{
platform_driver_register(&dsi_phy_platform_driver);
}
void __exit msm_dsi_phy_driver_unregister(void)
{
platform_driver_unregister(&dsi_phy_platform_driver);
}
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct device *dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
return -EINVAL;
dev = &phy->pdev->dev;
ret = dsi_phy_enable_resource(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}
ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
if (ret) {
DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
goto reg_en_fail;
}
ret = phy->cfg->ops.enable(phy, clk_req);
if (ret) {
DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
}
memcpy(shared_timings, &phy->timing.shared_timings,
sizeof(*shared_timings));
/*
 * Resetting the DSI PHY silently changes its PLL registers to their
 * reset state, which confuses the clock driver and results in wrong
 * output rates for the link clocks. Restore the PLL state if its PLL
 * is being used as a clock source.
 */
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_phy_pll_restore_state(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
__func__, ret);
goto pll_restore_fail;
}
}
return 0;
pll_restore_fail:
if (phy->cfg->ops.disable)
phy->cfg->ops.disable(phy);
phy_en_fail:
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
dsi_phy_disable_resource(phy);
res_en_fail:
return ret;
}
void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
if (!phy || !phy->cfg->ops.disable)
return;
phy->cfg->ops.disable(phy);
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
dsi_phy_disable_resource(phy);
}
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc)
{
if (phy)
phy->usecase = uc;
}
/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */
bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
{
if (!phy || !phy->cfg->ops.set_continuous_clock)
return false;
return phy->cfg->ops.set_continuous_clock(phy, enable);
}
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
{
if (phy->cfg->ops.save_pll_state) {
phy->cfg->ops.save_pll_state(phy);
phy->state_saved = true;
}
}
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
{
int ret;
if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
ret = phy->cfg->ops.restore_pll_state(phy);
if (ret)
return ret;
phy->state_saved = false;
}
return 0;
}
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy)
{
msm_disp_snapshot_add_block(disp_state,
phy->base_size, phy->base,
"dsi%d_phy", phy->id);
/* Do not try accessing PLL registers if it is switched off */
if (phy->pll_on)
msm_disp_snapshot_add_block(disp_state,
phy->pll_size, phy->pll_base,
"dsi%d_pll", phy->id);
if (phy->lane_base)
msm_disp_snapshot_add_block(disp_state,
phy->lane_size, phy->lane_base,
"dsi%d_lane", phy->id);
if (phy->reg_base)
msm_disp_snapshot_add_block(disp_state,
phy->reg_size, phy->reg_base,
"dsi%d_reg", phy->id);
}
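/*
 * Editorial sketch (not part of the driver): the call ordering a DSI host
 * typically follows around the PHY API above, for illustration only. The
 * function name is hypothetical and error handling is elided.
 */
static void __maybe_unused dsi_phy_usage_sketch(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req,
struct msm_dsi_phy_shared_timings *shared_timings)
{
/* pick standalone/master/slave before enabling */
msm_dsi_phy_set_usecase(phy, MSM_DSI_PHY_STANDALONE);
msm_dsi_phy_enable(phy, clk_req, shared_timings);
/* ... stream frames; save PLL state before a power collapse ... */
msm_dsi_phy_pll_save_state(phy);
msm_dsi_phy_disable(phy);
}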
| linux-master | drivers/gpu/drm/msm/dsi/phy/dsi_phy.c |
/*
* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018, The Linux Foundation
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_7nm.xml.h"
/*
* DSI PLL 7nm - clock diagram (eg: DSI0): TODO: updated CPHY diagram
*
* dsi0_pll_out_div_clk dsi0_pll_bit_clk
* | |
* | |
* +---------+ | +----------+ | +----+
* dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
* | | |
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
* | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
* +-----+ | |
* | |dsiclk_sel
* |
* dsi0_pll_post_out_div_clk
*/
#define VCO_REF_CLK_RATE 19200000
#define FRAC_BITS 18
/* Hardware is pre V4.1 */
#define DSI_PHY_7NM_QUIRK_PRE_V4_1 BIT(0)
/* Hardware is V4.1 */
#define DSI_PHY_7NM_QUIRK_V4_1 BIT(1)
/* Hardware is V4.2 */
#define DSI_PHY_7NM_QUIRK_V4_2 BIT(2)
/* Hardware is V4.3 */
#define DSI_PHY_7NM_QUIRK_V4_3 BIT(3)
/* Hardware is V5.2 */
#define DSI_PHY_7NM_QUIRK_V5_2 BIT(4)
struct dsi_pll_config {
bool enable_ssc;
bool ssc_center;
u32 ssc_freq;
u32 ssc_offset;
u32 ssc_adj_per;
/* out */
u32 decimal_div_start;
u32 frac_div_start;
u32 pll_clock_inverters;
u32 ssc_stepsize;
u32 ssc_div_per;
};
struct pll_7nm_cached_state {
unsigned long vco_rate;
u8 bit_clk_div;
u8 pix_clk_div;
u8 pll_out_div;
u8 pll_mux;
};
struct dsi_pll_7nm {
struct clk_hw clk_hw;
struct msm_dsi_phy *phy;
u64 vco_current_rate;
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
};
#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw)
/*
* Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
config->ssc_freq = 31500;
config->ssc_offset = 4800;
config->ssc_adj_per = 2;
/* TODO: ssc enable */
config->enable_ssc = false;
config->ssc_center = 0;
}
static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
{
u64 fref = VCO_REF_CLK_RATE;
u64 pll_freq;
u64 divider;
u64 dec, dec_multiple;
u32 frac;
u64 multiplier;
pll_freq = pll->vco_current_rate;
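/*
* Fractional-N feedback math, derived from the code below: the VCO runs
* at vco = 2 * fref * (dec + frac / 2^FRAC_BITS). Scaling the ratio by
* 2^FRAC_BITS before dividing yields the integer part in 'dec' and the
* 18-bit remainder in 'frac'. E.g. a 1.5 GHz VCO gives
* 1.5e9 / 38.4e6 = 39.0625, i.e. dec = 39 and frac = 0x4000.
*/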
divider = fref * 2;
multiplier = 1 << FRAC_BITS;
dec_multiple = div_u64(pll_freq * multiplier, divider);
dec = div_u64_rem(dec_multiple, multiplier, &frac);
if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)
config->pll_clock_inverters = 0x28;
else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (pll_freq <= 1300000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
config->pll_clock_inverters = 0x20;
else if (pll_freq <= 4000000000ULL)
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
} else {
if (pll_freq <= 1000000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
config->pll_clock_inverters = 0x20;
else if (pll_freq <= 3020000000ULL)
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
}
config->decimal_div_start = dec;
config->frac_div_start = frac;
}
#define SSC_CENTER BIT(0)
#define SSC_EN BIT(1)
static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
{
u32 ssc_per;
u32 ssc_mod;
u64 ssc_step_size;
u64 frac;
if (!config->enable_ssc) {
DBG("SSC not enabled\n");
return;
}
ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
ssc_per -= ssc_mod;
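/*
* The step size computed below works out to
* (dec * 2^FRAC_BITS + frac) * ssc_offset * (ssc_adj_per + 1) /
* ((ssc_per + 1) * 10^6), i.e. ssc_offset is a parts-per-million
* deviation applied to the full fractional feedback-divider word.
*/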
frac = config->frac_div_start;
ssc_step_size = config->decimal_div_start;
ssc_step_size *= (1 << FRAC_BITS);
ssc_step_size += frac;
ssc_step_size *= config->ssc_offset;
ssc_step_size *= (config->ssc_adj_per + 1);
ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
config->ssc_div_per = ssc_per;
config->ssc_stepsize = ssc_step_size;
pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
config->decimal_div_start, frac, FRAC_BITS);
pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}
static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
{
void __iomem *base = pll->phy->pll_base;
if (config->enable_ssc) {
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
{
void __iomem *base = pll->phy->pll_base;
u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1))
if (pll->vco_current_rate >= 3100000000ULL)
analog_controls_five_1 = 0x03;
if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
if (pll->vco_current_rate < 1520000000ULL)
vco_config_1 = 0x08;
else if (pll->vco_current_rate < 2990000000ULL)
vco_config_1 = 0x01;
}
if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_2) ||
(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3)) {
if (pll->vco_current_rate < 1520000000ULL)
vco_config_1 = 0x08;
else if (pll->vco_current_rate >= 2990000000ULL)
vco_config_1 = 0x01;
}
if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (pll->vco_current_rate < 1557000000ULL)
vco_config_1 = 0x08;
else
vco_config_1 = 0x01;
}
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
analog_controls_five_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) ? 0x3f : 0x22);
if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) {
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
if (pll->slave)
dsi_phy_write(pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
}
}
static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
{
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
config->decimal_div_start);
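/*
* The 18-bit fractional word is split across three registers:
* bits [7:0] go to LOW, [15:8] to MID and [17:16] to HIGH.
*/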
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
(config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
pll->phy->cphy_mode ? 0x00 : 0x10);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
config->pll_clock_inverters);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
struct dsi_pll_config config;
DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate,
parent_rate);
pll_7nm->vco_current_rate = rate;
dsi_pll_setup_config(&config);
dsi_pll_calc_dec_frac(pll_7nm, &config);
dsi_pll_calc_ssc(pll_7nm, &config);
dsi_pll_commit(pll_7nm, &config);
dsi_pll_config_hzindep_reg(pll_7nm);
dsi_pll_ssc_commit(pll_7nm, &config);
/* flush, ensure all register writes are done */
wmb();
return 0;
}
static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
{
int rc;
u32 status = 0;
u32 const delay_us = 100;
u32 const timeout_us = 5000;
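/* BIT(0) of COMMON_STATUS_ONE indicates PLL lock */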
rc = readl_poll_timeout_atomic(pll->phy->pll_base +
REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
status,
((status & BIT(0)) > 0),
delay_us,
timeout_us);
if (rc)
pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
pll->phy->id, status);
return rc;
}
static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
ndelay(250);
}
static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
{
u32 data;
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
}
static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
{
u32 data;
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
data | BIT(5) | BIT(4));
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
{
/*
* Reset the PHY digital domain. This would be needed when
* coming out of a CX or analog rail power collapse while
* ensuring that the pads maintain LP00 or LP11 state
*/
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
wmb(); /* Ensure that the reset is asserted */
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
wmb(); /* Ensure that the reset is deasserted */
}
static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
int rc;
dsi_pll_enable_pll_bias(pll_7nm);
if (pll_7nm->slave)
dsi_pll_enable_pll_bias(pll_7nm->slave);
/* Start PLL */
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
/*
* ensure all PLL configurations are written prior to checking
* for PLL lock.
*/
wmb();
/* Check for PLL lock */
rc = dsi_pll_7nm_lock_status(pll_7nm);
if (rc) {
pr_err("PLL(%d) lock failed\n", pll_7nm->phy->id);
goto error;
}
pll_7nm->phy->pll_on = true;
/*
* assert power on reset for PHY digital in case the PLL is
* enabled after CX or analog domain power collapse. This needs
* to be done before enabling the global clk.
*/
dsi_pll_phy_dig_reset(pll_7nm);
if (pll_7nm->slave)
dsi_pll_phy_dig_reset(pll_7nm->slave);
dsi_pll_enable_global_clk(pll_7nm);
if (pll_7nm->slave)
dsi_pll_enable_global_clk(pll_7nm->slave);
error:
return rc;
}
static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
{
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
dsi_pll_disable_pll_bias(pll);
}
static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
/*
* To avoid any stray glitches while abruptly powering down the PLL,
* make sure to gate the clock using the clock enable bit before
* powering down the PLL.
*/
dsi_pll_disable_global_clk(pll_7nm);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
dsi_pll_disable_sub(pll_7nm);
if (pll_7nm->slave) {
dsi_pll_disable_global_clk(pll_7nm->slave);
dsi_pll_disable_sub(pll_7nm->slave);
}
/* flush, ensure all register writes are done */
wmb();
pll_7nm->phy->pll_on = false;
}
static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
void __iomem *base = pll_7nm->phy->pll_base;
u64 ref_clk = VCO_REF_CLK_RATE;
u64 vco_rate = 0x0;
u64 multiplier;
u32 frac;
u32 dec;
u64 pll_freq, tmp64;
dec = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
frac = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
0xff) << 8);
frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
0x3) << 16);
/*
* TODO:
* 1. Assumes prescaler is disabled
*/
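/*
* Reverse of the set_rate math:
* vco = 2 * ref * dec + (2 * ref * frac) / 2^FRAC_BITS,
* using the 8-bit decimal word and the 18-bit fractional word
* reassembled above.
*/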
multiplier = 1 << FRAC_BITS;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
pll_7nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
return (unsigned long)vco_rate;
}
static long dsi_pll_7nm_clk_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
if (rate < pll_7nm->phy->cfg->min_pll_rate)
return pll_7nm->phy->cfg->min_pll_rate;
else if (rate > pll_7nm->phy->cfg->max_pll_rate)
return pll_7nm->phy->cfg->max_pll_rate;
else
return rate;
}
static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
.round_rate = dsi_pll_7nm_clk_round_rate,
.set_rate = dsi_pll_7nm_vco_set_rate,
.recalc_rate = dsi_pll_7nm_vco_recalc_rate,
.prepare = dsi_pll_7nm_vco_prepare,
.unprepare = dsi_pll_7nm_vco_unprepare,
};
/*
* PLL Callbacks
*/
static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
void __iomem *phy_base = pll_7nm->phy->base;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = cmn_clk_cfg1 & 0x3;
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
cached->pix_clk_div, cached->pll_mux);
}
static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
void __iomem *phy_base = pll_7nm->phy->base;
u32 val;
int ret;
val = dsi_phy_read(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
val |= cached->pll_out_div;
dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
val |= cached->pll_mux;
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
VCO_REF_CLK_RATE);
if (ret) {
DRM_DEV_ERROR(&pll_7nm->phy->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
DBG("DSI PLL%d", pll_7nm->phy->id);
return 0;
}
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
switch (phy->usecase) {
case MSM_DSI_PHY_STANDALONE:
break;
case MSM_DSI_PHY_MASTER:
pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
break;
case MSM_DSI_PHY_SLAVE:
data = 0x1; /* external PLL */
break;
default:
return -EINVAL;
}
/* set PLL src */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
return 0;
}
/*
* The post dividers and mux clocks are created using the standard divider and
* mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
* state to follow the master PLL's divider/mux state. Therefore, we don't
* require special clock ops that also configure the slave PLL registers
*/
static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_7nm_vco,
};
struct device *dev = &pll_7nm->phy->pdev->dev;
struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent;
int ret;
DBG("DSI%d", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id);
pll_7nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_7nm->clk_hw, CLK_SET_RATE_PARENT,
pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
if (IS_ERR(pll_out_div)) {
ret = PTR_ERR(pll_out_div);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
pll_out_div, CLK_SET_RATE_PARENT,
pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
if (IS_ERR(pll_bit)) {
ret = PTR_ERR(pll_bit);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
/* DSI byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 (D-PHY) or / 7 (C-PHY) */
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
pll_bit, CLK_SET_RATE_PARENT, 1,
pll_7nm->phy->cphy_mode ? 7 : 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, pll_bit, 0, 1, 2);
if (IS_ERR(pll_by_2_bit)) {
ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
if (pll_7nm->phy->cphy_mode)
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
dev, clk_name, pll_out_div, 0, 2, 7);
else
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
dev, clk_name, pll_out_div, 0, 1, 4);
if (IS_ERR(pll_post_out_div)) {
ret = PTR_ERR(pll_post_out_div);
goto fail;
}
/*
* In CPHY mode, pclk_mux will always have post_out_div as its parent,
* so don't register a pclk_mux clock and just use post_out_div instead.
*/
if (pll_7nm->phy->cphy_mode) {
u32 data;
data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
((const struct clk_hw *[]){
pll_bit,
pll_by_2_bit,
}), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
0, 1, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
phy_pll_out_dsi_parent = hw;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
/* PIX CLK DIV: DIV_CTRL_7_4 */
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
phy_pll_out_dsi_parent, 0,
pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
fail:
return ret;
}
static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
struct dsi_pll_7nm *pll_7nm;
int ret;
pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
if (!pll_7nm)
return -ENOMEM;
DBG("DSI PLL%d", phy->id);
pll_7nm_list[phy->id] = pll_7nm;
spin_lock_init(&pll_7nm->postdiv_lock);
pll_7nm->phy = phy;
ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ret;
}
phy->vco_hw = &pll_7nm->clk_hw;
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
return 0;
}
static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
u32 data = 0;
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
}
static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
void __iomem *lane_base = phy->lane_base;
int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
/*
* LPRX and CDRX need to be enabled only for the physical data lane
* corresponding to logical data lane 0
*/
if (enable)
dsi_phy_write(lane_base +
REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
else
dsi_phy_write(lane_base +
REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
}
static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
{
int i;
const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
const u8 *tx_dctrl = tx_dctrl_0;
void __iomem *lane_base = phy->lane_base;
if (!(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1))
tx_dctrl = tx_dctrl_1;
/* Strength ctrl settings */
for (i = 0; i < 5; i++) {
/*
* Disable LPRX and CDRX for all lanes; they will later be enabled
* only for the physical data lane corresponding to logical data
* lane 0
*/
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
}
dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
/* other settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
}
}
static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
int ret;
u32 status;
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
u32 glbl_pemph_ctrl_0;
u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
u32 data;
DBG("");
if (phy->cphy_mode)
ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req);
else
ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: PHY timing calculation failed\n", __func__);
return -EINVAL;
}
if (dsi_phy_hw_v4_0_is_pll_on(phy))
pr_warn("PLL turned on before configuring PHY\n");
/* Request for REFGEN READY */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
dsi_phy_write(phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10, 0x1);
udelay(500);
}
/* wait for REFGEN READY */
ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
status, (status & BIT(0)),
delay_us, timeout_us);
if (ret) {
pr_err("Ref gen not ready. Aborting\n");
return -EINVAL;
}
/* TODO: CPHY enable path (this is for DPHY only) */
/* Alter PHY configurations if the data rate is less than 1.5 GHz */
less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
glbl_str_swi_cal_sel_ctrl = 0x00;
if (phy->cphy_mode) {
vreg_ctrl_0 = 0x51;
vreg_ctrl_1 = 0x55;
glbl_hstx_str_ctrl_0 = 0x00;
glbl_pemph_ctrl_0 = 0x11;
lane_ctrl0 = 0x17;
} else {
vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
vreg_ctrl_1 = 0x5c;
glbl_hstx_str_ctrl_0 = 0x88;
glbl_pemph_ctrl_0 = 0x00;
lane_ctrl0 = 0x1f;
}
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) {
vreg_ctrl_0 = 0x45;
vreg_ctrl_1 = 0x45;
glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x00;
} else {
vreg_ctrl_0 = 0x44;
vreg_ctrl_1 = 0x19;
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
}
} else if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3)) {
if (phy->cphy_mode) {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3b;
} else {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x39;
}
} else if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_2) {
if (phy->cphy_mode) {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3b;
} else {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x00;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x39;
}
} else if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
if (phy->cphy_mode) {
glbl_hstx_str_ctrl_0 = 0x88;
glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x3c;
} else {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
}
} else {
if (phy->cphy_mode) {
glbl_str_swi_cal_sel_ctrl = 0x03;
glbl_hstx_str_ctrl_0 = 0x66;
} else {
vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
}
glbl_rescode_top_ctrl = 0x03;
glbl_rescode_bot_ctrl = 0x3c;
}
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
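/* BIT(6) = digital, BIT(5) = PLL; BIT(5) is the same bit toggled by dsi_pll_{enable,disable}_pll_bias() */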
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
/* Assert PLL core reset */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
/* turn off resync FIFO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
/* program CMN_CTRL_4 for minor_ver 2 chipsets */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
(dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
/* Configure PHY lane swap (TODO: we need to calculate this) */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
if (phy->cphy_mode)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6));
/* Enable LDO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
glbl_str_swi_cal_sel_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
glbl_hstx_str_ctrl_0);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0,
glbl_pemph_ctrl_0);
if (phy->cphy_mode)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
glbl_rescode_top_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
glbl_rescode_bot_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
/* Remove power down from all blocks */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0);
/* Select full-rate mode */
if (!phy->cphy_mode)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
ret = dsi_7nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
/* DSI PHY timings */
if (phy->cphy_mode) {
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
timing->shared_timings.clk_post);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
} else {
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
timing->shared_timings.clk_post);
}
/* DSI lane settings */
dsi_phy_hw_v4_0_lane_settings(phy);
DBG("DSI%d PHY enabled", phy->id);
return 0;
}
static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
{
void __iomem *base = phy->base;
u32 data;
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
if (enable)
data |= BIT(5) | BIT(6);
else
data &= ~(BIT(5) | BIT(6));
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data);
return enable;
}
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
u32 data;
DBG("");
if (dsi_phy_hw_v4_0_is_pll_on(phy))
pr_warn("Turning OFF PHY while PLL is on\n");
dsi_phy_hw_v4_0_config_lpcdrx(phy, false);
/* Turn off REFGEN Vote */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10, 0x0);
wmb();
/* Delay to ensure HW removes vote before PHY shut down */
udelay(2);
}
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
/* disable all lanes */
data &= ~0x1F;
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0);
/* Turn off all PHY blocks */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x00);
/* make sure phy is turned off */
wmb();
DBG("DSI%d PHY disabled", phy->id);
}
static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 36000 },
};
static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 97800 },
};
static const struct regulator_bulk_data dsi_phy_7nm_98400uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 98400 },
};
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_36mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
.set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
.max_pll_rate = 5000000000UL,
#else
.max_pll_rate = ULONG_MAX,
#endif
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V4_1,
};
const struct msm_dsi_phy_cfg dsi_phy_7nm_6375_cfgs = {
.has_phy_lane = true,
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
.max_pll_rate = 5000000000ULL,
#else
.max_pll_rate = ULONG_MAX,
#endif
.io_start = { 0x5e94400 },
.num_dsi_phy = 1,
.quirks = DSI_PHY_7NM_QUIRK_V4_1,
};
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_36mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
.set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL,
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_PRE_V4_1,
};
const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_37750uA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
.max_pll_rate = 5000000000ULL,
#else
.max_pll_rate = ULONG_MAX,
#endif
.io_start = { 0xae94400 },
.num_dsi_phy = 1,
.quirks = DSI_PHY_7NM_QUIRK_V4_1,
};
const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_37750uA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
.set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
.max_pll_rate = 5000000000UL,
#else
.max_pll_rate = ULONG_MAX,
#endif
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V4_2,
};
const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_97800uA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
.set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
.max_pll_rate = 5000000000UL,
#else
.max_pll_rate = ULONG_MAX,
#endif
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V4_3,
};
const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_98400uA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_98400uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
.set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
.max_pll_rate = 5000000000UL,
#else
.max_pll_rate = ULONG_MAX,
#endif
.io_start = { 0xae95000, 0xae97000 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V5_2,
};
| linux-master | drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_28nm.xml.h"
/*
* DSI PLL 28nm - clock diagram (eg: DSI0):
*
* dsi0analog_postdiv_clk
* | dsi0indirect_path_div2_clk
* | |
* +------+ | +----+ | |\ dsi0byte_mux
* dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \ |
* | +------+ +----+ | m| | +----+
* | | u|--o--| /4 |-- dsi0pllbyte
* | | x| +----+
* o--------------------------| /
* | |/
* | +------+
* o----------| DIV3 |------------------------- dsi0pll
* +------+
*/
#define POLL_MAX_READS 10
#define POLL_TIMEOUT_US 50
#define VCO_REF_CLK_RATE 19200000
#define VCO_MIN_RATE 350000000
#define VCO_MAX_RATE 750000000
/* v2.0.0 28nm LP implementation */
#define DSI_PHY_28NM_QUIRK_PHY_LP BIT(0)
#define DSI_PHY_28NM_QUIRK_PHY_8226 BIT(1)
#define LPFR_LUT_SIZE 10
struct lpfr_cfg {
unsigned long vco_rate;
u32 resistance;
};
/* Loop filter resistance, keyed by maximum VCO rate: */
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
{ 479500000, 8 },
{ 480000000, 11 },
{ 575500000, 8 },
{ 576000000, 12 },
{ 610500000, 8 },
{ 659500000, 9 },
{ 671500000, 10 },
{ 672000000, 14 },
{ 708500000, 10 },
{ 750000000, 11 },
};
struct pll_28nm_cached_state {
unsigned long vco_rate;
u8 postdiv3;
u8 postdiv1;
u8 byte_mux;
};
struct dsi_pll_28nm {
struct clk_hw clk_hw;
struct msm_dsi_phy *phy;
struct pll_28nm_cached_state cached_state;
};
#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
u32 nb_tries, u32 timeout_us)
{
bool pll_locked = false;
u32 val;
while (nb_tries--) {
val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
if (pll_locked)
break;
udelay(timeout_us);
}
DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
return pll_locked;
}
static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
void __iomem *base = pll_28nm->phy->pll_base;
/*
* Add HW recommended delays after toggling the software
* reset bit on and back off.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}
/*
* Clock Callbacks
*/
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
struct device *dev = &pll_28nm->phy->pdev->dev;
void __iomem *base = pll_28nm->phy->pll_base;
unsigned long div_fbx1000, gen_vco_clk;
u32 refclk_cfg, frac_n_mode, frac_n_value;
u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
u32 cal_cfg10, cal_cfg11;
u32 rem;
int i;
VERB("rate=%lu, parent's=%lu", rate, parent_rate);
/* Force postdiv2 to be div-4 */
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
/* Configure the Loop filter resistance */
for (i = 0; i < LPFR_LUT_SIZE; i++)
if (rate <= lpfr_lut[i].vco_rate)
break;
if (i == LPFR_LUT_SIZE) {
DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
rate);
return -EINVAL;
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
/* Loop filter capacitance values: c1 and c2 */
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
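/*
* Select integer vs. fractional (sigma-delta) mode: an exact multiple
* of the 19.2 MHz reference bypasses the SDM. Fractional mode enables
* the reference doubler, so the feedback divider is rate / (2 * fref);
* div_fbx1000 keeps that divider scaled by 1000 (hence the "/ 500") so
* the fractional part survives the integer math.
*/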
rem = rate % VCO_REF_CLK_RATE;
if (rem) {
refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
frac_n_mode = 1;
div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
} else {
refclk_cfg = 0x0;
frac_n_mode = 0;
div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
}
DBG("refclk_cfg = %d", refclk_cfg);
rem = div_fbx1000 % 1000;
frac_n_value = (rem << 16) / 1000;
DBG("div_fb = %lu", div_fbx1000);
DBG("frac_n_value = %d", frac_n_value);
DBG("Generated VCO Clock: %lu", gen_vco_clk);
rem = 0;
sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
if (frac_n_mode) {
sdm_cfg0 = 0x0;
sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
sdm_cfg3 = frac_n_value >> 8;
sdm_cfg2 = frac_n_value & 0xff;
} else {
sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
sdm_cfg2 = 0;
sdm_cfg3 = 0;
}
DBG("sdm_cfg0=%d", sdm_cfg0);
DBG("sdm_cfg1=%d", sdm_cfg1);
DBG("sdm_cfg2=%d", sdm_cfg2);
DBG("sdm_cfg3=%d", sdm_cfg3);
cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
/* Add hardware recommended delay for correct PLL configuration */
if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
udelay(1000);
else
udelay(1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);
return 0;
}
static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
POLL_TIMEOUT_US);
}
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
void __iomem *base = pll_28nm->phy->pll_base;
u32 sdm0, doubler, sdm_byp_div;
u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
u32 ref_clk = VCO_REF_CLK_RATE;
unsigned long vco_rate;
VERB("parent_rate=%lu", parent_rate);
/* Check to see if the ref clk doubler is enabled */
doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
ref_clk += (doubler * VCO_REF_CLK_RATE);
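/*
* Reverse of the set_rate math: integer mode gives
* vco = ref_clk * (BYP_DIV + 1); SDM mode gives
* vco = ref_clk * (DC_OFFSET + 1) + ref_clk * FREQ_SEED / 2^16.
*/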
/* see if it is integer mode or sdm mode */
sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
/* integer mode */
sdm_byp_div = FIELD(
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
vco_rate = ref_clk * sdm_byp_div;
} else {
/* sdm mode */
sdm_dc_off = FIELD(
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
DBG("sdm_dc_off = %d", sdm_dc_off);
sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
sdm_freq_seed = (sdm3 << 8) | sdm2;
DBG("sdm_freq_seed = %d", sdm_freq_seed);
vco_rate = (ref_clk * (sdm_dc_off + 1)) +
mult_frac(ref_clk, sdm_freq_seed, BIT(16));
DBG("vco rate = %lu", vco_rate);
}
DBG("returning vco rate = %lu", vco_rate);
return vco_rate;
}
static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
{
struct device *dev = &pll_28nm->phy->pdev->dev;
void __iomem *base = pll_28nm->phy->pll_base;
u32 max_reads = 5, timeout_us = 100;
bool locked;
u32 val;
int i;
DBG("id=%d", pll_28nm->phy->id);
pll_28nm_software_reset(pll_28nm);
/*
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
for (i = 0; i < 2; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
/* poll for PLL ready status */
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
timeout_us);
if (locked)
break;
pll_28nm_software_reset(pll_28nm);
/*
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
}
if (unlikely(!locked))
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL Lock success");
return locked ? 0 : -EINVAL;
}
static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
int i, ret;
if (unlikely(pll_28nm->phy->pll_on))
return 0;
for (i = 0; i < 3; i++) {
ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
if (!ret) {
pll_28nm->phy->pll_on = true;
return 0;
}
}
return ret;
}
static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
struct device *dev = &pll_28nm->phy->pdev->dev;
void __iomem *base = pll_28nm->phy->pll_base;
u32 max_reads = 5, timeout_us = 100;
bool locked;
u32 val;
int i;
DBG("id=%d", pll_28nm->phy->id);
pll_28nm_software_reset(pll_28nm);
/*
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34);
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
for (i = 0; i < 7; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
/* poll for PLL ready status */
locked = pll_28nm_poll_for_ready(pll_28nm,
max_reads, timeout_us);
if (locked)
break;
pll_28nm_software_reset(pll_28nm);
/*
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00, 50);
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 100);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
}
if (unlikely(!locked))
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL Lock success");
return locked ? 0 : -EINVAL;
}
static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
struct device *dev = &pll_28nm->phy->pdev->dev;
void __iomem *base = pll_28nm->phy->pll_base;
bool locked;
u32 max_reads = 10, timeout_us = 50;
u32 val;
DBG("id=%d", pll_28nm->phy->id);
if (unlikely(pll_28nm->phy->pll_on))
return 0;
pll_28nm_software_reset(pll_28nm);
/*
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
/* DSI PLL toggle lock detect setting */
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked)) {
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
return -EINVAL;
}
DBG("DSI PLL lock success");
pll_28nm->phy->pll_on = true;
return 0;
}
static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
DBG("id=%d", pll_28nm->phy->id);
if (unlikely(!pll_28nm->phy->pll_on))
return;
dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
pll_28nm->phy->pll_on = false;
}
static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
if (rate < pll_28nm->phy->cfg->min_pll_rate)
return pll_28nm->phy->cfg->min_pll_rate;
else if (rate > pll_28nm->phy->cfg->max_pll_rate)
return pll_28nm->phy->cfg->max_pll_rate;
else
return rate;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
.round_rate = dsi_pll_28nm_clk_round_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_hpm,
.unprepare = dsi_pll_28nm_vco_unprepare,
.is_enabled = dsi_pll_28nm_clk_is_enabled,
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
.round_rate = dsi_pll_28nm_clk_round_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_lp,
.unprepare = dsi_pll_28nm_vco_unprepare,
.is_enabled = dsi_pll_28nm_clk_is_enabled,
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
.round_rate = dsi_pll_28nm_clk_round_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_8226,
.unprepare = dsi_pll_28nm_vco_unprepare,
.is_enabled = dsi_pll_28nm_clk_is_enabled,
};
/*
* PLL Callbacks
*/
static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
void __iomem *base = pll_28nm->phy->pll_base;
cached_state->postdiv3 =
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
cached_state->postdiv1 =
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
else
cached_state->vco_rate = 0;
}
static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
void __iomem *base = pll_28nm->phy->pll_base;
int ret;
ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
cached_state->postdiv1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
cached_state->byte_mux);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref", .name = "xo",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
};
struct device *dev = &pll_28nm->phy->pdev->dev;
struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
int ret;
DBG("%d", pll_28nm->phy->id);
if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
else if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_8226)
vco_init.ops = &clk_ops_dsi_pll_28nm_vco_8226;
else
vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
pll_28nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
0, 4, 0, NULL);
if (IS_ERR(analog_postdiv))
return PTR_ERR(analog_postdiv);
snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
if (IS_ERR(indirect_path_div2))
return PTR_ERR(indirect_path_div2);
snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
((const struct clk_hw *[]){
&pll_28nm->clk_hw,
indirect_path_div2,
}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
if (IS_ERR(byte_mux))
return PTR_ERR(byte_mux);
snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
byte_mux, CLK_SET_RATE_PARENT, 1, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
return 0;
}
static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
struct dsi_pll_28nm *pll_28nm;
int ret;
if (!pdev)
return -ENODEV;
pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
if (!pll_28nm)
return -ENOMEM;
pll_28nm->phy = phy;
ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ret;
}
phy->vco_hw = &pll_28nm->clk_hw;
return 0;
}
static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing)
{
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
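/* clk_zero is 9 bits wide; bit 8 spills into TIMING_CTRL_3 */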
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
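/*
 * Worked example (hypothetical timing values): clk_zero can be up to 9 bits
 * wide, but TIMING_CTRL_0 only holds a byte, so bit 8 travels separately in
 * TIMING_CTRL_3 (the CLK_ZERO() field macro is assumed to mask to 8 bits):
 *
 *   clk_zero = 0x12c -> TIMING_CTRL_0 gets 0x2c, TIMING_CTRL_3 gets CLK_ZERO_8
 *   clk_zero = 0x05a -> TIMING_CTRL_0 gets 0x5a, TIMING_CTRL_3 is not written
 */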
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
}
static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
else
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
if (!enable) {
dsi_phy_write(phy->reg_base +
REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
return;
}
if (phy->regulator_ldo_mode)
dsi_28nm_phy_regulator_enable_ldo(phy);
else
dsi_28nm_phy_regulator_enable_dcdc(phy);
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
int i;
void __iomem *base = phy->base;
u32 val;
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n",
__func__);
return -EINVAL;
}
dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
dsi_28nm_phy_regulator_ctrl(phy, true);
dsi_28nm_dphy_set_timing(phy, timing);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
}
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
else
val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);
return 0;
}
static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
dsi_28nm_phy_regulator_ctrl(phy, false);
/*
* Wait for the register writes to complete in order to
* ensure that the PHY is completely disabled
*/
wmb();
}
static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
{ .supply = "vddio", .init_load_uA = 100000 },
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
.pll_init = dsi_pll_28nm_init,
.save_pll_state = dsi_28nm_pll_save_state,
.restore_pll_state = dsi_28nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0xfd922b00, 0xfd923100 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
.pll_init = dsi_pll_28nm_init,
.save_pll_state = dsi_28nm_pll_save_state,
.restore_pll_state = dsi_28nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0x1a94400, 0x1a96400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
.pll_init = dsi_pll_28nm_init,
.save_pll_state = dsi_28nm_pll_save_state,
.restore_pll_state = dsi_28nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0x1a98500 },
.num_dsi_phy = 1,
.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
.pll_init = dsi_pll_28nm_init,
.save_pll_state = dsi_28nm_pll_save_state,
.restore_pll_state = dsi_28nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0xfd922b00 },
.num_dsi_phy = 1,
.quirks = DSI_PHY_28NM_QUIRK_PHY_8226,
};
/* end of file: drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_14nm.xml.h"
#define PHY_14NM_CKLN_IDX 4
/*
* DSI PLL 14nm - clock diagram (eg: DSI0):
*
* dsi0n1_postdiv_clk
* |
* |
* +----+ | +----+
* dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
* +----+ | +----+
* | dsi0n1_postdivby2_clk
* | +----+ |
* o---| /2 |--o--|\
* | +----+ | \ +----+
* | | |--| n2 |-- dsi0pll
* o--------------| / +----+
* |/
*/
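/*
 * Worked example for the diagram above (all rates hypothetical): with
 * dsi0vco_clk = 1800 MHz, N1 = 2 and N2 = 3,
 *
 *   dsi0n1_postdiv_clk    = 1800 / N1 = 900 MHz
 *   dsi0pllbyte           = 900 / 8   = 112.5 MHz
 *   dsi0n1_postdivby2_clk = 900 / 2   = 450 MHz
 *   dsi0pll               = 450 / N2  = 150 MHz
 */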
#define POLL_MAX_READS 15
#define POLL_TIMEOUT_US 1000
#define VCO_REF_CLK_RATE 19200000
#define VCO_MIN_RATE 1300000000UL
#define VCO_MAX_RATE 2600000000UL
struct dsi_pll_config {
u64 vco_current_rate;
u32 ssc_en; /* SSC enable/disable */
/* fixed params */
u32 plllock_cnt;
u32 ssc_center;
u32 ssc_adj_period;
u32 ssc_spread;
u32 ssc_freq;
/* calculated */
u32 dec_start;
u32 div_frac_start;
u32 ssc_period;
u32 ssc_step_size;
u32 plllock_cmp;
u32 pll_vco_div_ref;
u32 pll_vco_count;
u32 pll_kvco_div_ref;
u32 pll_kvco_count;
};
struct pll_14nm_cached_state {
unsigned long vco_rate;
u8 n2postdiv;
u8 n1postdiv;
};
struct dsi_pll_14nm {
struct clk_hw clk_hw;
struct msm_dsi_phy *phy;
/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
struct pll_14nm_cached_state cached_state;
struct dsi_pll_14nm *slave;
};
#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, clk_hw)
/*
* Private struct for N1/N2 post-divider clocks. These clocks are similar to
* the generic clk_divider class of clocks. The only difference is that it
* also sets the slave DSI PLL's post-dividers if in bonded DSI mode
*/
struct dsi_pll_14nm_postdiv {
struct clk_hw hw;
/* divider params */
u8 shift;
u8 width;
u8 flags; /* same flags as used by clk_divider struct */
struct dsi_pll_14nm *pll;
};
#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
/*
* Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
u32 nb_tries, u32 timeout_us)
{
bool pll_locked = false, pll_ready = false;
void __iomem *base = pll_14nm->phy->pll_base;
u32 tries, val;
tries = nb_tries;
while (tries--) {
val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
pll_locked = !!(val & BIT(5));
if (pll_locked)
break;
udelay(timeout_us);
}
if (!pll_locked)
goto out;
tries = nb_tries;
while (tries--) {
val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
pll_ready = !!(val & BIT(0));
if (pll_ready)
break;
udelay(timeout_us);
}
out:
DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
return pll_locked && pll_ready;
}
static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
{
/* fixed input */
pconf->plllock_cnt = 1;
/*
* SSC is enabled by default. We might need DT props for configuring
* some SSC params like PPM and center/down spread etc.
*/
pconf->ssc_en = 1;
pconf->ssc_center = 0; /* down spread by default */
pconf->ssc_spread = 5; /* PPM / 1000 */
pconf->ssc_freq = 31500; /* default recommended */
pconf->ssc_adj_period = 37;
}
#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
{
u32 period, ssc_period;
u32 ref, rem;
u64 step_size;
DBG("vco=%lld ref=%d", pconf->vco_current_rate, VCO_REF_CLK_RATE);
ssc_period = pconf->ssc_freq / 500;
period = (u32)VCO_REF_CLK_RATE / 1000;
ssc_period = CEIL(period, ssc_period);
ssc_period -= 1;
pconf->ssc_period = ssc_period;
DBG("ssc freq=%d spread=%d period=%d", pconf->ssc_freq,
pconf->ssc_spread, pconf->ssc_period);
step_size = (u32)pconf->vco_current_rate;
ref = VCO_REF_CLK_RATE;
ref /= 1000;
step_size = div_u64(step_size, ref);
step_size <<= 20;
step_size = div_u64(step_size, 1000);
step_size *= pconf->ssc_spread;
step_size = div_u64(step_size, 1000);
step_size *= (pconf->ssc_adj_period + 1);
rem = 0;
step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
if (rem)
step_size++;
DBG("step_size=%lld", step_size);
step_size &= 0x0ffff; /* take lower 16 bits */
pconf->ssc_step_size = step_size;
}
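/*
 * Worked example with the defaults from dsi_pll_14nm_config_init() and an
 * assumed 1.8 GHz VCO (illustration only):
 *
 *   ssc_period = CEIL(19200000 / 1000, 31500 / 500) - 1
 *              = CEIL(19200, 63) - 1 = 305 - 1 = 304
 *
 *   step_size  = ((1800000000 / 19200) << 20) / 1000 * 5 / 1000 * 38
 *              = 18677760, divided by (304 + 1) and rounded up = 61239
 *
 *   ssc_step_size = 61239 & 0xffff = 0xef37
 */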
static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
{
u64 multiplier = BIT(20);
u64 dec_start_multiple, dec_start, pll_comp_val;
u32 duration, div_frac_start;
u64 vco_clk_rate = pconf->vco_current_rate;
u64 fref = VCO_REF_CLK_RATE;
DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
pconf->dec_start = (u32)dec_start;
pconf->div_frac_start = div_frac_start;
if (pconf->plllock_cnt == 0)
duration = 1024;
else if (pconf->plllock_cnt == 1)
duration = 256;
else if (pconf->plllock_cnt == 2)
duration = 128;
else
duration = 32;
pll_comp_val = duration * dec_start_multiple;
pll_comp_val = div_u64(pll_comp_val, multiplier);
do_div(pll_comp_val, 10);
pconf->plllock_cmp = (u32)pll_comp_val;
}
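/*
 * Worked example (hypothetical rate): vco_clk_rate = 1.8 GHz, fref = 19.2 MHz:
 *
 *   dec_start_multiple = 1800000000 * 2^20 / 19200000 = 98304000
 *   dec_start          = 98304000 / 2^20 = 93        (integer part of 93.75)
 *   div_frac_start     = 98304000 % 2^20 = 786432    (0.75 * 2^20)
 *
 * With plllock_cnt = 1 -> duration = 256:
 *
 *   plllock_cmp = 256 * 98304000 / 2^20 / 10 = 2400
 */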
static u32 pll_14nm_kvco_slop(u32 vrate)
{
u32 slop = 0;
if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
slop = 600;
else if (vrate > 1800000000UL && vrate < 2300000000UL)
slop = 400;
else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
slop = 280;
return slop;
}
static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
{
u64 vco_clk_rate = pconf->vco_current_rate;
u64 fref = VCO_REF_CLK_RATE;
u32 vco_measure_time = 5;
u32 kvco_measure_time = 5;
u64 data;
u32 cnt;
data = fref * vco_measure_time;
do_div(data, 1000000);
data &= 0x03ff; /* 10 bits */
data -= 2;
pconf->pll_vco_div_ref = data;
data = div_u64(vco_clk_rate, 1000000); /* unit is MHz */
data *= vco_measure_time;
do_div(data, 10);
pconf->pll_vco_count = data;
data = fref * kvco_measure_time;
do_div(data, 1000000);
data &= 0x03ff; /* 10 bits */
data -= 1;
pconf->pll_kvco_div_ref = data;
cnt = pll_14nm_kvco_slop(vco_clk_rate);
cnt *= 2;
cnt /= 100;
cnt *= kvco_measure_time;
pconf->pll_kvco_count = cnt;
}
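/*
 * Worked example (same hypothetical 1.8 GHz VCO, fref = 19.2 MHz, 5 us
 * measurement windows):
 *
 *   pll_vco_div_ref  = 19200000 * 5 / 1000000 - 2       = 94
 *   pll_vco_count    = (1800000000 / 1000000) * 5 / 10  = 900
 *   pll_kvco_div_ref = 19200000 * 5 / 1000000 - 1       = 95
 *   pll_kvco_count   = slop(600) * 2 / 100 * 5          = 60
 */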
static void pll_db_commit_ssc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
{
void __iomem *base = pll->phy->pll_base;
u8 data;
data = pconf->ssc_adj_period;
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
data = (pconf->ssc_adj_period >> 8);
data &= 0x03;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
data = pconf->ssc_period;
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
data = (pconf->ssc_period >> 8);
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
data = pconf->ssc_step_size;
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
data = (pconf->ssc_step_size >> 8);
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
data = (pconf->ssc_center & 0x01);
data <<= 1;
data |= 0x01; /* enable */
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
wmb(); /* make sure register committed */
}
static void pll_db_commit_common(struct dsi_pll_14nm *pll,
struct dsi_pll_config *pconf)
{
void __iomem *base = pll->phy->pll_base;
u8 data;
/* configure the non-frequency-dependent PLL registers */
data = 0;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, 1);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, 48);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, 4 << 3); /* bandgap_timer */
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, 5); /* pll_wakeup_timer */
data = pconf->pll_vco_div_ref & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
data = (pconf->pll_vco_div_ref >> 8) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
data = pconf->pll_kvco_div_ref & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
data = (pconf->pll_kvco_div_ref >> 8) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, 16);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, 4);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, 4);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, 1 << 3 | 1);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, 0 << 3 | 0);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, 0 << 3 | 0);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, 4 << 3 | 4);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, 1 << 4 | 11);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, 7);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, 1 << 4 | 2);
}
static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
{
void __iomem *cmn_base = pll_14nm->phy->base;
/* de-assert PLL start and apply PLL software reset */
/* stop pll */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
/* pll sw reset */
dsi_phy_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
wmb(); /* make sure register committed */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
wmb(); /* make sure register committed */
}
static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
struct dsi_pll_config *pconf)
{
void __iomem *base = pll->phy->pll_base;
void __iomem *cmn_base = pll->phy->base;
u8 data;
DBG("DSI%d PLL", pll->phy->id);
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, 0x3c);
pll_db_commit_common(pll, pconf);
pll_14nm_software_reset(pll);
/* Use the /2 path in Mux */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, 1);
data = 0xff; /* data, clk, pll normal operation */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
/* configure the frequency dependent pll registers */
data = pconf->dec_start;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
data = pconf->div_frac_start & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
data = (pconf->div_frac_start >> 8) & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
data = (pconf->div_frac_start >> 16) & 0xf;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
data = pconf->plllock_cmp & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
data = (pconf->plllock_cmp >> 8) & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
data = (pconf->plllock_cmp >> 16) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
data = pconf->plllock_cnt << 1 | 0 << 3; /* plllock_rng */
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
data = pconf->pll_vco_count & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
data = (pconf->pll_vco_count >> 8) & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
data = pconf->pll_kvco_count & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
data = (pconf->pll_kvco_count >> 8) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
/*
* High nibble configures the post divider internal to the VCO. It's
* fixed to divide by 1 for now.
*
* 0: divided by 1
* 1: divided by 2
* 2: divided by 4
* 3: divided by 8
*/
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, 0 << 4 | 3);
if (pconf->ssc_en)
pll_db_commit_ssc(pll, pconf);
wmb(); /* make sure register committed */
}
/*
* VCO clock Callbacks
*/
static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
struct dsi_pll_config conf;
DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->phy->id, rate,
parent_rate);
dsi_pll_14nm_config_init(&conf);
conf.vco_current_rate = rate;
pll_14nm_dec_frac_calc(pll_14nm, &conf);
if (conf.ssc_en)
pll_14nm_ssc_calc(pll_14nm, &conf);
pll_14nm_calc_vco_count(pll_14nm, &conf);
/* commit the slave DSI PLL registers if we're master. Note that we
* don't lock the slave PLL. We just ensure that the PLL/PHY registers
* of the master and slave are identical
*/
if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
pll_db_commit_14nm(pll_14nm_slave, &conf);
}
pll_db_commit_14nm(pll_14nm, &conf);
return 0;
}
static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
void __iomem *base = pll_14nm->phy->pll_base;
u64 vco_rate, multiplier = BIT(20);
u32 div_frac_start;
u32 dec_start;
u64 ref_clk = parent_rate;
dec_start = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
dec_start &= 0x0ff;
DBG("dec_start = %x", dec_start);
div_frac_start = (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
& 0xf) << 16;
div_frac_start |= (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
& 0xff) << 8;
div_frac_start |= dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
& 0xff;
DBG("div_frac_start = %x", div_frac_start);
vco_rate = ref_clk * dec_start;
vco_rate += ((ref_clk * div_frac_start) / multiplier);
/*
* Recalculating the rate from dec_start and frac_start doesn't yield exactly
* the rate we originally set. Convert the freq to kHz, round it up and
* convert it back to Hz.
*/
vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
DBG("returning vco rate = %lu", (unsigned long)vco_rate);
return (unsigned long)vco_rate;
}
static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
void __iomem *base = pll_14nm->phy->pll_base;
void __iomem *cmn_base = pll_14nm->phy->base;
bool locked;
DBG("");
if (unlikely(pll_14nm->phy->pll_on))
return 0;
if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0)
dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
POLL_TIMEOUT_US);
if (unlikely(!locked)) {
DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, "DSI PLL lock failed\n");
return -EINVAL;
}
DBG("DSI PLL lock success");
pll_14nm->phy->pll_on = true;
return 0;
}
static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
void __iomem *cmn_base = pll_14nm->phy->base;
DBG("");
if (unlikely(!pll_14nm->phy->pll_on))
return;
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
pll_14nm->phy->pll_on = false;
}
static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
if (rate < pll_14nm->phy->cfg->min_pll_rate)
return pll_14nm->phy->cfg->min_pll_rate;
else if (rate > pll_14nm->phy->cfg->max_pll_rate)
return pll_14nm->phy->cfg->max_pll_rate;
else
return rate;
}
static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
.round_rate = dsi_pll_14nm_clk_round_rate,
.set_rate = dsi_pll_14nm_vco_set_rate,
.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
.prepare = dsi_pll_14nm_vco_prepare,
.unprepare = dsi_pll_14nm_vco_unprepare,
};
/*
* N1 and N2 post-divider clock callbacks
*/
#define div_mask(width) ((1 << (width)) - 1)
static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
struct dsi_pll_14nm *pll_14nm = postdiv->pll;
void __iomem *base = pll_14nm->phy->base;
u8 shift = postdiv->shift;
u8 width = postdiv->width;
u32 val;
DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, parent_rate);
val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
val &= div_mask(width);
return divider_recalc_rate(hw, parent_rate, val, NULL,
postdiv->flags, width);
}
static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *prate)
{
struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
struct dsi_pll_14nm *pll_14nm = postdiv->pll;
DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, rate);
return divider_round_rate(hw, rate, prate, NULL,
postdiv->width,
postdiv->flags);
}
static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
struct dsi_pll_14nm *pll_14nm = postdiv->pll;
void __iomem *base = pll_14nm->phy->base;
spinlock_t *lock = &pll_14nm->postdiv_lock;
u8 shift = postdiv->shift;
u8 width = postdiv->width;
unsigned int value;
unsigned long flags = 0;
u32 val;
DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->phy->id, rate,
parent_rate);
value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
postdiv->flags);
spin_lock_irqsave(lock, flags);
val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
val &= ~(div_mask(width) << shift);
val |= value << shift;
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
/* If we're master in bonded DSI mode, then the slave PLL's post-dividers
* follow the master's post-dividers
*/
if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
void __iomem *slave_base = pll_14nm_slave->phy->base;
dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
}
spin_unlock_irqrestore(lock, flags);
return 0;
}
static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
.round_rate = dsi_pll_14nm_postdiv_round_rate,
.set_rate = dsi_pll_14nm_postdiv_set_rate,
};
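/*
 * Because the post-dividers use CLK_DIVIDER_ONE_BASED semantics, the value
 * written to the register field equals the divider itself (no +1 encoding).
 * Hypothetical example: asking the N1 post-divider (shift 0, width 4) for
 * 900 MHz from an 1800 MHz parent stores 2 in CLK_CFG0 bits [3:0], so a
 * readback of 0x2 in that field means "divide by 2", not "divide by 3".
 */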
/*
* PLL Callbacks
*/
static void dsi_14nm_pll_save_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
void __iomem *cmn_base = pll_14nm->phy->base;
u32 data;
data = dsi_phy_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
cached_state->n1postdiv = data & 0xf;
cached_state->n2postdiv = (data >> 4) & 0xf;
DBG("DSI%d PLL save state %x %x", pll_14nm->phy->id,
cached_state->n1postdiv, cached_state->n2postdiv);
cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
}
static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
void __iomem *cmn_base = pll_14nm->phy->base;
u32 data;
int ret;
ret = dsi_pll_14nm_vco_set_rate(phy->vco_hw,
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
DBG("DSI%d PLL restore state %x %x", pll_14nm->phy->id,
cached_state->n1postdiv, cached_state->n2postdiv);
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
/* also restore post-dividers for slave DSI PLL */
if (phy->usecase == MSM_DSI_PHY_MASTER) {
struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
void __iomem *slave_base = pll_14nm_slave->phy->base;
dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
}
return 0;
}
static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
void __iomem *base = phy->pll_base;
u32 clkbuflr_en, bandgap = 0;
switch (phy->usecase) {
case MSM_DSI_PHY_STANDALONE:
clkbuflr_en = 0x1;
break;
case MSM_DSI_PHY_MASTER:
clkbuflr_en = 0x3;
pll_14nm->slave = pll_14nm_list[(pll_14nm->phy->id + 1) % DSI_MAX];
break;
case MSM_DSI_PHY_SLAVE:
clkbuflr_en = 0x0;
bandgap = 0x3;
break;
default:
return -EINVAL;
}
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
if (bandgap)
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
return 0;
}
static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
const char *name,
const struct clk_hw *parent_hw,
unsigned long flags,
u8 shift)
{
struct dsi_pll_14nm_postdiv *pll_postdiv;
struct device *dev = &pll_14nm->phy->pdev->dev;
struct clk_init_data postdiv_init = {
.parent_hws = (const struct clk_hw *[]) { parent_hw },
.num_parents = 1,
.name = name,
.flags = flags,
.ops = &clk_ops_dsi_pll_14nm_postdiv,
};
int ret;
pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
if (!pll_postdiv)
return ERR_PTR(-ENOMEM);
pll_postdiv->pll = pll_14nm;
pll_postdiv->shift = shift;
/* both N1 and N2 postdividers are 4 bits wide */
pll_postdiv->width = 4;
/* range of each divider is from 1 to 15 */
pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
pll_postdiv->hw.init = &postdiv_init;
ret = devm_clk_hw_register(dev, &pll_postdiv->hw);
if (ret)
return ERR_PTR(ret);
return &pll_postdiv->hw;
}
static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_14nm_vco,
};
struct device *dev = &pll_14nm->phy->pdev->dev;
struct clk_hw *hw, *n1_postdiv, *n1_postdivby2;
int ret;
DBG("DSI%d", pll_14nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id);
pll_14nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name,
&pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0);
if (IS_ERR(n1_postdiv))
return PTR_ERR(n1_postdiv);
snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id);
/* DSI Byte clock = VCO_CLK / N1 / 8 */
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
n1_postdiv, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
/*
* Skip the mux for now, force DSICLK_SEL to 1, and add a /2 divider
* along the way. Don't let it set the parent rate.
*/
n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, n1_postdiv, 0, 1, 2);
if (IS_ERR(n1_postdivby2))
return PTR_ERR(n1_postdivby2);
snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id);
/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
* This is the output of N2 post-divider, bits 4-7 in
* REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
*/
hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2,
0, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
}
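/*
 * Sketch of the hierarchy registered above (names for DSI0):
 *
 *   dsi0vco_clk -> dsi0n1_postdiv_clk -> dsi0pllbyte (DSI_BYTE_PLL_CLK)
 *                                     -> dsi0n1_postdivby2_clk
 *                                        -> dsi0pll (DSI_PIXEL_PLL_CLK)
 *
 * Only the byte-clock branch propagates rate requests up to the VCO
 * (CLK_SET_RATE_PARENT); the pixel branch adjusts N2 against whatever
 * rate the byte path picked.
 */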
static int dsi_pll_14nm_init(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
struct dsi_pll_14nm *pll_14nm;
int ret;
if (!pdev)
return -ENODEV;
pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
if (!pll_14nm)
return -ENOMEM;
DBG("PLL%d", phy->id);
pll_14nm_list[phy->id] = pll_14nm;
spin_lock_init(&pll_14nm->postdiv_lock);
pll_14nm->phy = phy;
ret = pll_14nm_register(pll_14nm, phy->provided_clocks->hws);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ret;
}
phy->vco_hw = &pll_14nm->clk_hw;
return 0;
}
static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing,
int lane_idx)
{
void __iomem *base = phy->lane_base;
bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
timing->hs_halfbyte_en;
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
}
static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
u32 data;
int i;
int ret;
void __iomem *base = phy->base;
void __iomem *lane_base = phy->lane_base;
u32 glbl_test_ctrl;
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n",
__func__);
return -EINVAL;
}
data = 0x1c;
if (phy->usecase != MSM_DSI_PHY_STANDALONE)
data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
/* 4 data lanes + 1 clk lane configuration */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
0x1d);
dsi_phy_write(lane_base +
REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
dsi_phy_write(lane_base +
REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
(i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
(i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
0);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
0x88);
dsi_14nm_dphy_set_timing(phy, timing, i);
}
/* Make sure the PLL is not started */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
wmb(); /* make sure everything is written before reset and enable */
/* reset digital block */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
wmb(); /* ensure reset is asserted */
udelay(100);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
glbl_test_ctrl = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
glbl_test_ctrl |= DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
else
glbl_test_ctrl &= ~DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, glbl_test_ctrl);
ret = dsi_14nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
/* Remove power down from PLL and all lanes */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
return 0;
}
static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
/* ensure that the phy is completely disabled */
wmb();
}
static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 17000 },
};
static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 73400 },
};
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
.pll_init = dsi_pll_14nm_init,
.save_pll_state = dsi_14nm_pll_save_state,
.restore_pll_state = dsi_14nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0x994400, 0x996400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_73p4mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
.pll_init = dsi_pll_14nm_init,
.save_pll_state = dsi_14nm_pll_save_state,
.restore_pll_state = dsi_14nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0xc994400, 0xc996400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
.pll_init = dsi_pll_14nm_init,
.save_pll_state = dsi_14nm_pll_save_state,
.restore_pll_state = dsi_14nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0x1a94400, 0x1a96400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
.has_phy_lane = true,
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
.pll_init = dsi_pll_14nm_init,
.save_pll_state = dsi_14nm_pll_save_state,
.restore_pll_state = dsi_14nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0x5e94400 },
.num_dsi_phy = 1,
};
/* end of file: drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c (linux-master) */
/*
* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018, The Linux Foundation
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_10nm.xml.h"
/*
* DSI PLL 10nm - clock diagram (eg: DSI0):
*
* dsi0_pll_out_div_clk dsi0_pll_bit_clk
* | |
* | |
* +---------+ | +----------+ | +----+
* dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
* | | |
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
* | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
* +-----+ | |
* | |dsiclk_sel
* |
* dsi0_pll_post_out_div_clk
*/
#define VCO_REF_CLK_RATE 19200000
#define FRAC_BITS 18
/* v3.0.0 10nm implementation that requires the old timing settings */
#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS BIT(0)
struct dsi_pll_config {
bool enable_ssc;
bool ssc_center;
u32 ssc_freq;
u32 ssc_offset;
u32 ssc_adj_per;
/* out */
u32 pll_prop_gain_rate;
u32 decimal_div_start;
u32 frac_div_start;
u32 pll_clock_inverters;
u32 ssc_stepsize;
u32 ssc_div_per;
};
struct pll_10nm_cached_state {
unsigned long vco_rate;
u8 bit_clk_div;
u8 pix_clk_div;
u8 pll_out_div;
u8 pll_mux;
};
struct dsi_pll_10nm {
struct clk_hw clk_hw;
struct msm_dsi_phy *phy;
u64 vco_current_rate;
/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
struct pll_10nm_cached_state cached_state;
struct dsi_pll_10nm *slave;
};
#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
/**
* struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
* @rescode_offset_top: Offset for pull-up legs rescode.
* @rescode_offset_bot: Offset for pull-down legs rescode.
* @vreg_ctrl: vreg ctrl to drive LDO level
*/
struct dsi_phy_10nm_tuning_cfg {
u8 rescode_offset_top[DSI_LANE_MAX];
u8 rescode_offset_bot[DSI_LANE_MAX];
u8 vreg_ctrl;
};
/*
* Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
config->ssc_freq = 31500;
config->ssc_offset = 5000;
config->ssc_adj_per = 2;
config->enable_ssc = false;
config->ssc_center = false;
}
static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
u64 fref = VCO_REF_CLK_RATE;
u64 pll_freq;
u64 divider;
u64 dec, dec_multiple;
u32 frac;
u64 multiplier;
pll_freq = pll->vco_current_rate;
divider = fref * 2;
multiplier = 1 << FRAC_BITS;
dec_multiple = div_u64(pll_freq * multiplier, divider);
dec = div_u64_rem(dec_multiple, multiplier, &frac);
if (pll_freq <= 1900000000UL)
config->pll_prop_gain_rate = 8;
else if (pll_freq <= 3000000000UL)
config->pll_prop_gain_rate = 10;
else
config->pll_prop_gain_rate = 12;
if (pll_freq < 1100000000UL)
config->pll_clock_inverters = 8;
else
config->pll_clock_inverters = 0;
config->decimal_div_start = dec;
config->frac_div_start = frac;
}
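/*
 * Worked example (hypothetical rate): vco = 1.5 GHz, fref = 19.2 MHz, so the
 * effective divider is 2 * fref = 38.4 MHz, with FRAC_BITS = 18:
 *
 *   1500000000 / 38400000 = 39.0625
 *   decimal_div_start     = 39
 *   frac_div_start        = 0.0625 * 2^18 = 16384
 *
 * A 1.5 GHz VCO also selects pll_prop_gain_rate = 8 (<= 1.9 GHz) and
 * pll_clock_inverters = 0 (>= 1.1 GHz).
 */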
#define SSC_CENTER BIT(0)
#define SSC_EN BIT(1)
static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
u32 ssc_per;
u32 ssc_mod;
u64 ssc_step_size;
u64 frac;
if (!config->enable_ssc) {
DBG("SSC not enabled\n");
return;
}
ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
ssc_per -= ssc_mod;
frac = config->frac_div_start;
ssc_step_size = config->decimal_div_start;
ssc_step_size *= (1 << FRAC_BITS);
ssc_step_size += frac;
ssc_step_size *= config->ssc_offset;
ssc_step_size *= (config->ssc_adj_per + 1);
ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
config->ssc_div_per = ssc_per;
config->ssc_stepsize = ssc_step_size;
pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
config->decimal_div_start, frac, FRAC_BITS);
pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}
static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
void __iomem *base = pll->phy->pll_base;
if (config->enable_ssc) {
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
0xba);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
0x0c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
0xfa);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
0x4c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
}
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
config->decimal_div_start);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
(config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
config->pll_clock_inverters);
}
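/*
 * The 18-bit frac_div_start is split across three registers: bits [7:0] go
 * to FRAC_DIV_START_LOW_1, bits [15:8] to _MID_1 and bits [17:16] to _HIGH_1.
 * Hypothetical value: frac_div_start = 0x2abcd writes 0xcd, 0xab and 0x2
 * respectively.
 */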
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
struct dsi_pll_config config;
DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
parent_rate);
pll_10nm->vco_current_rate = rate;
dsi_pll_setup_config(&config);
dsi_pll_calc_dec_frac(pll_10nm, &config);
dsi_pll_calc_ssc(pll_10nm, &config);
dsi_pll_commit(pll_10nm, &config);
dsi_pll_config_hzindep_reg(pll_10nm);
dsi_pll_ssc_commit(pll_10nm, &config);
/* flush, ensure all register writes are done */
wmb();
return 0;
}
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
struct device *dev = &pll->phy->pdev->dev;
int rc;
u32 status = 0;
u32 const delay_us = 100;
u32 const timeout_us = 5000;
rc = readl_poll_timeout_atomic(pll->phy->pll_base +
REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
status,
((status & BIT(0)) > 0),
delay_us,
timeout_us);
if (rc)
DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
pll->phy->id, status);
return rc;
}
static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
data & ~BIT(5));
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
ndelay(250);
}
static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
{
u32 data;
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
data & ~BIT(5));
}
static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
{
u32 data;
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
data | BIT(5));
}
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
struct device *dev = &pll_10nm->phy->pdev->dev;
int rc;
dsi_pll_enable_pll_bias(pll_10nm);
if (pll_10nm->slave)
dsi_pll_enable_pll_bias(pll_10nm->slave);
rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
if (rc) {
DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
return rc;
}
/* Start PLL */
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
0x01);
/*
* ensure all PLL configurations are written prior to checking
* for PLL lock.
*/
wmb();
/* Check for PLL lock */
rc = dsi_pll_10nm_lock_status(pll_10nm);
if (rc) {
DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
goto error;
}
pll_10nm->phy->pll_on = true;
dsi_pll_enable_global_clk(pll_10nm);
if (pll_10nm->slave)
dsi_pll_enable_global_clk(pll_10nm->slave);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
0x01);
if (pll_10nm->slave)
dsi_phy_write(pll_10nm->slave->phy->base +
REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
error:
return rc;
}
static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
dsi_pll_disable_pll_bias(pll);
}
static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
/*
* To avoid any stray glitches while abruptly powering down the PLL
* make sure to gate the clock using the clock enable bit before
* powering down the PLL
*/
dsi_pll_disable_global_clk(pll_10nm);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
dsi_pll_disable_sub(pll_10nm);
if (pll_10nm->slave) {
dsi_pll_disable_global_clk(pll_10nm->slave);
dsi_pll_disable_sub(pll_10nm->slave);
}
/* flush, ensure all register writes are done */
wmb();
pll_10nm->phy->pll_on = false;
}
static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
void __iomem *base = pll_10nm->phy->pll_base;
u64 ref_clk = VCO_REF_CLK_RATE;
u64 vco_rate = 0x0;
u64 multiplier;
u32 frac;
u32 dec;
u64 pll_freq, tmp64;
dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
0xff) << 8);
frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
0x3) << 16);
/*
* TODO:
* 1. Assumes prescaler is disabled
*/
multiplier = 1 << FRAC_BITS;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
pll_10nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
return (unsigned long)vco_rate;
}
static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
if (rate < pll_10nm->phy->cfg->min_pll_rate)
return pll_10nm->phy->cfg->min_pll_rate;
else if (rate > pll_10nm->phy->cfg->max_pll_rate)
return pll_10nm->phy->cfg->max_pll_rate;
else
return rate;
}
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
.round_rate = dsi_pll_10nm_clk_round_rate,
.set_rate = dsi_pll_10nm_vco_set_rate,
.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
.prepare = dsi_pll_10nm_vco_prepare,
.unprepare = dsi_pll_10nm_vco_unprepare,
};
/*
* PLL Callbacks
*/
static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
void __iomem *phy_base = pll_10nm->phy->base;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = cmn_clk_cfg1 & 0x3;
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
cached->pix_clk_div, cached->pll_mux);
}
static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
void __iomem *phy_base = pll_10nm->phy->base;
u32 val;
int ret;
val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
val |= cached->pll_out_div;
dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
val |= cached->pll_mux;
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
pll_10nm->vco_current_rate,
VCO_REF_CLK_RATE);
if (ret) {
DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
DBG("DSI PLL%d", pll_10nm->phy->id);
return 0;
}
static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_10nm->phy->id);
switch (phy->usecase) {
case MSM_DSI_PHY_STANDALONE:
break;
case MSM_DSI_PHY_MASTER:
pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
break;
case MSM_DSI_PHY_SLAVE:
data = 0x1; /* external PLL */
break;
default:
return -EINVAL;
}
/* set PLL src */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
return 0;
}
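/*
 * Example of how this plays out in bonded DSI (assumed setup: DSI0 drives
 * the left half and DSI1 the right half of one panel):
 *
 *   DSI0: usecase = MSM_DSI_PHY_MASTER -> data = 0 (internal PLL) and
 *         pll_10nm->slave points at DSI1's PLL.
 *   DSI1: usecase = MSM_DSI_PHY_SLAVE  -> data = 1 (external PLL), so
 *         CLK_CFG1 bit 2 is set and DSI1 clocks itself from DSI0's output.
 */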
/*
* The post dividers and mux clocks are created using the standard divider and
* mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
* state to follow the master PLL's divider/mux state. Therefore, we don't
* require special clock ops that also configure the slave PLL registers
*/
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_10nm_vco,
};
struct device *dev = &pll_10nm->phy->pdev->dev;
struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
struct clk_hw *pll_post_out_div, *pclk_mux;
int ret;
DBG("DSI%d", pll_10nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
pll_10nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
pll_10nm->phy->pll_base +
REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
if (IS_ERR(pll_out_div)) {
ret = PTR_ERR(pll_out_div);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
pll_out_div, CLK_SET_RATE_PARENT,
pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
if (IS_ERR(pll_bit)) {
ret = PTR_ERR(pll_bit);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
pll_bit, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, pll_bit, 0, 1, 2);
if (IS_ERR(pll_by_2_bit)) {
ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, pll_out_div, 0, 1, 4);
if (IS_ERR(pll_post_out_div)) {
ret = PTR_ERR(pll_post_out_div);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);
pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
((const struct clk_hw *[]){
pll_bit,
pll_by_2_bit,
pll_out_div,
pll_post_out_div,
}), 4, 0, pll_10nm->phy->base +
REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
if (IS_ERR(pclk_mux)) {
ret = PTR_ERR(pclk_mux);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
/* PIX CLK DIV: DIV_CTRL_7_4 */
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
fail:
return ret;
}
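/*
 * Worked example of the chain registered above (all rates hypothetical):
 * dsi0vco_clk = 2.4 GHz, PLL_OUTDIV_RATE = /1, DIV_CTRL_3_0 = 2,
 * DIV_CTRL_7_4 = 4, pclk mux selecting dsi0_pll_bit_clk:
 *
 *   dsi0_pll_out_div_clk     = 2400 MHz
 *   dsi0_pll_bit_clk         = 2400 / 2 = 1200 MHz
 *   dsi0_phy_pll_out_byteclk = 1200 / 8 = 150 MHz
 *   dsi0_phy_pll_out_dsiclk  = 1200 / 4 = 300 MHz
 */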
static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
struct dsi_pll_10nm *pll_10nm;
int ret;
pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
if (!pll_10nm)
return -ENOMEM;
DBG("DSI PLL%d", phy->id);
pll_10nm_list[phy->id] = pll_10nm;
spin_lock_init(&pll_10nm->postdiv_lock);
pll_10nm->phy = phy;
ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ret;
}
phy->vco_hw = &pll_10nm->clk_hw;
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
return 0;
}
static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
u32 data = 0;
data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
}
static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
void __iomem *lane_base = phy->lane_base;
int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
/*
* LPRX and CDRX need to be enabled only for the physical data lane
* corresponding to the logical data lane 0
*/
if (enable)
dsi_phy_write(lane_base +
REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
else
dsi_phy_write(lane_base +
REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
}
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
int i;
u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
void __iomem *lane_base = phy->lane_base;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
tx_dctrl[3] = 0x02;
/* Strength ctrl settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
0x55);
/*
* Disable LPRX and CDRX for all lanes. Later on, they will be
* enabled only for the physical data lane corresponding
* to the logical data lane 0
*/
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
0x88);
}
dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
/* other settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
i == 4 ? 0x80 : 0x0);
/* platform specific dsi phy drive strength adjustment */
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
tuning_cfg->rescode_offset_top[i]);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
tuning_cfg->rescode_offset_bot[i]);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
tx_dctrl[i]);
}
if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
		/* Toggle BIT 0 to release freeze I/O */
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
}
}
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
int ret;
u32 status;
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
u32 data;
DBG("");
if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
if (dsi_phy_hw_v3_0_is_pll_on(phy))
pr_warn("PLL turned on before configuring PHY\n");
/* wait for REFGEN READY */
ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
status, (status & BIT(0)),
delay_us, timeout_us);
if (ret) {
pr_err("Ref gen not ready. Aborting\n");
return -EINVAL;
}
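	/*
	 * A rough sketch of what the readl_poll_timeout_atomic() call above
	 * boils down to (simplified; the real macro in <linux/iopoll.h>
	 * also re-reads once more after the deadline expires, so a
	 * last-moment success is not misreported as a timeout):
	 *
	 *   timeout = ktime_add_us(ktime_get(), timeout_us);
	 *   for (;;) {
	 *           status = readl(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS);
	 *           if (status & BIT(0))
	 *                   break;
	 *           if (ktime_after(ktime_get(), timeout))
	 *                   return -ETIMEDOUT;
	 *           udelay(delay_us);
	 *   }
	 */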
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
/* Assert PLL core reset */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
/* turn off resync FIFO */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
/* Select MS1 byte-clk */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
/* Enable LDO with platform specific drive level/amplitude adjustment */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
tuning_cfg->vreg_ctrl);
/* Configure PHY lane swap (TODO: we need to calculate this) */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
/* DSI PHY timings */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
timing->hs_halfbyte_en);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
timing->clk_zero);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
timing->clk_prepare);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
timing->clk_trail);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
timing->hs_exit);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
timing->hs_zero);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
timing->hs_prepare);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
timing->hs_trail);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
timing->hs_rqst);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
timing->ta_go | (timing->ta_sure << 3));
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
timing->ta_get);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
0x00);
/* Remove power down from all blocks */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
/* power up lanes */
data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
/* TODO: only power up lanes that are used */
data |= 0x1F;
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
/* Select full-rate mode */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
ret = dsi_10nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
/* DSI lane settings */
dsi_phy_hw_v3_0_lane_settings(phy);
DBG("DSI%d PHY enabled", phy->id);
return 0;
}
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
u32 data;
DBG("");
if (dsi_phy_hw_v3_0_is_pll_on(phy))
pr_warn("Turning OFF PHY while PLL is on\n");
dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
/* disable all lanes */
data &= ~0x1F;
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);
/* Turn off all PHY blocks */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
/* make sure phy is turned off */
wmb();
DBG("DSI%d PHY disabled", phy->id);
}
static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
u32 ldo_level = 400; /* 400mV */
u8 level;
int ret, i;
tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
if (!tuning_cfg)
return -ENOMEM;
/* Drive strength adjustment parameters */
ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
offset_top, DSI_LANE_MAX);
if (ret && ret != -EINVAL) {
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
return ret;
}
for (i = 0; i < DSI_LANE_MAX; i++) {
if (offset_top[i] < -32 || offset_top[i] > 31) {
DRM_DEV_ERROR(dev,
"qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
offset_top[i]);
return -EINVAL;
}
tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
}
ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
offset_bot, DSI_LANE_MAX);
if (ret && ret != -EINVAL) {
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
return ret;
}
for (i = 0; i < DSI_LANE_MAX; i++) {
if (offset_bot[i] < -32 || offset_bot[i] > 31) {
DRM_DEV_ERROR(dev,
"qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
offset_bot[i]);
return -EINVAL;
}
tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
}
/* Drive level/amplitude adjustment parameters */
ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
if (ret && ret != -EINVAL) {
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
return ret;
}
switch (ldo_level) {
case 375:
level = 0;
break;
case 400:
level = 1;
break;
case 425:
level = 2;
break;
case 450:
level = 3;
break;
case 475:
level = 4;
break;
case 500:
level = 5;
break;
default:
DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
return -EINVAL;
}
tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);
phy->tuning_cfg = tuning_cfg;
return 0;
}
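/*
 * A hypothetical device-tree fragment exercising the tuning properties
 * parsed above (node name and values are illustrative only):
 *
 *   dsi_phy: phy@ae94400 {
 *           ...
 *           qcom,phy-rescode-offset-top = /bits/ 8 <0 0 0 0 (-2)>;
 *           qcom,phy-rescode-offset-bot = /bits/ 8 <0 0 0 0 2>;
 *           qcom,phy-drive-ldo-level = <450>;
 *   };
 *
 * Each rescode array carries DSI_LANE_MAX entries (five here, assuming
 * four data lanes plus the clock lane). An ldo level of 450 selects
 * level 3, so vreg_ctrl becomes 0x58 | 0x3 = 0x5b.
 */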
static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
{ .supply = "vdds", .init_load_uA = 36000 },
};
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_10nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
.pll_init = dsi_pll_10nm_init,
.save_pll_state = dsi_10nm_pll_save_state,
.restore_pll_state = dsi_10nm_pll_restore_state,
.parse_dt_properties = dsi_10nm_phy_parse_dt,
},
.min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL,
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_10nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
.pll_init = dsi_pll_10nm_init,
.save_pll_state = dsi_10nm_pll_save_state,
.restore_pll_state = dsi_10nm_pll_restore_state,
.parse_dt_properties = dsi_10nm_phy_parse_dt,
},
.min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL,
.io_start = { 0xc994400, 0xc996400 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};
| linux-master | drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_20nm.xml.h"
static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing)
{
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
void __iomem *base = phy->reg_base;
if (!enable) {
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
return;
}
if (phy->regulator_ldo_mode) {
dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
return;
}
/* non LDO mode */
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
}
static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
int i;
void __iomem *base = phy->base;
u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
u32 val;
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
dsi_20nm_phy_regulator_ctrl(phy, true);
dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
val = dsi_phy_read(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_STANDALONE)
val |= DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
else
val &= ~DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
dsi_phy_write(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, val);
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
(i >> 1) * 0x40);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
}
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
dsi_20nm_dphy_set_timing(phy, timing);
dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
/* make sure everything is written before enable */
wmb();
dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
return 0;
}
static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
dsi_20nm_phy_regulator_ctrl(phy, false);
}
static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = {
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
{ .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
};
const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_20nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators),
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
},
.io_start = { 0xfd998500, 0xfd9a0500 },
.num_dsi_phy = 2,
};
| linux-master | drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <generated/utsrelease.h>
#include "msm_disp_snapshot.h"
static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
{
u32 len_padded;
u32 num_rows;
u32 x0, x4, x8, xc;
void __iomem *addr;
u32 *dump_addr = NULL;
void __iomem *end_addr;
int i;
len_padded = aligned_len * REG_DUMP_ALIGN;
num_rows = aligned_len / REG_DUMP_ALIGN;
addr = base_addr;
end_addr = base_addr + aligned_len;
if (!(*reg))
*reg = kzalloc(len_padded, GFP_KERNEL);
if (*reg)
dump_addr = *reg;
for (i = 0; i < num_rows; i++) {
x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
if (dump_addr) {
dump_addr[i * 4] = x0;
dump_addr[i * 4 + 1] = x4;
dump_addr[i * 4 + 2] = x8;
dump_addr[i * 4 + 3] = xc;
}
addr += REG_DUMP_ALIGN;
}
}
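/*
 * Layout note: each iteration above captures one REG_DUMP_ALIGN-sized
 * row of register space as four 32-bit words (which the 0x0/0x4/0x8/0xc
 * reads suggest is 16 bytes), so a block of aligned_len bytes yields
 * aligned_len / REG_DUMP_ALIGN rows; the per-word bounds checks only
 * matter for the final, partially-populated row.
 */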
static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
struct drm_printer *p)
{
int i;
u32 *dump_addr = NULL;
void __iomem *addr;
u32 num_rows;
addr = base_addr;
num_rows = len / REG_DUMP_ALIGN;
	if (*reg)
		dump_addr = *reg;

	/* Nothing was captured if the snapshot allocation failed */
	if (!dump_addr) {
		drm_printf(p, "Registers not stored\n");
		return;
	}

	for (i = 0; i < num_rows; i++) {
drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
(unsigned long)(addr - base_addr),
dump_addr[i * 4], dump_addr[i * 4 + 1],
dump_addr[i * 4 + 2], dump_addr[i * 4 + 3]);
addr += REG_DUMP_ALIGN;
}
}
void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
{
struct msm_disp_state_block *block, *tmp;
if (!p) {
DRM_ERROR("invalid drm printer\n");
return;
}
drm_printf(p, "---\n");
drm_printf(p, "kernel: " UTS_RELEASE "\n");
drm_printf(p, "module: " KBUILD_MODNAME "\n");
drm_printf(p, "dpu devcoredump\n");
drm_printf(p, "time: %lld.%09ld\n",
state->time.tv_sec, state->time.tv_nsec);
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
drm_printf(p, "====================%s================\n", block->name);
msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
}
drm_printf(p, "===================dpu drm state================\n");
if (state->atomic_state)
drm_atomic_print_new_state(state->atomic_state, p);
}
static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
{
struct drm_device *ddev;
struct drm_modeset_acquire_ctx ctx;
ktime_get_real_ts64(&disp_state->time);
ddev = disp_state->drm_dev;
drm_modeset_acquire_init(&ctx, 0);
while (drm_modeset_lock_all_ctx(ddev, &ctx) != 0)
drm_modeset_backoff(&ctx);
disp_state->atomic_state = drm_atomic_helper_duplicate_state(ddev,
&ctx);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
{
struct msm_drm_private *priv;
struct drm_device *drm_dev;
struct msm_kms *kms;
int i;
drm_dev = disp_state->drm_dev;
priv = drm_dev->dev_private;
kms = priv->kms;
for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
if (!priv->dp[i])
continue;
msm_dp_snapshot(disp_state, priv->dp[i]);
}
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (!priv->dsi[i])
continue;
msm_dsi_snapshot(disp_state, priv->dsi[i]);
}
if (kms->funcs->snapshot)
kms->funcs->snapshot(disp_state, kms);
msm_disp_capture_atomic_state(disp_state);
}
void msm_disp_state_free(void *data)
{
struct msm_disp_state *disp_state = data;
struct msm_disp_state_block *block, *tmp;
if (disp_state->atomic_state) {
drm_atomic_state_put(disp_state->atomic_state);
disp_state->atomic_state = NULL;
}
list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
list_del(&block->node);
kfree(block->state);
kfree(block);
}
kfree(disp_state);
}
void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
void __iomem *base_addr, const char *fmt, ...)
{
struct msm_disp_state_block *new_blk;
struct va_format vaf;
va_list va;
new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
if (!new_blk)
return;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
snprintf(new_blk->name, sizeof(new_blk->name), "%pV", &vaf);
va_end(va);
INIT_LIST_HEAD(&new_blk->node);
new_blk->size = ALIGN(len, REG_DUMP_ALIGN);
new_blk->base_addr = base_addr;
msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
list_add_tail(&new_blk->node, &disp_state->blocks);
}
| linux-master | drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "msm_disp_snapshot.h"
static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct drm_print_iterator iter;
struct drm_printer p;
struct msm_disp_state *disp_state;
disp_state = data;
iter.data = buffer;
iter.offset = 0;
iter.start = offset;
iter.remain = count;
p = drm_coredump_printer(&iter);
msm_disp_state_print(disp_state, &p);
return count - iter.remain;
}
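/*
 * The devcoredump core invokes this read callback repeatedly with a
 * sliding (offset, count) window. drm_coredump_printer() re-renders the
 * whole state on every call but discards output before iter.start and
 * stops once iter.remain reaches zero, so only the requested window
 * lands in the buffer. A rough userspace-side view:
 *
 *   read(fd, buf, 4096)  ->  disp_devcoredump_read(buf, 0, 4096, ...)
 *   read(fd, buf, 4096)  ->  disp_devcoredump_read(buf, 4096, 4096, ...)
 *   ... until the callback produces 0 bytes.
 */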
struct msm_disp_state *
msm_disp_snapshot_state_sync(struct msm_kms *kms)
{
struct drm_device *drm_dev = kms->dev;
struct msm_disp_state *disp_state;
WARN_ON(!mutex_is_locked(&kms->dump_mutex));
disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL);
if (!disp_state)
return ERR_PTR(-ENOMEM);
disp_state->dev = drm_dev->dev;
disp_state->drm_dev = drm_dev;
INIT_LIST_HEAD(&disp_state->blocks);
msm_disp_snapshot_capture_state(disp_state);
return disp_state;
}
static void _msm_disp_snapshot_work(struct kthread_work *work)
{
struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
struct msm_disp_state *disp_state;
struct drm_printer p;
/* Serialize dumping here */
mutex_lock(&kms->dump_mutex);
disp_state = msm_disp_snapshot_state_sync(kms);
mutex_unlock(&kms->dump_mutex);
if (IS_ERR(disp_state))
return;
if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) {
p = drm_info_printer(disp_state->drm_dev->dev);
msm_disp_state_print(disp_state, &p);
}
	/*
	 * If COREDUMP is disabled, the stub will call the free function.
	 * If there is already a coredump pending for the device,
	 * dev_coredumpm() will free the new coredump state as well.
	 */
dev_coredumpm(disp_state->dev, THIS_MODULE, disp_state, 0, GFP_KERNEL,
disp_devcoredump_read, msm_disp_state_free);
}
void msm_disp_snapshot_state(struct drm_device *drm_dev)
{
struct msm_drm_private *priv;
struct msm_kms *kms;
if (!drm_dev) {
DRM_ERROR("invalid params\n");
return;
}
priv = drm_dev->dev_private;
kms = priv->kms;
kthread_queue_work(kms->dump_worker, &kms->dump_work);
}
int msm_disp_snapshot_init(struct drm_device *drm_dev)
{
struct msm_drm_private *priv;
struct msm_kms *kms;
if (!drm_dev) {
DRM_ERROR("invalid params\n");
return -EINVAL;
}
priv = drm_dev->dev_private;
kms = priv->kms;
mutex_init(&kms->dump_mutex);
kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
if (IS_ERR(kms->dump_worker))
DRM_ERROR("failed to create disp state task\n");
kthread_init_work(&kms->dump_work, _msm_disp_snapshot_work);
return 0;
}
void msm_disp_snapshot_destroy(struct drm_device *drm_dev)
{
struct msm_kms *kms;
struct msm_drm_private *priv;
if (!drm_dev) {
DRM_ERROR("invalid params\n");
return;
}
priv = drm_dev->dev_private;
kms = priv->kms;
if (kms->dump_worker)
kthread_destroy_worker(kms->dump_worker);
mutex_destroy(&kms->dump_mutex);
}
| linux-master | drivers/gpu/drm/msm/disp/msm_disp_snapshot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "msm_drv.h"
#include "mdp_kms.h"
struct mdp_irq_wait {
struct mdp_irq irq;
int count;
};
static DECLARE_WAIT_QUEUE_HEAD(wait_event);
static DEFINE_SPINLOCK(list_lock);
static void update_irq(struct mdp_kms *mdp_kms)
{
struct mdp_irq *irq;
uint32_t irqmask = mdp_kms->vblank_mask;
assert_spin_locked(&list_lock);
list_for_each_entry(irq, &mdp_kms->irq_list, node)
irqmask |= irq->irqmask;
mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
mdp_kms->cur_irq_mask = irqmask;
}
/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
* link changes, this must be called to figure out the new global irqmask
*/
void mdp_irq_update(struct mdp_kms *mdp_kms)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
update_irq(mdp_kms);
spin_unlock_irqrestore(&list_lock, flags);
}
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
struct mdp_irq *handler, *n;
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
mdp_kms->in_irq = true;
list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
if (handler->irqmask & status) {
spin_unlock_irqrestore(&list_lock, flags);
handler->irq(handler, handler->irqmask & status);
spin_lock_irqsave(&list_lock, flags);
}
}
mdp_kms->in_irq = false;
update_irq(mdp_kms);
spin_unlock_irqrestore(&list_lock, flags);
}
void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
if (enable)
mdp_kms->vblank_mask |= mask;
else
mdp_kms->vblank_mask &= ~mask;
update_irq(mdp_kms);
spin_unlock_irqrestore(&list_lock, flags);
}
static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp_irq_wait *wait =
container_of(irq, struct mdp_irq_wait, irq);
wait->count--;
wake_up_all(&wait_event);
}
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
struct mdp_irq_wait wait = {
.irq = {
.irq = wait_irq,
.irqmask = irqmask,
},
.count = 1,
};
mdp_irq_register(mdp_kms, &wait.irq);
wait_event_timeout(wait_event, (wait.count <= 0),
msecs_to_jiffies(100));
mdp_irq_unregister(mdp_kms, &wait.irq);
}
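/*
 * Pattern sketch: mdp_irq_wait() rides the normal irq registration
 * path. A temporary mdp_irq whose handler decrements wait.count and
 * wakes wait_event is registered, the caller sleeps (up to 100ms) for
 * count to reach zero, and the temporary irq is then unregistered.
 * Typical use is a one-shot wait, e.g.:
 *
 *   mdp_irq_wait(mdp_kms, MDP4_IRQ_PRIMARY_VSYNC);
 *
 * to ride out one vsync before touching state that only latches at
 * vblank.
 */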
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
{
unsigned long flags;
bool needs_update = false;
spin_lock_irqsave(&list_lock, flags);
if (!irq->registered) {
irq->registered = true;
list_add(&irq->node, &mdp_kms->irq_list);
needs_update = !mdp_kms->in_irq;
}
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
mdp_irq_update(mdp_kms);
}
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
{
unsigned long flags;
bool needs_update = false;
spin_lock_irqsave(&list_lock, flags);
if (irq->registered) {
irq->registered = false;
list_del(&irq->node);
needs_update = !mdp_kms->in_irq;
}
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
mdp_irq_update(mdp_kms);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp_kms.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include "msm_drv.h"
#include "mdp_kms.h"
static struct csc_cfg csc_convert[CSC_MAX] = {
[CSC_RGB2RGB] = {
.type = CSC_RGB2RGB,
.matrix = {
0x0200, 0x0000, 0x0000,
0x0000, 0x0200, 0x0000,
0x0000, 0x0000, 0x0200
},
.pre_bias = { 0x0, 0x0, 0x0 },
.post_bias = { 0x0, 0x0, 0x0 },
.pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
.post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
},
[CSC_YUV2RGB] = {
.type = CSC_YUV2RGB,
.matrix = {
0x0254, 0x0000, 0x0331,
0x0254, 0xff37, 0xfe60,
0x0254, 0x0409, 0x0000
},
.pre_bias = { 0xfff0, 0xff80, 0xff80 },
.post_bias = { 0x00, 0x00, 0x00 },
.pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
.post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
},
[CSC_RGB2YUV] = {
.type = CSC_RGB2YUV,
.matrix = {
0x0083, 0x0102, 0x0032,
0x1fb5, 0x1f6c, 0x00e1,
0x00e1, 0x1f45, 0x1fdc
},
.pre_bias = { 0x00, 0x00, 0x00 },
.post_bias = { 0x10, 0x80, 0x80 },
.pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
.post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 },
},
[CSC_YUV2YUV] = {
.type = CSC_YUV2YUV,
.matrix = {
0x0200, 0x0000, 0x0000,
0x0000, 0x0200, 0x0000,
0x0000, 0x0000, 0x0200
},
.pre_bias = { 0x00, 0x00, 0x00 },
.post_bias = { 0x00, 0x00, 0x00 },
.pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
.post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
},
};
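/*
 * Coefficient format, as observed from the values (not taken from
 * hardware documentation): matrix entries appear to be fixed point
 * with 0x0200 representing 1.0 (9 fractional bits), negatives stored
 * as two's complement. E.g. in CSC_YUV2RGB, 0x0254 = 596/512 = 1.164
 * and 0xff37 = -201/512 = -0.393, matching the BT.601 limited-range
 * YCbCr-to-RGB coefficients.
 */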
#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
.base = { .pixel_format = DRM_FORMAT_ ## name }, \
.bpc_a = BPC ## a ## A, \
.bpc_r = BPC ## r, \
.bpc_g = BPC ## g, \
.bpc_b = BPC ## b, \
.unpack = { e0, e1, e2, e3 }, \
.alpha_enable = alpha, \
.unpack_tight = tight, \
.cpp = c, \
.unpack_count = cnt, \
.fetch_type = fp, \
.chroma_sample = cs, \
.is_yuv = yuv, \
}
#define BPC0A 0
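/*
 * For reference, the first table entry below expands (mechanically, per
 * the FMT macro above) to roughly:
 *
 *   {
 *           .base = { .pixel_format = DRM_FORMAT_ARGB8888 },
 *           .bpc_a = BPC8A, .bpc_r = BPC8, .bpc_g = BPC8, .bpc_b = BPC8,
 *           .unpack = { 1, 0, 2, 3 },
 *           .alpha_enable = true, .unpack_tight = true,
 *           .cpp = 4, .unpack_count = 4,
 *           .fetch_type = MDP_PLANE_INTERLEAVED,
 *           .chroma_sample = CHROMA_FULL,
 *           .is_yuv = false,
 *   },
 */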
/*
 * Note: keep RGB formats first, followed by YUV formats, so that the
 * rgb_only early-break in mdp_get_formats() below keeps working.
*/
static const struct mdp_format formats[] = {
/* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
/* --- RGB formats above / YUV formats below this line --- */
/* 2 plane YUV */
FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
/* 1 plane YUV */
FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4,
MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4,
MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4,
MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4,
MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
/* 3 plane YUV */
FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1,
MDP_PLANE_PLANAR, CHROMA_420, true),
FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1,
MDP_PLANE_PLANAR, CHROMA_420, true),
};
/*
* Note:
 * @rgb_only must be set to true when requesting
 * supported formats for RGB pipes.
*/
uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
bool rgb_only)
{
uint32_t i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
const struct mdp_format *f = &formats[i];
if (i == max_formats)
break;
if (rgb_only && MDP_FORMAT_IS_YUV(f))
break;
pixel_formats[i] = f->base.pixel_format;
}
return i;
}
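/*
 * Caller sketch (hypothetical, not from this file): an RGB-only pipe
 * querying its supported formats:
 *
 *   uint32_t fmts[32];
 *   uint32_t n = mdp_get_formats(fmts, ARRAY_SIZE(fmts), true);
 *
 * fmts[0..n-1] then holds only RGB fourccs, because the table keeps RGB
 * entries first and the loop stops at the first YUV entry when rgb_only
 * is set.
 */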
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
uint64_t modifier)
{
int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
const struct mdp_format *f = &formats[i];
if (f->base.pixel_format == format)
return &f->base;
}
return NULL;
}
struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type)
{
if (WARN_ON(type >= CSC_MAX))
return NULL;
return &csc_convert[type];
}
| linux-master | drivers/gpu/drm/msm/disp/mdp_format.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "mdp4_kms.h"
#include "msm_gem.h"
struct mdp4_crtc {
struct drm_crtc base;
char name[8];
int id;
int ovlp;
enum mdp4_dma dma;
bool enabled;
/* which mixer/encoder we route output to: */
int mixer;
struct {
spinlock_t lock;
bool stale;
uint32_t width, height;
uint32_t x, y;
/* next cursor to scan-out: */
uint32_t next_iova;
struct drm_gem_object *next_bo;
/* current cursor being scanned out: */
struct drm_gem_object *scanout_bo;
} cursor;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
	/* Bits that were flushed at the last commit; used to decide
	 * whether a vsync has happened since that commit.
	 */
u32 flushed_mask;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
/* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work;
struct mdp_irq vblank;
struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
atomic_or(pending, &mdp4_crtc->pending);
mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}
static void crtc_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_plane *plane;
uint32_t flush = 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
flush |= pipe2flush(pipe_id);
}
flush |= ovlp2flush(mdp4_crtc->ovlp);
DBG("%s: flush=%08x", mdp4_crtc->name, flush);
mdp4_crtc->flushed_mask = flush;
mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp4_crtc->event;
if (event) {
mdp4_crtc->event = NULL;
DBG("%s: send event: %p", mdp4_crtc->name, event);
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
struct mdp4_crtc *mdp4_crtc =
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put(val);
}
static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
kfree(mdp4_crtc);
}
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
[VG2] = 2,
[RGB1] = 0,
[RGB2] = 0,
[RGB3] = 0,
[VG3] = 3,
[VG4] = 4,
};
/* setup mixer config, for which we need to consider all crtc's and
* the planes attached to them
*
 * TODO: may need some extra locking here
*/
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
struct drm_crtc *crtc;
uint32_t mixer_cfg = 0;
static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
list_for_each_entry(crtc, &config->crtc_list, head) {
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane;
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
pipe_id, stages[idx]);
}
}
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
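/*
 * Worked example of the mapping above: a crtc whose base plane is RGB1
 * with a VG1 overlay attached gets idxs[RGB1] = 0 -> STAGE_BASE and
 * idxs[VG1] = 1 -> STAGE0, so mixer_cfg routes RGB1 to the base stage
 * and VG1 to the first blend stage of that crtc's mixer.
 */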
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_plane *plane;
int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(plane->state->fb));
alpha[idx-1] = format->alpha_enable;
}
}
for (i = 0; i < 4; i++) {
uint32_t op;
if (alpha[i]) {
op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
} else {
op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
}
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
}
setup_mixer(mdp4_kms);
}
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
int ovlp = mdp4_crtc->ovlp;
struct drm_display_mode *mode;
if (WARN_ON(!crtc->state))
return;
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: " DRM_MODE_FMT,
mdp4_crtc->name, DRM_MODE_ARG(mode));
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
/* take data from pipe: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
MDP4_DMA_DST_SIZE_WIDTH(0) |
MDP4_DMA_DST_SIZE_HEIGHT(0));
mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
if (dma == DMA_E) {
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
}
static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
DBG("%s", mdp4_crtc->name);
if (WARN_ON(!mdp4_crtc->enabled))
return;
/* Disable/save vblank irq handling before power is disabled */
drm_crtc_vblank_off(crtc);
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
mdp4_crtc->enabled = false;
}
static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
DBG("%s", mdp4_crtc->name);
if (WARN_ON(mdp4_crtc->enabled))
return;
mdp4_enable(mdp4_kms);
/* Restore vblank irq handling after power is enabled */
drm_crtc_vblank_on(crtc);
mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
crtc_flush(crtc);
mdp4_crtc->enabled = true;
}
static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: check", mdp4_crtc->name);
// TODO anything else to check?
return 0;
}
static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: begin", mdp4_crtc->name);
}
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
unsigned long flags;
DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
WARN_ON(mdp4_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
mdp4_crtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
blend_setup(crtc);
crtc_flush(crtc);
request_pending(crtc, PENDING_FLIP);
}
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
/* called from IRQ to update cursor related registers (if needed). The
* cursor registers, other than x/y position, appear not to be double
* buffered, and changing them other than from vblank seems to trigger
* underflow.
*/
static void update_cursor(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct msm_kms *kms = &mdp4_kms->base.base;
enum mdp4_dma dma = mdp4_crtc->dma;
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
mdp4_kms->blank_cursor_iova);
}
		/* and drop the iova ref + obj ref when done scanning out: */
if (prev_bo)
drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
}
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv, uint32_t handle,
uint32_t width, uint32_t height)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct msm_kms *kms = &mdp4_kms->base.base;
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
if (handle) {
cursor_bo = drm_gem_object_lookup(file_priv, handle);
if (!cursor_bo)
return -ENOENT;
} else {
cursor_bo = NULL;
}
if (cursor_bo) {
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
if (ret)
goto fail;
} else {
iova = 0;
}
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
old_bo = mdp4_crtc->cursor.next_bo;
mdp4_crtc->cursor.next_bo = cursor_bo;
mdp4_crtc->cursor.next_iova = iova;
mdp4_crtc->cursor.width = width;
mdp4_crtc->cursor.height = height;
mdp4_crtc->cursor.stale = true;
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
if (old_bo) {
/* drop our previous reference: */
drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
}
request_pending(crtc, PENDING_CURSOR);
return 0;
fail:
drm_gem_object_put(cursor_bo);
return ret;
}
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
mdp4_crtc->cursor.x = x;
mdp4_crtc->cursor.y = y;
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
}
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp4_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = msm_crtc_enable_vblank,
.disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.mode_set_nofb = mdp4_crtc_mode_set_nofb,
.atomic_check = mdp4_crtc_atomic_check,
.atomic_begin = mdp4_crtc_atomic_begin,
.atomic_flush = mdp4_crtc_atomic_flush,
.atomic_enable = mdp4_crtc_atomic_enable,
.atomic_disable = mdp4_crtc_atomic_disable,
};
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
struct drm_crtc *crtc = &mdp4_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
pending = atomic_xchg(&mdp4_crtc->pending, 0);
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
}
if (pending & PENDING_CURSOR) {
update_cursor(crtc);
drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
}
}
static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
struct drm_crtc *crtc = &mdp4_crtc->base;
DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
crtc_flush(crtc);
}
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
int ret;
ret = drm_crtc_vblank_get(crtc);
if (ret)
return;
ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
mdp4_crtc->flushed_mask),
msecs_to_jiffies(50));
if (ret <= 0)
dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);
mdp4_crtc->flushed_mask = 0;
drm_crtc_vblank_put(crtc);
}
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
return mdp4_crtc->vblank.irqmask;
}
/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}
/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
uint32_t intf_sel;
intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
switch (mdp4_crtc->dma) {
case DMA_P:
intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
break;
case DMA_S:
intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
break;
case DMA_E:
intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
break;
}
if (intf == INTF_DSI_VIDEO) {
intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
} else if (intf == INTF_DSI_CMD) {
intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
}
mdp4_crtc->mixer = mixer;
blend_setup(crtc);
DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTC to wait for
	 * other events.
	 */
mdp4_crtc_wait_for_flush_done(crtc);
}
static const char *dma_names[] = {
"DMA_P", "DMA_S", "DMA_E",
};
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
enum mdp4_dma dma_id)
{
struct drm_crtc *crtc = NULL;
struct mdp4_crtc *mdp4_crtc;
mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
if (!mdp4_crtc)
return ERR_PTR(-ENOMEM);
crtc = &mdp4_crtc->base;
mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
mdp4_crtc->err.irq = mdp4_crtc_err_irq;
snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
dma_names[dma_id], ovlp_id);
spin_lock_init(&mdp4_crtc->cursor.lock);
drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
return crtc;
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <[email protected]>
* Author: Vinay Simha <[email protected]>
*/
#include "mdp4_kms.h"
struct mdp4_lvds_connector {
struct drm_connector base;
struct drm_encoder *encoder;
struct device_node *panel_node;
struct drm_panel *panel;
};
#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
static enum drm_connector_status mdp4_lvds_connector_detect(
struct drm_connector *connector, bool force)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
if (!mdp4_lvds_connector->panel) {
mdp4_lvds_connector->panel =
of_drm_find_panel(mdp4_lvds_connector->panel_node);
if (IS_ERR(mdp4_lvds_connector->panel))
mdp4_lvds_connector->panel = NULL;
}
return mdp4_lvds_connector->panel ?
connector_status_connected :
connector_status_disconnected;
}
static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
drm_connector_cleanup(connector);
kfree(mdp4_lvds_connector);
}
static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
struct drm_panel *panel = mdp4_lvds_connector->panel;
int ret = 0;
if (panel)
ret = drm_panel_get_modes(panel, connector);
return ret;
}
static enum drm_mode_status
mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
long actual, requested;
requested = 1000 * mode->clock;
actual = mdp4_lcdc_round_pixclk(encoder, requested);
DBG("requested=%ld, actual=%ld", requested, actual);
if (actual != requested)
return MODE_CLOCK_RANGE;
return MODE_OK;
}
static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
.detect = mdp4_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mdp4_lvds_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
.get_modes = mdp4_lvds_connector_get_modes,
.mode_valid = mdp4_lvds_connector_mode_valid,
};
/* initialize connector */
struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
struct device_node *panel_node, struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct mdp4_lvds_connector *mdp4_lvds_connector;
mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
if (!mdp4_lvds_connector)
return ERR_PTR(-ENOMEM);
mdp4_lvds_connector->encoder = encoder;
mdp4_lvds_connector->panel_node = panel_node;
connector = &mdp4_lvds_connector->base;
drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
connector->polled = 0;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
drm_connector_attach_encoder(connector, encoder);
return connector;
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <[email protected]>
* Author: Vinay Simha <[email protected]>
*/
#include <linux/delay.h>
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include "mdp4_kms.h"
struct mdp4_lcdc_encoder {
struct drm_encoder base;
struct device_node *panel_node;
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
struct regulator *regs[3];
bool enabled;
uint32_t bsc;
};
#define to_mdp4_lcdc_encoder(x) container_of(x, struct mdp4_lcdc_encoder, base)
static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_lcdc_encoder);
}
static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
.destroy = mdp4_lcdc_encoder_destroy,
};
/* this should probably be a helper: */
static struct drm_connector *get_connector(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return connector;
return NULL;
}
static void setup_phy(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector = get_connector(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
uint32_t lvds_intf = 0, lvds_phy_cfg0 = 0;
int bpp, nchan, swap;
if (!connector)
return;
bpp = 3 * connector->display_info.bpc;
if (!bpp)
bpp = 18;
/* TODO, these should come from panel somehow: */
nchan = 1;
swap = 0;
switch (bpp) {
case 24:
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x08) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x05) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x04) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x03));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x02) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x01) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x00));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0d) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0c));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0b) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0a) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x09));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x15));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x14) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x13) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x12));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(3),
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1b) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x17) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x16) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0f));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(3),
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0e) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x07) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x06));
if (nchan == 2) {
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
} else {
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
}
break;
case 18:
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x0a) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x07) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x06) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x05));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x04) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x03) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x02));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x13) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x12) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0f) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0e));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0d) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0c) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x0b));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x17));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x16) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x15) |
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x14));
if (nchan == 2) {
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
} else {
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
}
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT;
break;
default:
DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
return;
}
switch (nchan) {
case 1:
lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0;
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN |
MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL;
break;
case 2:
lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0 |
MDP4_LVDS_PHY_CFG0_CHANNEL1;
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN |
MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
break;
default:
DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
return;
}
if (swap)
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP;
lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_ENABLE;
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_INTF_CTL, lvds_intf);
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG2, 0x30);
mb();
udelay(1);
lvds_phy_cfg0 |= MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE;
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
}
static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
uint32_t lcdc_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
mode = adjusted_mode;
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
mdp4_lcdc_encoder->pixclock = mode->clock * 1000;
DBG("pixclock=%lu", mdp4_lcdc_encoder->pixclock);
ctrl_pol = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW;
/* probably need to get DATA_EN polarity from panel.. */
lcdc_hsync_skew = 0; /* get this from panel? */
hsync_start_x = (mode->htotal - mode->hsync_start);
hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
vsync_period = mode->vtotal * mode->htotal;
vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + lcdc_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + lcdc_hsync_skew - 1;
mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_CTRL,
MDP4_LCDC_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP4_LCDC_HSYNC_CTRL_PERIOD(mode->htotal));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_PERIOD, vsync_period);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_LEN, vsync_len);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_HCTRL,
MDP4_LCDC_DISPLAY_HCTRL_START(hsync_start_x) |
MDP4_LCDC_DISPLAY_HCTRL_END(hsync_end_x));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VSTART, display_v_start);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VEND, display_v_end);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_BORDER_CLR, 0);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_UNDERFLOW_CLR,
MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY |
MDP4_LCDC_UNDERFLOW_CLR_COLOR(0xff));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_SKEW, lcdc_hsync_skew);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_CTRL_POLARITY, ctrl_pol);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_HCTL,
MDP4_LCDC_ACTIVE_HCTL_START(0) |
MDP4_LCDC_ACTIVE_HCTL_END(0));
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VSTART, 0);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0);
}
static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
int i, ret;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (!IS_ERR(panel)) {
drm_panel_disable(panel);
drm_panel_unprepare(panel);
}
/*
* Wait for a vsync so we know ENABLE=0 has latched before
* the (connector) source of the vsyncs gets disabled;
* otherwise we end up in a funny state if we re-enable
* before the disable latches, and some of the settings
* changes for the new modeset (like the new scanout
* buffer) don't latch properly..
*/
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
mdp4_lcdc_encoder->enabled = false;
}
static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
uint32_t config;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
/* TODO: hard-coded for 18bpp: */
config =
MDP4_DMA_CONFIG_R_BPC(BPC6) |
MDP4_DMA_CONFIG_G_BPC(BPC6) |
MDP4_DMA_CONFIG_B_BPC(BPC6) |
MDP4_DMA_CONFIG_PACK(0x21) |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN;
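/* default to MSB pack alignment unless the DT opts into LSB */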
if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
}
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (!IS_ERR(panel)) {
drm_panel_prepare(panel);
drm_panel_enable(panel);
}
setup_phy(encoder);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
mdp4_lcdc_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
.mode_set = mdp4_lcdc_encoder_mode_set,
.disable = mdp4_lcdc_encoder_disable,
.enable = mdp4_lcdc_encoder_enable,
};
long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
{
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
}
/* initialize encoder */
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
struct device_node *panel_node)
{
struct drm_encoder *encoder = NULL;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
struct regulator *reg;
int ret;
mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL);
if (!mdp4_lcdc_encoder) {
ret = -ENOMEM;
goto fail;
}
mdp4_lcdc_encoder->panel_node = panel_node;
encoder = &mdp4_lcdc_encoder->base;
drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
goto fail;
}
/* TODO: different regulators in other cases? */
reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[0] = reg;
reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[1] = reg;
reg = devm_regulator_get(dev->dev, "lvds-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[2] = reg;
return encoder;
fail:
if (encoder)
mdp4_lcdc_encoder_destroy(encoder);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "mdp4_kms.h"
struct mdp4_lvds_pll {
struct clk_hw pll_hw;
struct drm_device *dev;
unsigned long pixclk;
};
#define to_mdp4_lvds_pll(x) container_of(x, struct mdp4_lvds_pll, pll_hw)
static struct mdp4_kms *get_kms(struct mdp4_lvds_pll *lvds_pll)
{
struct msm_drm_private *priv = lvds_pll->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
struct pll_rate {
unsigned long rate;
struct {
uint32_t val;
uint32_t reg;
} conf[32];
};
/* NOTE: keep sorted highest freq to lowest: */
static const struct pll_rate freqtbl[] = {
{ 72000000, {
{ 0x8f, REG_MDP4_LVDS_PHY_PLL_CTRL_1 },
{ 0x30, REG_MDP4_LVDS_PHY_PLL_CTRL_2 },
{ 0xc6, REG_MDP4_LVDS_PHY_PLL_CTRL_3 },
{ 0x10, REG_MDP4_LVDS_PHY_PLL_CTRL_5 },
{ 0x07, REG_MDP4_LVDS_PHY_PLL_CTRL_6 },
{ 0x62, REG_MDP4_LVDS_PHY_PLL_CTRL_7 },
{ 0x41, REG_MDP4_LVDS_PHY_PLL_CTRL_8 },
{ 0x0d, REG_MDP4_LVDS_PHY_PLL_CTRL_9 },
{ 0, 0 } }
},
};
static const struct pll_rate *find_rate(unsigned long rate)
{
int i;
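/*
* Table is sorted high to low: return the smallest rate that
* still satisfies the request, or the slowest entry if none does.
*/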
for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
if (rate > freqtbl[i].rate)
return &freqtbl[i-1];
return &freqtbl[i-1];
}
static int mpd4_lvds_pll_enable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
const struct pll_rate *pll_rate = find_rate(lvds_pll->pixclk);
int i;
if (WARN_ON(!pll_rate))
return -EINVAL;
DBG("pixclk=%lu (%lu)", lvds_pll->pixclk, pll_rate->rate);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_PHY_RESET, 0x33);
for (i = 0; pll_rate->conf[i].reg; i++)
mdp4_write(mdp4_kms, pll_rate->conf[i].reg, pll_rate->conf[i].val);
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x01);
/* Wait until LVDS PLL is locked and ready */
while (!mdp4_read(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_LOCKED))
cpu_relax();
return 0;
}
static void mpd4_lvds_pll_disable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
DBG("");
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, 0x0);
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0);
}
static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
return lvds_pll->pixclk;
}
static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
const struct pll_rate *pll_rate = find_rate(rate);
return pll_rate->rate;
}
static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
lvds_pll->pixclk = rate;
return 0;
}
static const struct clk_ops mpd4_lvds_pll_ops = {
.enable = mpd4_lvds_pll_enable,
.disable = mpd4_lvds_pll_disable,
.recalc_rate = mpd4_lvds_pll_recalc_rate,
.round_rate = mpd4_lvds_pll_round_rate,
.set_rate = mpd4_lvds_pll_set_rate,
};
static const char *mpd4_lvds_pll_parents[] = {
"pxo",
};
static struct clk_init_data pll_init = {
.name = "mpd4_lvds_pll",
.ops = &mpd4_lvds_pll_ops,
.parent_names = mpd4_lvds_pll_parents,
.num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
};
struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
struct clk *clk;
int ret;
lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
if (!lvds_pll) {
ret = -ENOMEM;
goto fail;
}
lvds_pll->dev = dev;
lvds_pll->pll_hw.init = &pll_init;
clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto fail;
}
return clk;
fail:
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include "mdp4_kms.h"
struct mdp4_dtv_encoder {
struct drm_encoder base;
struct clk *hdmi_clk;
struct clk *mdp_clk;
unsigned long pixclock;
bool enabled;
uint32_t bsc;
};
#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_dtv_encoder);
}
static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
.destroy = mdp4_dtv_encoder_destroy,
};
static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
mode = adjusted_mode;
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
mdp4_dtv_encoder->pixclock = mode->clock * 1000;
DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
ctrl_pol = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
/* probably need to get DATA_EN polarity from panel.. */
dtv_hsync_skew = 0; /* get this from panel? */
hsync_start_x = (mode->htotal - mode->hsync_start);
hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
vsync_period = mode->vtotal * mode->htotal;
vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
MDP4_DTV_ACTIVE_HCTL_START(0) |
MDP4_DTV_ACTIVE_HCTL_END(0));
mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
}
static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
if (WARN_ON(!mdp4_dtv_encoder->enabled))
return;
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
/*
* Wait for a vsync so we know ENABLE=0 has latched before
* the (connector) source of the vsyncs gets disabled;
* otherwise we end up in a funny state if we re-enable
* before the disable latches, and some of the settings
* changes for the new modeset (like the new scanout
* buffer) don't latch properly..
*/
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
mdp4_dtv_encoder->enabled = false;
}
static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
unsigned long pc = mdp4_dtv_encoder->pixclock;
int ret;
if (WARN_ON(mdp4_dtv_encoder->enabled))
return;
mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_R_BPC(BPC8) |
MDP4_DMA_CONFIG_G_BPC(BPC8) |
MDP4_DMA_CONFIG_B_BPC(BPC8) |
MDP4_DMA_CONFIG_PACK(0x21));
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
DBG("setting mdp_clk=%lu", pc);
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
pc, ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
mdp4_dtv_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
.mode_set = mdp4_dtv_encoder_mode_set,
.enable = mdp4_dtv_encoder_enable,
.disable = mdp4_dtv_encoder_disable,
};
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
}
/* initialize encoder */
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder = NULL;
struct mdp4_dtv_encoder *mdp4_dtv_encoder;
int ret;
mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
if (!mdp4_dtv_encoder) {
ret = -ENOMEM;
goto fail;
}
encoder = &mdp4_dtv_encoder->base;
drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
goto fail;
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
return encoder;
fail:
if (encoder)
mdp4_dtv_encoder_destroy(encoder);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2014, Inforce Computing. All rights reserved.
*
* Author: Vinay Simha <[email protected]>
*/
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include "mdp4_kms.h"
#ifdef CONFIG_DRM_MSM_DSI
struct mdp4_dsi_encoder {
struct drm_encoder base;
struct drm_panel *panel;
bool enabled;
};
#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base)
static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_dsi_encoder);
}
static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
.destroy = mdp4_dsi_encoder_destroy,
};
static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp4_kms *mdp4_kms = get_kms(encoder);
uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
mode = adjusted_mode;
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
ctrl_pol = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW;
/* probably need to get DATA_EN polarity from panel.. */
dsi_hsync_skew = 0; /* get this from panel? */
hsync_start_x = (mode->htotal - mode->hsync_start);
hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
vsync_period = mode->vtotal * mode->htotal;
vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1;
mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL,
MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal));
mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period);
mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len);
mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL,
MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) |
MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x));
mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start);
mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end);
mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol);
mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR,
MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY |
MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff));
mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL,
MDP4_DSI_ACTIVE_HCTL_START(0) |
MDP4_DSI_ACTIVE_HCTL_END(0));
mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew);
mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0);
}
static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder)
{
struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
if (!mdp4_dsi_encoder->enabled)
return;
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
/*
* Wait for a vsync so we know ENABLE=0 has latched before
* the (connector) source of the vsyncs gets disabled;
* otherwise we end up in a funny state if we re-enable
* before the disable latches, and some of the settings
* changes for the new modeset (like the new scanout
* buffer) don't latch properly..
*/
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
mdp4_dsi_encoder->enabled = false;
}
static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
{
struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
if (mdp4_dsi_encoder->enabled)
return;
mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN |
MDP4_DMA_CONFIG_R_BPC(BPC8) |
MDP4_DMA_CONFIG_G_BPC(BPC8) |
MDP4_DMA_CONFIG_B_BPC(BPC8) |
MDP4_DMA_CONFIG_PACK(0x21));
mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1);
mdp4_dsi_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
.mode_set = mdp4_dsi_encoder_mode_set,
.disable = mdp4_dsi_encoder_disable,
.enable = mdp4_dsi_encoder_enable,
};
/* initialize encoder */
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder = NULL;
struct mdp4_dsi_encoder *mdp4_dsi_encoder;
int ret;
mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
if (!mdp4_dsi_encoder) {
ret = -ENOMEM;
goto fail;
}
encoder = &mdp4_dsi_encoder->base;
drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
return encoder;
fail:
if (encoder)
mdp4_dsi_encoder_destroy(encoder);
return ERR_PTR(ret);
}
#endif /* CONFIG_DRM_MSM_DSI */
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
static int mdp4_hw_init(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp4_kms->dev;
u32 dmap_cfg, vg_cfg;
unsigned long clk;
pm_runtime_get_sync(dev->dev);
if (mdp4_kms->rev > 1) {
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
}
mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
/* max read pending cmd config, 3 pending requests: */
mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
clk = clk_get_rate(mdp4_kms->clk);
if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
} else {
dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
}
DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
if (mdp4_kms->rev >= 2)
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);
/* disable CSC matrix / YUV by default: */
mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
pm_runtime_put_sync(dev->dev);
return 0;
}
static void mdp4_enable_commit(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_enable(mdp4_kms);
}
static void mdp4_disable_commit(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_disable(mdp4_kms);
}
static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
/* TODO */
}
static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_crtc *crtc;
for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
mdp4_crtc_wait_for_commit_done(crtc);
}
static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
}
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
/* if we had >1 encoder, we'd need something more clever: */
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_TMDS:
return mdp4_dtv_round_pixclk(encoder, rate);
case DRM_MODE_ENCODER_LVDS:
case DRM_MODE_ENCODER_DSI:
default:
return rate;
}
}
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_put(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
if (mdp4_kms->rpm_enabled)
pm_runtime_disable(dev);
mdp_kms_destroy(&mdp4_kms->base);
kfree(mdp4_kms);
}
static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp4_hw_init,
.irq_preinstall = mdp4_irq_preinstall,
.irq_postinstall = mdp4_irq_postinstall,
.irq_uninstall = mdp4_irq_uninstall,
.irq = mdp4_irq,
.enable_vblank = mdp4_enable_vblank,
.disable_vblank = mdp4_disable_vblank,
.enable_commit = mdp4_enable_commit,
.disable_commit = mdp4_disable_commit,
.flush_commit = mdp4_flush_commit,
.wait_flush = mdp4_wait_flush,
.complete_commit = mdp4_complete_commit,
.get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
.destroy = mdp4_destroy,
},
.set_irqmask = mdp4_set_irqmask,
};
int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
DBG("");
clk_disable_unprepare(mdp4_kms->clk);
clk_disable_unprepare(mdp4_kms->pclk);
clk_disable_unprepare(mdp4_kms->lut_clk);
clk_disable_unprepare(mdp4_kms->axi_clk);
return 0;
}
int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
DBG("");
clk_prepare_enable(mdp4_kms->clk);
clk_prepare_enable(mdp4_kms->pclk);
clk_prepare_enable(mdp4_kms->lut_clk);
clk_prepare_enable(mdp4_kms->axi_clk);
return 0;
}
static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
int intf_type)
{
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct device_node *panel_node;
int dsi_id;
int ret;
switch (intf_type) {
case DRM_MODE_ENCODER_LVDS:
/*
* bail out early if there is no panel node (no need to
* initialize LCDC encoder and LVDS connector)
*/
panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
if (!panel_node)
return 0;
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
of_node_put(panel_node);
return PTR_ERR(encoder);
}
/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
encoder->possible_crtcs = 1 << DMA_P;
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
of_node_put(panel_node);
return PTR_ERR(connector);
}
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
return PTR_ERR(encoder);
}
/* DTV can be hooked to DMA_E: */
encoder->possible_crtcs = 1 << 1;
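/* (DMA_E is the second CRTC created in modeset_init) */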
if (priv->hdmi) {
/* Construct bridge/connector for HDMI: */
ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
}
}
break;
case DRM_MODE_ENCODER_DSI:
/* only DSI1 supported for now */
dsi_id = 0;
if (!priv->dsi[dsi_id])
break;
encoder = mdp4_dsi_encoder_init(dev);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
DRM_DEV_ERROR(dev->dev,
"failed to construct DSI encoder: %d\n", ret);
return ret;
}
/* TODO: Add DMA_S later? */
encoder->possible_crtcs = 1 << DMA_P;
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
return ret;
}
break;
default:
DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
return -EINVAL;
}
return 0;
}
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
static const enum mdp4_pipe rgb_planes[] = {
RGB1, RGB2,
};
static const enum mdp4_pipe vg_planes[] = {
VG1, VG2,
};
static const enum mdp4_dma mdp4_crtcs[] = {
DMA_P, DMA_E,
};
static const char * const mdp4_crtc_names[] = {
"DMA_P", "DMA_E",
};
static const int mdp4_intfs[] = {
DRM_MODE_ENCODER_LVDS,
DRM_MODE_ENCODER_DSI,
DRM_MODE_ENCODER_TMDS,
};
/* construct non-private planes: */
for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
plane = mdp4_plane_init(dev, vg_planes[i], false);
if (IS_ERR(plane)) {
DRM_DEV_ERROR(dev->dev,
"failed to construct plane for VG%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
}
}
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
plane = mdp4_plane_init(dev, rgb_planes[i], true);
if (IS_ERR(plane)) {
DRM_DEV_ERROR(dev->dev,
"failed to construct plane for RGB%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
}
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
mdp4_crtc_names[i]);
ret = PTR_ERR(crtc);
goto fail;
}
priv->num_crtcs++;
}
/*
* we currently set up two relatively fixed paths:
*
* LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
* or
* DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
*
* DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
*/
for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
i, ret);
goto fail;
}
}
return 0;
fail:
return ret;
}
static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
u32 *major, u32 *minor)
{
struct drm_device *dev = mdp4_kms->dev;
u32 version;
mdp4_enable(mdp4_kms);
version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
mdp4_disable(mdp4_kms);
*major = FIELD(version, MDP4_VERSION_MAJOR);
*minor = FIELD(version, MDP4_VERSION_MINOR);
DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
}
static int mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
int irq, ret;
u32 major, minor;
unsigned long max_clk;
/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
max_clk = 266667000;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
return -ENOMEM;
}
ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
goto fail;
}
priv->kms = &mdp4_kms->base.base;
kms = priv->kms;
mdp4_kms->dev = dev;
mdp4_kms->mmio = msm_ioremap(pdev, NULL);
if (IS_ERR(mdp4_kms->mmio)) {
ret = PTR_ERR(mdp4_kms->mmio);
goto fail;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto fail;
}
kms->irq = irq;
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
*/
mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
goto fail;
}
}
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
ret = PTR_ERR(mdp4_kms->clk);
goto fail;
}
mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
goto fail;
}
clk_set_rate(mdp4_kms->clk, max_clk);
read_mdp_hw_revision(mdp4_kms, &major, &minor);
if (major != 4) {
DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
mdp4_kms->rev = minor;
if (mdp4_kms->rev >= 2) {
mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
if (IS_ERR(mdp4_kms->lut_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
ret = PTR_ERR(mdp4_kms->lut_clk);
goto fail;
}
clk_set_rate(mdp4_kms->lut_clk, max_clk);
}
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
mdp4_disable(mdp4_kms);
mdelay(16);
mmu = msm_iommu_new(&pdev->dev, 0);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
} else if (!mmu) {
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys contig buffers for scanout\n");
aspace = NULL;
} else {
aspace = msm_gem_address_space_create(mmu,
"mdp4", 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
}
ret = modeset_init(mdp4_kms);
if (ret) {
DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
mdp4_kms->blank_cursor_bo = NULL;
goto fail;
}
ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
goto fail;
}
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
return 0;
fail:
if (kms)
mdp4_destroy(kms);
return ret;
}
static const struct dev_pm_ops mdp4_pm_ops = {
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
};
static int mdp4_probe(struct platform_device *pdev)
{
return msm_drv_probe(&pdev->dev, mdp4_kms_init);
}
static int mdp4_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
return 0;
}
static const struct of_device_id mdp4_dt_match[] = {
{ .compatible = "qcom,mdp4" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mdp4_dt_match);
static struct platform_driver mdp4_platform_driver = {
.probe = mdp4_probe,
.remove = mdp4_remove,
.shutdown = msm_drv_shutdown,
.driver = {
.name = "mdp4",
.of_match_table = mdp4_dt_match,
.pm = &mdp4_pm_ops,
},
};
void __init msm_mdp4_register(void)
{
platform_driver_register(&mdp4_platform_driver);
}
void __exit msm_mdp4_unregister(void)
{
platform_driver_unregister(&mdp4_platform_driver);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include "mdp4_kms.h"
#define DOWN_SCALE_MAX 8
#define UP_SCALE_MAX 8
struct mdp4_plane {
struct drm_plane base;
const char *name;
enum mdp4_pipe pipe;
uint32_t caps;
uint32_t nformats;
uint32_t formats[32];
bool enabled;
};
#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
/* MDP format helper functions */
static inline
enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
{
bool is_tile = (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE);
if (fb->format->format == DRM_FORMAT_NV12 && is_tile)
return FRAME_TILE_YCBCR_420;
return FRAME_LINEAR;
}
static void mdp4_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb);
static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
static struct mdp4_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
}
/* helper to install properties which are common to planes and crtcs */
static void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
// XXX
}
static int mdp4_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
{
// XXX
return -EINVAL;
}
static const struct drm_plane_funcs mdp4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp4_plane_destroy,
.set_property = mdp4_plane_set_property,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!new_state->fb)
return 0;
drm_gem_plane_helper_prepare_fb(plane, new_state);
return msm_framebuffer_prepare(new_state->fb, kms->aspace, false);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
struct msm_kms *kms = &mdp4_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, kms->aspace, false);
}
static int mdp4_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
return 0;
}
static void mdp4_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
int ret;
ret = mdp4_plane_mode_set(plane,
new_state->crtc, new_state->fb,
new_state->crtc_x, new_state->crtc_y,
new_state->crtc_w, new_state->crtc_h,
new_state->src_x, new_state->src_y,
new_state->src_w, new_state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
}
static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
.prepare_fb = mdp4_plane_prepare_fb,
.cleanup_fb = mdp4_plane_cleanup_fb,
.atomic_check = mdp4_plane_atomic_check,
.atomic_update = mdp4_plane_atomic_update,
};
static void mdp4_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
struct msm_kms *kms = &mdp4_kms->base.base;
enum mdp4_pipe pipe = mdp4_plane->pipe;
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
enum mdp4_pipe pipe, struct csc_cfg *csc)
{
int i;
for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) {
mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i),
csc->matrix[i]);
}
for (i = 0; i < ARRAY_SIZE(csc->post_bias); i++) {
mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i),
csc->pre_bias[i]);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i),
csc->post_bias[i]);
}
for (i = 0; i < ARRAY_SIZE(csc->post_clamp); i++) {
mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i),
csc->pre_clamp[i]);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i),
csc->post_clamp[i]);
}
}
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
const struct mdp_format *format;
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
enum mdp4_frame_format frame_type;
if (!(crtc && fb)) {
DBG("%s: disabled!", mdp4_plane->name);
return 0;
}
frame_type = mdp4_get_frame_format(fb);
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;
DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
format = to_mdp_format(msm_framebuffer_format(fb));
if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
return -ERANGE;
}
if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_w > (src_w * UP_SCALE_MAX)) {
DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_h > (src_h * UP_SCALE_MAX)) {
DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
return -ERANGE;
}
if (src_w != crtc_w) {
uint32_t sel_unit = SCALE_FIR;
op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
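/*
* YUV sources pick a scaler unit based on ratio: pixel
* repeat for upscale, M/N phase for >4x downscale, else
* the default FIR; the phase step tracks the src/dst ratio.
*/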
if (MDP_FORMAT_IS_YUV(format)) {
if (crtc_w > src_w)
sel_unit = SCALE_PIXEL_RPT;
else if (crtc_w <= (src_w / 4))
sel_unit = SCALE_MN_PHASE;
op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit);
phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
src_w, crtc_w);
}
}
if (src_h != crtc_h) {
uint32_t sel_unit = SCALE_FIR;
op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
if (MDP_FORMAT_IS_YUV(format)) {
if (crtc_h > src_h)
sel_unit = SCALE_PIXEL_RPT;
else if (crtc_h <= (src_h / 4))
sel_unit = SCALE_MN_PHASE;
op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit);
phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
src_h, crtc_h);
}
}
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
MDP4_PIPE_SRC_XY_X(src_x) |
MDP4_PIPE_SRC_XY_Y(src_y));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
MDP4_PIPE_DST_XY_X(crtc_x) |
MDP4_PIPE_DST_XY_Y(crtc_y));
mdp4_plane_set_scanout(plane, fb);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) |
MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) |
MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) |
COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
if (MDP_FORMAT_IS_YUV(format)) {
struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB);
op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR;
op_mode |= MDP4_PIPE_OP_MODE_CSC_EN;
mdp4_write_csc_config(mdp4_kms, pipe, csc);
}
mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
if (frame_type != FRAME_LINEAR)
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe),
MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) |
MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h));
return 0;
}
static const char *pipe_names[] = {
"VG1", "VG2",
"RGB1", "RGB2", "RGB3",
"VG3", "VG4",
};
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
return mdp4_plane->pipe;
}
static const uint64_t supported_format_modifiers[] = {
DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane)
{
struct drm_plane *plane = NULL;
struct mdp4_plane *mdp4_plane;
int ret;
enum drm_plane_type type;
mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
if (!mdp4_plane) {
ret = -ENOMEM;
goto fail;
}
plane = &mdp4_plane->base;
mdp4_plane->pipe = pipe_id;
mdp4_plane->name = pipe_names[pipe_id];
mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
ARRAY_SIZE(mdp4_plane->formats),
!pipe_supports_yuv(mdp4_plane->caps));
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
supported_format_modifiers, type, NULL);
if (ret)
goto fail;
drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
mdp4_plane_install_properties(plane, &plane->base);
drm_plane_enable_fb_damage_clips(plane);
return plane;
fail:
if (plane)
mdp4_plane_destroy(plane);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "mdp4_kms.h"
void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask)
{
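/* ack stale status only for interrupts being newly enabled */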
mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
irqmask ^ (irqmask & old_irqmask));
mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
}
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
extern bool dumpstate;
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
if (dumpstate && __ratelimit(&rs)) {
struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
drm_state_dump(mdp4_kms->dev, &p);
}
}
void mdp4_irq_preinstall(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
mdp4_disable(mdp4_kms);
}
int mdp4_irq_postinstall(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
struct mdp_irq *error_handler = &mdp4_kms->error_handler;
error_handler->irq = mdp4_irq_error_handler;
error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
mdp_irq_register(mdp_kms, error_handler);
return 0;
}
void mdp4_irq_uninstall(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
mdp4_disable(mdp4_kms);
}
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
struct drm_device *dev = mdp4_kms->dev;
struct drm_crtc *crtc;
uint32_t status, enable;
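/* only service interrupts that are currently enabled, then ack them */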
enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
VERB("status=%08x", status);
mdp_dispatch_irqs(mdp_kms, status);
drm_for_each_crtc(crtc, dev)
if (status & mdp4_crtc_vblank(crtc))
drm_crtc_handle_vblank(crtc);
return IRQ_HANDLED;
}
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_enable(mdp4_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), true);
mdp4_disable(mdp4_kms);
return 0;
}
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_enable(mdp4_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), false);
mdp4_disable(mdp4_kms);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <[email protected]>
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_mdss.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"
#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
/*
* To enable overall DRM driver logging
* # echo 0x2 > /sys/module/drm/parameters/debug
*
* To enable DRM driver h/w logging
* # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
*
* See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
*/
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
bool danger_status)
{
struct dpu_danger_safe_status status;
struct dpu_kms *kms = s->private;
int i;
if (!kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
memset(&status, 0, sizeof(struct dpu_danger_safe_status));
pm_runtime_get_sync(&kms->pdev->dev);
if (danger_status) {
seq_puts(s, "\nDanger signal status:\n");
if (kms->hw_mdp->ops.get_danger_status)
kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
&status);
} else {
seq_puts(s, "\nSafe signal status:\n");
if (kms->hw_mdp->ops.get_safe_status)
kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
&status);
}
pm_runtime_put_sync(&kms->pdev->dev);
seq_printf(s, "MDP : 0x%x\n", status.mdp);
for (i = SSPP_VIG0; i < SSPP_MAX; i++)
seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
status.sspp[i]);
seq_puts(s, "\n");
return 0;
}
static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
static ssize_t _dpu_plane_danger_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
int len;
char buf[40];
len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
struct drm_plane *plane;
drm_for_each_plane(plane, kms->dev) {
if (plane->fb && plane->state) {
dpu_plane_danger_signal_ctrl(plane, enable);
DPU_DEBUG("plane:%d img:%dx%d ",
plane->base.id, plane->fb->width,
plane->fb->height);
DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
plane->state->src_x >> 16,
plane->state->src_y >> 16,
plane->state->src_w >> 16,
plane->state->src_h >> 16,
plane->state->crtc_x, plane->state->crtc_y,
plane->state->crtc_w, plane->state->crtc_h);
} else {
DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
}
}
}
static ssize_t _dpu_plane_danger_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
unsigned int disable_panic;
int ret;
ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
if (ret)
return ret;
if (disable_panic) {
/* Disable panic signal for all active pipes */
DPU_DEBUG("Disabling danger:\n");
_dpu_plane_set_danger_state(kms, false);
kms->has_danger_ctrl = false;
} else {
/* Enable panic signal for all active pipes */
DPU_DEBUG("Enabling danger:\n");
kms->has_danger_ctrl = true;
_dpu_plane_set_danger_state(kms, true);
}
return count;
}
static const struct file_operations dpu_plane_danger_enable = {
.open = simple_open,
.read = _dpu_plane_danger_read,
.write = _dpu_plane_danger_write,
};
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
struct dentry *entry = debugfs_create_dir("danger", parent);
debugfs_create_file("danger_status", 0600, entry,
dpu_kms, &dpu_debugfs_danger_stats_fops);
debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
debugfs_create_file("disable_danger", 0600, entry,
dpu_kms, &dpu_plane_danger_enable);
}
/*
* Companion structure for dpu_debugfs_create_regset32.
*/
struct dpu_debugfs_regset32 {
uint32_t offset;
uint32_t blk_len;
struct dpu_kms *dpu_kms;
};
static int dpu_regset32_show(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset = s->private;
struct dpu_kms *dpu_kms = regset->dpu_kms;
void __iomem *base;
uint32_t i, addr;
if (!dpu_kms->mmio)
return 0;
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
if (regset->offset & 0xF) {
seq_printf(s, "[%x]", regset->offset & ~0xF);
for (i = 0; i < (regset->offset & 0xF); i += 4)
seq_puts(s, " ");
}
pm_runtime_get_sync(&dpu_kms->pdev->dev);
/* main register output */
for (i = 0; i < regset->blk_len; i += 4) {
addr = regset->offset + i;
if ((addr & 0xF) == 0x0)
seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
seq_printf(s, " %08x", readl_relaxed(base + i));
}
seq_puts(s, "\n");
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_regset32);
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
struct dpu_debugfs_regset32 *regset;
if (WARN_ON(!name || !dpu_kms || !length))
return;
regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
return;
/* make sure offset is a multiple of 4 */
regset->offset = round_down(offset, 4);
regset->blk_len = length;
regset->dpu_kms = dpu_kms;
debugfs_create_file(name, mode, parent, regset, &dpu_regset32_fops);
}
static void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
int i;
if (IS_ERR(entry))
return;
for (i = SSPP_NONE; i < SSPP_MAX; i++) {
struct dpu_hw_sspp *hw = dpu_rm_get_sspp(&dpu_kms->rm, i);
if (!hw)
continue;
_dpu_hw_sspp_init_debugfs(hw, dpu_kms, entry);
}
}
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
struct drm_device *dev;
struct msm_drm_private *priv;
int i;
if (!p)
return -EINVAL;
/* Only create a set of debugfs for the primary node, ignore render nodes */
if (minor->type != DRM_MINOR_PRIMARY)
return 0;
dev = dpu_kms->dev;
priv = dev->dev_private;
entry = debugfs_create_dir("debug", minor->debugfs_root);
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
dpu_debugfs_danger_init(dpu_kms, entry);
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
dpu_debugfs_sspp_init(dpu_kms, entry);
for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
if (priv->dp[i])
msm_dp_debugfs_init(priv->dp[i], minor);
}
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
/* Global/shared object state funcs */
/*
* This is a helper that returns the private state currently in operation.
* Note that this would return the "old_state" if called in the atomic check
* path, and the "new_state" after the atomic swap has been done.
*/
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
return to_dpu_global_state(dpu_kms->global_state.state);
}
/*
 * This acquires the modeset lock set aside for global state and creates
 * a new duplicated private object state.
*/
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_private_state *priv_state;
int ret;
ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
if (ret)
return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(s,
&dpu_kms->global_state);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_dpu_global_state(priv_state);
}
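/*
 * Typical caller pattern (a sketch, not a verbatim caller): an
 * atomic_check path that needs to reserve shared HW would do
 *
 *   global_state = dpu_kms_get_global_state(state);
 *   if (IS_ERR(global_state))
 *           return PTR_ERR(global_state);
 *
 * and then modify the duplicated state; the atomic commit machinery
 * swaps it in on success (or discards it on failure) like any other
 * private object state.
 */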
static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
struct dpu_global_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct dpu_global_state *dpu_state = to_dpu_global_state(state);
kfree(dpu_state);
}
static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
.atomic_duplicate_state = dpu_kms_global_duplicate_state,
.atomic_destroy_state = dpu_kms_global_destroy_state,
};
static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
struct dpu_global_state *state;
drm_modeset_lock_init(&dpu_kms->global_state_lock);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
&state->base,
&dpu_kms_global_state_funcs);
return 0;
}
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
struct device *dpu_dev = dev->dev;
path0 = msm_icc_get(dpu_dev, "mdp0-mem");
path1 = msm_icc_get(dpu_dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
dpu_kms->path[0] = path0;
dpu_kms->num_paths = 1;
if (!IS_ERR_OR_NULL(path1)) {
dpu_kms->path[1] = path1;
dpu_kms->num_paths++;
}
return 0;
}
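/*
 * The "mdp0-mem"/"mdp1-mem" names map to interconnect paths described
 * in the device tree. A minimal sketch of the expected wiring (node
 * and phandle names are illustrative, not taken from a real dtsi):
 *
 *   display-controller@ae01000 {
 *           interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>;
 *           interconnect-names = "mdp0-mem";
 *   };
 *
 * Only the first path is required; the second is picked up when
 * present.
 */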
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
return dpu_crtc_vblank(crtc, true);
}
static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
dpu_crtc_vblank(crtc, false);
}
static void dpu_kms_enable_commit(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
pm_runtime_get_sync(&dpu_kms->pdev->dev);
}
static void dpu_kms_disable_commit(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct drm_crtc *crtc;
for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
if (!crtc->state->active)
continue;
trace_dpu_kms_commit(DRMID(crtc));
dpu_crtc_commit_kickoff(crtc);
}
}
static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct drm_crtc *crtc;
DPU_ATRACE_BEGIN("kms_complete_commit");
for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
dpu_crtc_complete_commit(crtc);
DPU_ATRACE_END("kms_complete_commit");
}
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev;
int ret;
if (!kms || !crtc || !crtc->state) {
DPU_ERROR("invalid params\n");
return;
}
dev = crtc->dev;
if (!crtc->state->enable) {
DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
return;
}
if (!drm_atomic_crtc_effectively_active(crtc->state)) {
DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
return;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
/*
* Wait for post-flush if necessary to delay before
* plane_cleanup. For example, wait for vsync in case of video
* mode panels. This may be a no-op for command mode panels.
*/
trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
if (ret && ret != -EWOULDBLOCK) {
DPU_ERROR("wait for commit done returned %d\n", ret);
break;
}
}
}
static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct drm_crtc *crtc;
for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
dpu_kms_wait_for_commit_done(kms, crtc);
}
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
int i, rc = 0;
if (!(priv->dsi[0] || priv->dsi[1]))
return rc;
/*
 * We support the following configurations:
* - Single DSI host (dsi0 or dsi1)
* - Two independent DSI hosts
* - Bonded DSI0 and DSI1 hosts
*
* TODO: Support swapping DSI0 and DSI1 in the bonded setup.
*/
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
int other = (i + 1) % 2;
if (!priv->dsi[i])
continue;
if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
!msm_dsi_is_master_dsi(priv->dsi[i]))
continue;
memset(&info, 0, sizeof(info));
info.intf_type = INTF_DSI;
info.h_tile_instance[info.num_of_h_tiles++] = i;
if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
info.h_tile_instance[info.num_of_h_tiles++] = other;
info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return PTR_ERR(encoder);
}
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
break;
}
if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
other, rc);
break;
}
}
}
return rc;
}
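/*
 * Worked example of the tile assignment above (illustrative): in a
 * bonded setup where dsi0 is the master, the loop runs with i = 0 and
 * other = 1 and builds a single encoder with num_of_h_tiles = 2 and
 * h_tile_instance = { 0, 1 }; the i = 1 iteration is skipped because
 * dsi1 is bonded but not the master. Two independent hosts instead
 * yield two encoders with one tile each.
 */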
static int _dpu_kms_initialize_displayport(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
int rc;
int i;
for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
if (!priv->dp[i])
continue;
memset(&info, 0, sizeof(info));
info.num_of_h_tiles = 1;
info.h_tile_instance[0] = i;
info.intf_type = INTF_DP;
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return PTR_ERR(encoder);
}
rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
drm_encoder_cleanup(encoder);
return rc;
}
}
return 0;
}
static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
int rc;
if (!priv->hdmi)
return 0;
memset(&info, 0, sizeof(info));
info.num_of_h_tiles = 1;
info.h_tile_instance[0] = 0;
info.intf_type = INTF_HDMI;
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for HDMI display\n");
return PTR_ERR(encoder);
}
rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
drm_encoder_cleanup(encoder);
return rc;
}
return 0;
}
static int _dpu_kms_initialize_writeback(struct drm_device *dev,
struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
const u32 *wb_formats, int n_formats)
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
int rc;
memset(&info, 0, sizeof(info));
info.num_of_h_tiles = 1;
/* use only WB idx 2 instance for DPU */
info.h_tile_instance[0] = WB_2;
info.intf_type = INTF_WB;
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return PTR_ERR(encoder);
}
rc = dpu_writeback_init(dev, encoder, wb_formats,
n_formats);
if (rc) {
DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
drm_encoder_cleanup(encoder);
return rc;
}
return 0;
}
/**
* _dpu_kms_setup_displays - create encoders, bridges and connectors
* for underlying displays
* @dev: Pointer to drm device structure
* @priv: Pointer to private drm device data
* @dpu_kms: Pointer to dpu kms structure
* Returns: Zero on success
*/
static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
int rc = 0;
int i;
rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
if (rc) {
DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
return rc;
}
rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
if (rc) {
DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
return rc;
}
rc = _dpu_kms_initialize_hdmi(dev, priv, dpu_kms);
if (rc) {
DPU_ERROR("initialize HDMI failed, rc = %d\n", rc);
return rc;
}
/* Since WB isn't a driver, check the catalog before initializing */
if (dpu_kms->catalog->wb_count) {
for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
if (dpu_kms->catalog->wb[i].id == WB_2) {
rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
dpu_kms->catalog->wb[i].format_list,
dpu_kms->catalog->wb[i].num_formats);
if (rc) {
DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
return rc;
}
}
}
}
return rc;
}
#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
struct drm_device *dev;
struct drm_plane *primary_planes[MAX_PLANES], *plane;
struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
struct drm_crtc *crtc;
struct drm_encoder *encoder;
unsigned int num_encoders;
struct msm_drm_private *priv;
const struct dpu_mdss_cfg *catalog;
int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
dev = dpu_kms->dev;
priv = dev->dev_private;
catalog = dpu_kms->catalog;
/*
* Create encoder and query display drivers to create
* bridges and connectors
*/
ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
if (ret)
return ret;
num_encoders = 0;
drm_for_each_encoder(encoder, dev)
num_encoders++;
max_crtc_count = min(catalog->mixer_count, num_encoders);
/* Create the planes, keeping track of one primary/cursor per crtc */
for (i = 0; i < catalog->sspp_count; i++) {
enum drm_plane_type type;
if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
&& cursor_planes_idx < max_crtc_count)
type = DRM_PLANE_TYPE_CURSOR;
else if (primary_planes_idx < max_crtc_count)
type = DRM_PLANE_TYPE_PRIMARY;
else
type = DRM_PLANE_TYPE_OVERLAY;
DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
type, catalog->sspp[i].features,
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
(1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
return ret;
}
if (type == DRM_PLANE_TYPE_CURSOR)
cursor_planes[cursor_planes_idx++] = plane;
else if (type == DRM_PLANE_TYPE_PRIMARY)
primary_planes[primary_planes_idx++] = plane;
}
max_crtc_count = min(max_crtc_count, primary_planes_idx);
/* Create one CRTC per encoder */
for (i = 0; i < max_crtc_count; i++) {
crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
return ret;
}
priv->num_crtcs++;
}
/* All CRTCs are compatible with all encoders */
drm_for_each_encoder(encoder, dev)
encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
return 0;
}
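/*
 * Worked example of the plane-type assignment above (illustrative
 * numbers): with 6 SSPPs of which 2 are cursor-capable, 4 mixers and
 * 3 encoders, max_crtc_count starts at min(4, 3) = 3. The two
 * cursor-capable pipes become cursor planes, three of the remaining
 * pipes become primaries and the last one becomes an overlay; three
 * CRTCs are then created, one per primary plane.
 */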
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
int i;
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
/* safe to call these more than once during shutdown */
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
if (dpu_kms->hw_vbif[i]) {
dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
dpu_kms->hw_vbif[i] = NULL;
}
}
}
if (dpu_kms->rm_init)
dpu_rm_destroy(&dpu_kms->rm);
dpu_kms->rm_init = false;
dpu_kms->catalog = NULL;
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
if (dpu_kms->vbif[VBIF_RT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
dpu_kms->vbif[VBIF_RT] = NULL;
if (dpu_kms->hw_mdp)
dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
if (dpu_kms->mmio)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
dpu_kms->mmio = NULL;
}
static void dpu_kms_destroy(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
if (!kms) {
DPU_ERROR("invalid kms\n");
return;
}
dpu_kms = to_dpu_kms(kms);
_dpu_kms_hw_destroy(dpu_kms);
msm_kms_destroy(&dpu_kms->base);
if (dpu_kms->rpm_enabled)
pm_runtime_disable(&dpu_kms->pdev->dev);
}
static int dpu_irq_postinstall(struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
if (!dpu_kms || !dpu_kms->dev)
return -EINVAL;
priv = dpu_kms->dev->dev_private;
if (!priv)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
msm_dp_irq_postinstall(priv->dp[i]);
return 0;
}
static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
int i;
struct dpu_kms *dpu_kms;
const struct dpu_mdss_cfg *cat;
void __iomem *base;
dpu_kms = to_dpu_kms(kms);
cat = dpu_kms->catalog;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
/* dump CTL sub-blocks HW regs info */
for (i = 0; i < cat->ctl_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);
/* dump DSPP sub-blocks HW regs info */
for (i = 0; i < cat->dspp_count; i++) {
base = dpu_kms->mmio + cat->dspp[i].base;
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);
if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
base + cat->dspp[i].sblk->pcc.base, "%s_%s",
cat->dspp[i].name,
cat->dspp[i].sblk->pcc.name);
}
/* dump INTF sub-blocks HW regs info */
for (i = 0; i < cat->intf_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);
/* dump PP sub-blocks HW regs info */
for (i = 0; i < cat->pingpong_count; i++) {
base = dpu_kms->mmio + cat->pingpong[i].base;
msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
cat->pingpong[i].name);
/* the TE2 sub-block has a length of 0, so it is not dumped */
if (cat->pingpong[i].sblk && cat->pingpong[i].sblk->dither.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].sblk->dither.len,
base + cat->pingpong[i].sblk->dither.base,
"%s_%s", cat->pingpong[i].name,
cat->pingpong[i].sblk->dither.name);
}
/* dump SSPP sub-blocks HW regs info */
for (i = 0; i < cat->sspp_count; i++) {
base = dpu_kms->mmio + cat->sspp[i].base;
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);
if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
base + cat->sspp[i].sblk->scaler_blk.base,
"%s_%s", cat->sspp[i].name,
cat->sspp[i].sblk->scaler_blk.name);
if (cat->sspp[i].sblk && cat->sspp[i].sblk->csc_blk.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->csc_blk.len,
base + cat->sspp[i].sblk->csc_blk.base,
"%s_%s", cat->sspp[i].name,
cat->sspp[i].sblk->csc_blk.name);
}
/* dump LM sub-blocks HW regs info */
for (i = 0; i < cat->mixer_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);
/* dump WB sub-blocks HW regs info */
for (i = 0; i < cat->wb_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);
if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
dpu_kms->mmio + cat->mdp[0].base, "top");
msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END, "top_2");
} else {
msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
dpu_kms->mmio + cat->mdp[0].base, "top");
}
/* dump DSC sub-blocks HW regs info */
for (i = 0; i < cat->dsc_count; i++) {
base = dpu_kms->mmio + cat->dsc[i].base;
msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);
if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;
msm_disp_snapshot_add_block(disp_state, enc.len, base + enc.base, "%s_%s",
cat->dsc[i].name, enc.name);
msm_disp_snapshot_add_block(disp_state, ctl.len, base + ctl.base, "%s_%s",
cat->dsc[i].name, ctl.name);
}
}
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_core_irq_preinstall,
.irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_core_irq_uninstall,
.irq = dpu_core_irq,
.enable_commit = dpu_kms_enable_commit,
.disable_commit = dpu_kms_disable_commit,
.flush_commit = dpu_kms_flush_commit,
.wait_flush = dpu_kms_wait_flush,
.complete_commit = dpu_kms_complete_commit,
.enable_vblank = dpu_kms_enable_vblank,
.disable_vblank = dpu_kms_disable_vblank,
.check_modified_format = dpu_format_check_modified_format,
.get_format = dpu_get_msm_format,
.destroy = dpu_kms_destroy,
.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = dpu_kms_debugfs_init,
#endif
};
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
if (!dpu_kms->base.aspace)
return;
mmu = dpu_kms->base.aspace->mmu;
mmu->funcs->detach(mmu);
msm_gem_address_space_put(dpu_kms->base.aspace);
dpu_kms->base.aspace = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct msm_gem_address_space *aspace;
aspace = msm_kms_init_aspace(dpu_kms->dev);
if (IS_ERR(aspace))
return PTR_ERR(aspace);
dpu_kms->base.aspace = aspace;
return 0;
}
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
struct clk *clk;
clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
if (!clk)
return 0;
return clk_get_rate(clk);
}
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
struct drm_device *dev;
int i, rc = -EINVAL;
unsigned long max_core_clk_rate;
u32 core_rev;
if (!kms) {
DPU_ERROR("invalid kms\n");
return rc;
}
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
rc = dpu_kms_global_obj_init(dpu_kms);
if (rc)
return rc;
atomic_set(&dpu_kms->bandwidth_ref, 0);
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
if (IS_ERR(dpu_kms->mmio)) {
rc = PTR_ERR(dpu_kms->mmio);
DPU_ERROR("mdp register memory map failed: %d\n", rc);
dpu_kms->mmio = NULL;
goto error;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
DPU_ERROR("vbif register memory map failed: %d\n", rc);
dpu_kms->vbif[VBIF_RT] = NULL;
goto error;
}
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
}
dpu_kms_parse_data_bus_icc_path(dpu_kms);
rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
if (rc < 0)
goto error;
core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
pr_info("dpu hardware revision:0x%x\n", core_rev);
dpu_kms->catalog = of_device_get_match_data(dev->dev);
if (!dpu_kms->catalog) {
DPU_ERROR("device config not known!\n");
rc = -EINVAL;
goto power_error;
}
/*
* Now we need to read the HW catalog and initialize resources such as
* clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
*/
rc = _dpu_kms_mmu_init(dpu_kms);
if (rc) {
DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
goto power_error;
}
dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
if (IS_ERR(dpu_kms->mdss)) {
rc = PTR_ERR(dpu_kms->mdss);
DPU_ERROR("failed to get MDSS data: %d\n", rc);
goto power_error;
}
if (!dpu_kms->mdss) {
rc = -EINVAL;
DPU_ERROR("NULL MDSS data\n");
goto power_error;
}
rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
goto power_error;
}
dpu_kms->rm_init = true;
dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp,
dpu_kms->mmio,
dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL;
goto power_error;
}
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_hw_vbif *hw;
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
goto power_error;
}
dpu_kms->hw_vbif[vbif->id] = hw;
}
/* TODO: use the same max_freq as in dpu_kms_init */
max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core");
if (!max_core_clk_rate) {
DPU_DEBUG("max core clk rate not determined, using default\n");
max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
}
rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
goto perf_err;
}
dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);
DPU_ERROR("hw_intr init failed: %d\n", rc);
dpu_kms->hw_intr = NULL;
goto hw_intr_init_err;
}
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
/*
 * max crtc width is equal to the max mixer width * 2 and max height
 * is 4K
*/
dev->mode_config.max_width =
dpu_kms->catalog->caps->max_mixer_width * 2;
dev->mode_config.max_height = 4096;
dev->max_vblank_count = 0xffffffff;
/* Disable vblank irqs aggressively for power-saving */
dev->vblank_disable_immediate = true;
/*
* _dpu_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
*/
rc = _dpu_kms_drm_obj_init(dpu_kms);
if (rc) {
DPU_ERROR("modeset init failed: %d\n", rc);
goto drm_obj_init_err;
}
dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
drm_obj_init_err:
hw_intr_init_err:
perf_err:
power_error:
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
return rc;
}
static int dpu_kms_init(struct drm_device *ddev)
{
struct msm_drm_private *priv = ddev->dev_private;
struct device *dev = ddev->dev;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms;
int irq;
struct dev_pm_opp *opp;
int ret = 0;
unsigned long max_freq = ULONG_MAX;
dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
return -ENOMEM;
ret = devm_pm_opp_set_clkname(dev, "core");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV) {
dev_err(dev, "invalid OPP table in device tree\n");
return ret;
}
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
if (ret < 0) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
return ret;
}
dpu_kms->num_clocks = ret;
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
if (!IS_ERR(opp))
dev_pm_opp_put(opp);
dev_pm_opp_set_rate(dev, max_freq);
ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
if (ret) {
DPU_ERROR("failed to init kms, ret=%d\n", ret);
return ret;
}
dpu_kms->dev = ddev;
dpu_kms->pdev = pdev;
pm_runtime_enable(&pdev->dev);
dpu_kms->rpm_enabled = true;
priv->kms = &dpu_kms->base;
irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
if (!irq) {
DPU_ERROR("failed to get irq\n");
return -EINVAL;
}
dpu_kms->base.irq = irq;
return 0;
}
static int dpu_dev_probe(struct platform_device *pdev)
{
return msm_drv_probe(&pdev->dev, dpu_kms_init);
}
static int dpu_dev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
return 0;
}
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
int i;
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
/* Drop the performance state vote */
dev_pm_opp_set_rate(dev, 0);
clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);
for (i = 0; i < dpu_kms->num_paths; i++)
icc_set_bw(dpu_kms->path[i], 0, 0);
return 0;
}
static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *encoder;
struct drm_device *ddev;
ddev = dpu_kms->dev;
rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
return rc;
}
dpu_vbif_init_memtypes(dpu_kms);
drm_for_each_encoder(encoder, ddev)
dpu_encoder_virt_runtime_resume(encoder);
return rc;
}
static const struct dev_pm_ops dpu_pm_ops = {
SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
};
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
{ .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
{ .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
.probe = dpu_dev_probe,
.remove = dpu_dev_remove,
.shutdown = msm_drv_shutdown,
.driver = {
.name = "msm_dpu",
.of_match_table = dpu_dt_match,
.pm = &dpu_pm_ops,
},
};
void __init msm_dpu_register(void)
{
platform_driver_register(&dpu_driver);
}
void __exit msm_dpu_unregister(void)
{
platform_driver_unregister(&dpu_driver);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008
#define VBIF_CLK_FORCE_CTRL1 0x000C
#define VBIF_QOS_REMAP_00 0x0020
#define VBIF_QOS_REMAP_01 0x0024
#define VBIF_QOS_REMAP_10 0x0028
#define VBIF_QOS_REMAP_11 0x002C
#define VBIF_WRITE_GATHER_EN 0x00AC
#define VBIF_IN_RD_LIM_CONF0 0x00B0
#define VBIF_IN_RD_LIM_CONF1 0x00B4
#define VBIF_IN_RD_LIM_CONF2 0x00B8
#define VBIF_IN_WR_LIM_CONF0 0x00C0
#define VBIF_IN_WR_LIM_CONF1 0x00C4
#define VBIF_IN_WR_LIM_CONF2 0x00C8
#define VBIF_OUT_RD_LIM_CONF0 0x00D0
#define VBIF_OUT_WR_LIM_CONF0 0x00D4
#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
#define VBIF_XIN_PND_ERR 0x0190
#define VBIF_XIN_SRC_ERR 0x0194
#define VBIF_XIN_CLR_ERR 0x019C
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000(vbif) (VBIF_XINL_QOS_RP_REMAP_000 + (vbif)->cap->qos_rp_remap_size)
static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
u32 *pnd_errors, u32 *src_errors)
{
struct dpu_hw_blk_reg_map *c;
u32 pnd, src;
if (!vbif)
return;
c = &vbif->hw;
pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
if (pnd_errors)
*pnd_errors = pnd;
if (src_errors)
*src_errors = src;
DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
}
static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 value)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_off;
u32 bit_off;
u32 reg_val;
/*
 * Assume 4 bits per bit field, 8 fields per 32-bit register, so at
 * most 16 bit fields across two registers
*/
if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
return;
c = &vbif->hw;
if (xin_id >= 8) {
xin_id -= 8;
reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
} else {
reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
}
bit_off = (xin_id & 0x7) * 4;
reg_val = DPU_REG_READ(c, reg_off);
reg_val &= ~(0x7 << bit_off);
reg_val |= (value & 0x7) << bit_off;
DPU_REG_WRITE(c, reg_off, reg_val);
}
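/*
 * Worked example of the field packing above: xin_id = 9 selects
 * VBIF_OUT_AXI_AMEMTYPE_CONF1 (9 - 8 = 1) with bit_off = 1 * 4 = 4,
 * so the 3-bit memtype value lands in bits [6:4] of CONF1; xin_id = 3
 * would instead use bits [14:12] of CONF0.
 */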
static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
u32 xin_id, bool rd, u32 limit)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
u32 reg_off;
u32 bit_off;
if (rd)
reg_off = VBIF_IN_RD_LIM_CONF0;
else
reg_off = VBIF_IN_WR_LIM_CONF0;
reg_off += (xin_id / 4) * 4;
bit_off = (xin_id % 4) * 8;
reg_val = DPU_REG_READ(c, reg_off);
reg_val &= ~(0xFF << bit_off);
reg_val |= (limit) << bit_off;
DPU_REG_WRITE(c, reg_off, reg_val);
}
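/*
 * Worked example of the limit packing above: each xin gets one byte,
 * four xins per 32-bit register. For xin_id = 5 on the read side,
 * reg_off = VBIF_IN_RD_LIM_CONF0 + (5 / 4) * 4 = VBIF_IN_RD_LIM_CONF1
 * and bit_off = (5 % 4) * 8 = 8, i.e. the limit occupies bits [15:8]
 * of VBIF_IN_RD_LIM_CONF1.
 */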
static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
u32 xin_id, bool rd)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
u32 reg_off;
u32 bit_off;
u32 limit;
if (rd)
reg_off = VBIF_IN_RD_LIM_CONF0;
else
reg_off = VBIF_IN_WR_LIM_CONF0;
reg_off += (xin_id / 4) * 4;
bit_off = (xin_id % 4) * 8;
reg_val = DPU_REG_READ(c, reg_off);
limit = (reg_val >> bit_off) & 0xFF;
return limit;
}
static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
u32 xin_id, bool enable)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
if (enable)
reg_val |= BIT(xin_id);
else
reg_val &= ~BIT(xin_id);
DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
}
static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
u32 xin_id)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
return (reg_val & BIT(xin_id)) ? true : false;
}
static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
if (!vbif)
return;
c = &vbif->hw;
reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(vbif);
reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
reg_shift = (xin_id & 0x7) * 4;
reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
mask = 0x7 << reg_shift;
reg_val &= ~mask;
reg_val |= (remap_level << reg_shift) & mask;
reg_val_lvl &= ~mask;
reg_val_lvl |= (remap_level << reg_shift) & mask;
DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
}
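/*
 * Worked example of the remap indexing above: for xin_id = 10 and
 * level = 2, reg_high = ((10 & 0x8) >> 3) * 4 + 2 * 8 = 20 and
 * reg_shift = (10 & 0x7) * 4 = 8, so the 3-bit remap value is written
 * to bits [10:8] of both the RP and LVL remap registers at offset
 * base + 20.
 */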
static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_val;
if (!vbif || xin_id >= MAX_XIN_COUNT)
return;
c = &vbif->hw;
reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
reg_val |= BIT(xin_id);
DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
}
static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
unsigned long cap)
{
ops->set_limit_conf = dpu_hw_set_limit_conf;
ops->get_limit_conf = dpu_hw_get_limit_conf;
ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
ops->set_qos_remap = dpu_hw_set_qos_remap;
ops->set_mem_type = dpu_hw_set_mem_type;
ops->clear_errors = dpu_hw_clear_errors;
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}
struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_vbif *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_VBIF;
/*
* Assign ops
*/
c->idx = cfg->id;
c->cap = cfg;
_setup_vbif_ops(&c->ops, c->cap->features);
/* no need to register sub-range in dpu dbg, dump entire vbif io base */
return c;
}
void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
{
kfree(vbif);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
#include "dpu_kms.h"
#include "dpu_trace.h"
#include "dpu_crtc.h"
#include "dpu_core_perf.h"
/**
* enum dpu_perf_mode - performance tuning mode
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
* @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
* @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
* @DPU_PERF_MODE_MAX: maximum value, used for error checking
*/
enum dpu_perf_mode {
DPU_PERF_MODE_NORMAL,
DPU_PERF_MODE_MINIMUM,
DPU_PERF_MODE_FIXED,
DPU_PERF_MODE_MAX
};
/**
* _dpu_core_perf_calc_bw() - to calculate BW per crtc
* @perf_cfg: performance configuration
* @crtc: pointer to a crtc
* Return: returns aggregated BW for all planes in crtc.
*/
static u64 _dpu_core_perf_calc_bw(const struct dpu_perf_cfg *perf_cfg,
struct drm_crtc *crtc)
{
struct drm_plane *plane;
struct dpu_plane_state *pstate;
u64 crtc_plane_bw = 0;
u32 bw_factor;
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
if (!pstate)
continue;
crtc_plane_bw += pstate->plane_fetch_bw;
}
bw_factor = perf_cfg->bw_inefficiency_factor;
if (bw_factor) {
crtc_plane_bw *= bw_factor;
do_div(crtc_plane_bw, 100);
}
return crtc_plane_bw;
}
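/*
 * Worked example (illustrative numbers): two planes fetching 800 MB/s
 * and 400 MB/s sum to 1200 MB/s; with a bw_inefficiency_factor of 120
 * (i.e. 120%), the returned CRTC bandwidth is 1200 * 120 / 100 =
 * 1440 MB/s.
 */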
/**
* _dpu_core_perf_calc_clk() - to calculate clock per crtc
* @perf_cfg: performance configuration
* @crtc: pointer to a crtc
* @state: pointer to a crtc state
* Return: returns max clk for all planes in crtc.
*/
static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct drm_plane *plane;
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
u64 crtc_clk;
u32 clk_factor;
mode = &state->adjusted_mode;
crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
if (!pstate)
continue;
crtc_clk = max(pstate->plane_clk, crtc_clk);
}
clk_factor = perf_cfg->clk_inefficiency_factor;
if (clk_factor) {
crtc_clk *= clk_factor;
do_div(crtc_clk, 100);
}
return crtc_clk;
}
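/*
 * Worked example (illustrative mode): 1920x1080@60 with vtotal = 1125
 * gives a base clock of 1125 * 1920 * 60 = 129.6 MHz; that value is
 * then raised to the largest per-plane clock requirement and finally
 * scaled by clk_inefficiency_factor, e.g. 105% -> ~136.1 MHz.
 */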
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
priv = crtc->dev->dev_private;
return to_dpu_kms(priv->kms);
}
static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct dpu_core_perf_params *perf)
{
const struct dpu_perf_cfg *perf_cfg = core_perf->perf_cfg;
if (!perf_cfg || !crtc || !state || !perf) {
DPU_ERROR("invalid parameters\n");
return;
}
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
perf->bw_ctl = 0;
perf->max_per_pipe_ib = 0;
perf->core_clk_rate = 0;
} else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
perf->bw_ctl = core_perf->fix_core_ab_vote;
perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
perf->core_clk_rate = core_perf->fix_core_clk_rate;
} else {
perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
}
DRM_DEBUG_ATOMIC(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
perf->max_per_pipe_ib, perf->bw_ctl);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
u32 bw, threshold;
u64 bw_sum_of_intfs = 0;
enum dpu_crtc_client_type curr_client_type;
struct dpu_crtc_state *dpu_cstate;
struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
return -EINVAL;
}
kms = _dpu_crtc_get_kms(crtc);
/* we only need a bandwidth check for real-time clients (interfaces) */
if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
return 0;
dpu_cstate = to_dpu_crtc_state(state);
/* obtain new values */
_dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf);
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
dpu_crtc_get_client_type(tmp_crtc) == curr_client_type &&
tmp_crtc != crtc) {
struct dpu_crtc_state *tmp_cstate =
to_dpu_crtc_state(tmp_crtc->state);
DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
tmp_cstate->bw_control);
bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
/* convert bandwidth to kb */
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
threshold = kms->perf.perf_cfg->max_bw_high;
DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
if (!threshold) {
DPU_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
threshold);
return -E2BIG;
}
}
return 0;
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
struct drm_crtc *crtc)
{
struct dpu_core_perf_params perf = { 0 };
enum dpu_crtc_client_type curr_client_type
= dpu_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
struct dpu_crtc_state *dpu_cstate;
int i, ret = 0;
u64 avg_bw;
if (!kms->num_paths)
return 0;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
curr_client_type ==
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
dpu_cstate->new_perf.max_per_pipe_ib);
perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
tmp_crtc->base.id,
dpu_cstate->new_perf.bw_ctl, kms->num_paths);
}
}
avg_bw = perf.bw_ctl;
do_div(avg_bw, (kms->num_paths * 1000)); /* Bps -> kBps, averaged across paths */
for (i = 0; i < kms->num_paths; i++)
icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
return ret;
}
/**
* dpu_core_perf_crtc_release_bw() - request zero bandwidth
* @crtc: pointer to a crtc
*
 * Checks the bandwidth reference count for the crtc; once all pending
 * commit requests are done and no more bandwidth is needed, the
 * bandwidth request is released.
*/
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
struct dpu_kms *kms;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
kms = _dpu_crtc_get_kms(crtc);
dpu_crtc = to_dpu_crtc(crtc);
if (atomic_dec_return(&kms->bandwidth_ref) > 0)
return;
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DRM_DEBUG_ATOMIC("Release BW crtc=%d\n", crtc->base.id);
dpu_crtc->cur_perf.bw_ctl = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc);
}
}
static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
{
u64 clk_rate;
struct drm_crtc *crtc;
struct dpu_crtc_state *dpu_cstate;
if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
return kms->perf.fix_core_clk_rate;
if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM)
return kms->perf.max_core_clk_rate;
clk_rate = 0;
drm_for_each_crtc(crtc, kms->dev) {
if (crtc->enabled) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
}
}
return clk_rate;
}
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed)
{
struct dpu_core_perf_params *new, *old;
bool update_bus = false, update_clk = false;
u64 clk_rate = 0;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
int ret;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return -EINVAL;
}
kms = _dpu_crtc_get_kms(crtc);
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
DRM_DEBUG_ATOMIC("crtc:%d enabled:%d core_clk:%llu\n",
crtc->base.id, crtc->enabled, kms->perf.core_clk_rate);
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
if (crtc->enabled) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
* than current vote for update request.
* 2. new bandwidth vote - "ab or ib vote" is lower
* than current vote at end of commit or stop.
*/
if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
(new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
(!params_changed && ((new->bw_ctl < old->bw_ctl) ||
(new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
DRM_DEBUG_ATOMIC("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
crtc->base.id, params_changed,
new->bw_ctl, old->bw_ctl);
old->bw_ctl = new->bw_ctl;
old->max_per_pipe_ib = new->max_per_pipe_ib;
update_bus = true;
}
if ((params_changed && new->core_clk_rate > old->core_clk_rate) ||
(!params_changed && new->core_clk_rate < old->core_clk_rate)) {
old->core_clk_rate = new->core_clk_rate;
update_clk = true;
}
} else {
DRM_DEBUG_ATOMIC("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
update_bus = true;
update_clk = true;
}
trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl,
new->core_clk_rate, !crtc->enabled, update_bus, update_clk);
if (update_bus) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc);
if (ret) {
DPU_ERROR("crtc-%d: failed to update bus bw vote\n",
crtc->base.id);
return ret;
}
}
/*
* Update the clock after bandwidth vote to ensure
* bandwidth is available before clock rate is increased.
*/
if (update_clk) {
clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
trace_dpu_core_perf_update_clk(kms->dev, !crtc->enabled, clk_rate);
clk_rate = min(clk_rate, kms->perf.max_core_clk_rate);
ret = dev_pm_opp_set_rate(&kms->pdev->dev, clk_rate);
if (ret) {
DPU_ERROR("failed to set core clock rate %llu\n", clk_rate);
return ret;
}
kms->perf.core_clk_rate = clk_rate;
DRM_DEBUG_ATOMIC("update clk rate = %lld HZ\n", clk_rate);
}
return 0;
}
#ifdef CONFIG_DEBUG_FS
static ssize_t _dpu_core_perf_mode_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
u32 perf_mode = 0;
int ret;
ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
if (ret)
return ret;
if (perf_mode >= DPU_PERF_MODE_MAX)
return -EINVAL;
if (perf_mode == DPU_PERF_MODE_FIXED) {
DRM_INFO("fix performance mode\n");
} else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
/* run the driver with max clk and BW vote */
DRM_INFO("minimum performance mode\n");
} else if (perf_mode == DPU_PERF_MODE_NORMAL) {
/* reset the perf tune params to 0 */
DRM_INFO("normal performance mode\n");
}
perf->perf_tune.mode = perf_mode;
return count;
}
static ssize_t _dpu_core_perf_mode_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
int len;
char buf[128];
len = scnprintf(buf, sizeof(buf),
"mode %d\n",
perf->perf_tune.mode);
return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static const struct file_operations dpu_core_perf_mode_fops = {
.open = simple_open,
.read = _dpu_core_perf_mode_read,
.write = _dpu_core_perf_mode_write,
};
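/*
 * Illustrative usage (the debugfs path depends on the DRM minor and
 * is not guaranteed): selecting the fixed mode with the values set
 * through the fix_core_* knobs below could look like
 *
 *   # echo 1000000000 > .../debug/core_perf/fix_core_clk_rate
 *   # echo 2 > .../debug/core_perf/perf_mode
 *
 * where 2 corresponds to DPU_PERF_MODE_FIXED and 0 restores
 * DPU_PERF_MODE_NORMAL.
 */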
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
struct dpu_core_perf *perf = &dpu_kms->perf;
struct dentry *entry;
entry = debugfs_create_dir("core_perf", parent);
debugfs_create_u64("max_core_clk_rate", 0600, entry,
&perf->max_core_clk_rate);
debugfs_create_u64("core_clk_rate", 0600, entry,
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
debugfs_create_u32("threshold_low", 0600, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
debugfs_create_u32("threshold_high", 0600, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0600, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
debugfs_create_u32("min_llcc_ib", 0600, entry,
(u32 *)&perf->perf_cfg->min_llcc_ib);
debugfs_create_u32("min_dram_ib", 0600, entry,
(u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
debugfs_create_u64("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
debugfs_create_u64("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
}
#endif
int dpu_core_perf_init(struct dpu_core_perf *perf,
const struct dpu_perf_cfg *perf_cfg,
unsigned long max_core_clk_rate)
{
perf->perf_cfg = perf_cfg;
perf->max_core_clk_rate = max_core_clk_rate;
return 0;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/iopoll.h>
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_merge3d.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
#define MERGE_3D_MUX 0x000
#define MERGE_3D_MODE 0x004
static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
enum dpu_3d_blend_mode mode_3d)
{
struct dpu_hw_blk_reg_map *c;
u32 data;
c = &merge_3d->hw;
if (mode_3d == BLEND_3D_NONE) {
DPU_REG_WRITE(c, MERGE_3D_MODE, 0);
DPU_REG_WRITE(c, MERGE_3D_MUX, 0);
} else {
data = BIT(0) | ((mode_3d - 1) << 1);
DPU_REG_WRITE(c, MERGE_3D_MODE, data);
}
}
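/*
 * Encoding note with a worked example: for any mode other than
 * BLEND_3D_NONE the register value is BIT(0) (enable) with
 * (mode_3d - 1) in the bits above it, so a mode_3d value of 2
 * programs MERGE_3D_MODE with 0x3 and a value of 3 programs 0x5.
 */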
static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
unsigned long features)
{
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
}
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_merge_3d *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
c->idx = cfg->id;
c->caps = cfg;
_setup_merge_3d_ops(c, c->caps->features);
return c;
}
void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
{
kfree(hw);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <[email protected]>
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"
#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
/*
 * Two, to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and
 * switch between them at runtime
*/
#define NUM_PHYS_ENCODER_TYPES 2
#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
#define MAX_CHANNELS_PER_ENC 2
#define IDLE_SHORT_TIMEOUT 1
#define MAX_HDISPLAY_SPLIT 1080
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
/**
* enum dpu_enc_rc_events - events for resource control state machine
* @DPU_ENC_RC_EVENT_KICKOFF:
* This event happens at NORMAL priority.
* Event that signals the start of the transfer. When this event is
* received, enable MDP/DSI core clocks. Regardless of the previous
* state, the resource should be in ON state at the end of this event.
* @DPU_ENC_RC_EVENT_FRAME_DONE:
* This event happens at INTERRUPT level.
* Event signals the end of the data transfer after the PP FRAME_DONE
* event. At the end of this event, a delayed work is scheduled to go to
* IDLE_PC state after IDLE_TIMEOUT time.
* @DPU_ENC_RC_EVENT_PRE_STOP:
* This event happens at NORMAL priority.
* This event, when received during the ON state, leave the RC STATE
* in the PRE_OFF state. It should be followed by the STOP event as
* part of encoder disable.
* If received during IDLE or OFF states, it will do nothing.
* @DPU_ENC_RC_EVENT_STOP:
* This event happens at NORMAL priority.
* When this event is received, disable all the MDP/DSI core clocks, and
* disable IRQs. It should be called from the PRE_OFF or IDLE states.
* IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
* PRE_OFF is expected when PRE_STOP was executed during the ON state.
* Resource state should be in OFF at the end of the event.
* @DPU_ENC_RC_EVENT_ENTER_IDLE:
* This event happens at NORMAL priority from a work item.
* Event signals that there were no frame updates for IDLE_TIMEOUT time.
* This would disable MDP/DSI core clocks and change the resource state
* to IDLE.
*/
enum dpu_enc_rc_events {
DPU_ENC_RC_EVENT_KICKOFF = 1,
DPU_ENC_RC_EVENT_FRAME_DONE,
DPU_ENC_RC_EVENT_PRE_STOP,
DPU_ENC_RC_EVENT_STOP,
DPU_ENC_RC_EVENT_ENTER_IDLE
};
/*
* enum dpu_enc_rc_states - states that the resource control maintains
* @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
* @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
* @DPU_ENC_RC_STATE_ON: Resource is in ON state
* @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
*/
enum dpu_enc_rc_states {
DPU_ENC_RC_STATE_OFF,
DPU_ENC_RC_STATE_PRE_OFF,
DPU_ENC_RC_STATE_ON,
DPU_ENC_RC_STATE_IDLE
};
/**
* struct dpu_encoder_virt - virtual encoder. Container of one or more physical
* encoders. Virtual encoder manages one "logical" display. Physical
* encoders manage one intf block, tied to a specific panel/sub-panel.
* Virtual encoder defers as much as possible to the physical encoders.
* Virtual encoder registers itself with the DRM Framework as the encoder.
* @base: drm_encoder base class for registration with DRM
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enabled: True if the encoder is active, protected by enc_lock
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
 * Only valid after enable. Cleared at disable.
* @cur_slave: As above but for the slave encoder.
 * @hw_pp: Handle to the pingpong blocks used for the display. The number
 * of pingpong blocks can differ from num_phys_encs.
* @hw_dsc: Handle to the DSC blocks used for the display.
* @dsc_mask: Bitmask of used DSC blocks.
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
* @crtc: Pointer to the currently assigned crtc. Normally you
* would use crtc->state->encoder_mask to determine the
* link between encoder/crtc. However in this case we need
* to track crtc in the disable() hook which is called
* _after_ encoder_mask is cleared.
* @connector: If a mode is set, cached pointer to the active connector
* @crtc_kickoff_cb: Callback into CRTC that will flush & start
* all CTL paths
* @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
* @debugfs_root: Debug file system root file node
* @enc_lock: Lock around physical encoder
* create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
* busy processing current command.
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
* @frame_done_timeout_ms: frame done timeout in ms
* @frame_done_timer: watchdog timer for frame done event
* @disp_info: local copy of msm_display_info struct
 * @idle_pc_supported: indicates whether idle power collapse is supported
* @rc_lock: resource control mutex lock to protect
* virt encoder over various state changes
* @rc_state: resource controller state
* @delayed_off_work: delayed worker to schedule disabling of
* clks and resources after IDLE_TIMEOUT time.
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
* @wide_bus_en: wide bus is enabled on this interface
* @dsc: drm_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
spinlock_t enc_spinlock;
bool enabled;
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
unsigned int dsc_mask;
bool intfs_swapped;
struct drm_crtc *crtc;
struct drm_connector *connector;
struct dentry *debugfs_root;
struct mutex enc_lock;
DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
atomic_t frame_done_timeout_ms;
struct timer_list frame_done_timer;
struct msm_display_info disp_info;
bool idle_pc_supported;
struct mutex rc_lock;
enum dpu_enc_rc_states rc_state;
struct delayed_work delayed_off_work;
struct msm_display_topology topology;
u32 idle_timeout;
bool wide_bus_en;
/* DSC configuration */
struct drm_dsc_config *dsc;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
static u32 dither_matrix[DITHER_MATRIX_SZ] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
return dpu_enc->wide_bus_en;
}
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
int i, num_intf = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->hw_intf && phys->hw_intf->ops.setup_misr
&& phys->hw_intf->ops.collect_misr)
num_intf++;
}
return num_intf;
}
void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
int i;
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
continue;
phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
}
}
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
struct dpu_encoder_virt *dpu_enc;
int i, rc = 0, entries_added = 0;
if (!drm_enc->crtc) {
DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
return -EINVAL;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
continue;
rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
if (rc)
return rc;
entries_added++;
}
return entries_added;
}
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
struct dpu_hw_dither_cfg dither_cfg = { 0 };
if (!hw_pp->ops.setup_dither)
return;
switch (bpc) {
case 6:
dither_cfg.c0_bitdepth = 6;
dither_cfg.c1_bitdepth = 6;
dither_cfg.c2_bitdepth = 6;
dither_cfg.c3_bitdepth = 6;
dither_cfg.temporal_en = 0;
break;
default:
hw_pp->ops.setup_dither(hw_pp, NULL);
return;
}
memcpy(&dither_cfg.matrix, dither_matrix,
sizeof(u32) * DITHER_MATRIX_SZ);
hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}
static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
switch (intf_mode) {
case INTF_MODE_VIDEO:
return "INTF_MODE_VIDEO";
case INTF_MODE_CMD:
return "INTF_MODE_CMD";
case INTF_MODE_WB_BLOCK:
return "INTF_MODE_WB_BLOCK";
case INTF_MODE_WB_LINE:
return "INTF_MODE_WB_LINE";
default:
return "INTF_MODE_UNKNOWN";
}
}
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
DRMID(phys_enc->parent),
dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
DPU_ENCODER_FRAME_EVENT_ERROR);
}
static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
u32 irq_idx, struct dpu_encoder_wait_info *info);
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
int irq,
void (*func)(void *arg, int irq_idx),
struct dpu_encoder_wait_info *wait_info)
{
u32 irq_status;
int ret;
if (!wait_info) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
/* note: do master / slave checking outside */
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
DRMID(phys_enc->parent), func,
irq);
return -EWOULDBLOCK;
}
if (irq < 0) {
DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
DRMID(phys_enc->parent), func);
return 0;
}
DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
DRMID(phys_enc->parent), func,
irq, phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
ret = dpu_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
irq,
wait_info);
if (ret <= 0) {
irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
if (irq_status) {
unsigned long flags;
DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
DRMID(phys_enc->parent), func,
irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
func(phys_enc, irq);
local_irq_restore(flags);
ret = 0;
} else {
ret = -ETIMEDOUT;
DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
DRMID(phys_enc->parent), func,
irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
} else {
ret = 0;
trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
func, irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
return ret;
}
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
return phys ? atomic_read(&phys->vsync_cnt) : 0;
}
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
int linecount = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
phys = dpu_enc ? dpu_enc->cur_master : NULL;
if (phys && phys->ops.get_line_count)
linecount = phys->ops.get_line_count(phys);
return linecount;
}
static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i = 0;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.destroy) {
phys->ops.destroy(phys);
--dpu_enc->num_phys_encs;
dpu_enc->phys_encs[i] = NULL;
}
}
if (dpu_enc->num_phys_encs)
DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
dpu_enc->num_phys_encs);
dpu_enc->num_phys_encs = 0;
mutex_unlock(&dpu_enc->enc_lock);
drm_encoder_cleanup(drm_enc);
mutex_destroy(&dpu_enc->enc_lock);
}
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface)
{
struct dpu_encoder_virt *dpu_enc;
struct split_pipe_cfg cfg = { 0 };
struct dpu_hw_mdp *hw_mdptop;
struct msm_display_info *disp_info;
if (!phys_enc->hw_mdptop || !phys_enc->parent) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
hw_mdptop = phys_enc->hw_mdptop;
disp_info = &dpu_enc->disp_info;
if (disp_info->intf_type != INTF_DSI)
return;
/*
 * disable split modes since the encoder will be operating as the only
 * encoder, either for the entire use case (for example, single DSI) or
 * for this frame in the case of left/right-only partial update.
 */
if (phys_enc->split_role == ENC_ROLE_SOLO) {
if (hw_mdptop->ops.setup_split_pipe)
hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
return;
}
cfg.en = true;
cfg.mode = phys_enc->intf_mode;
cfg.intf = interface;
if (cfg.en && phys_enc->ops.needs_single_flush &&
phys_enc->ops.needs_single_flush(phys_enc))
cfg.split_flush_en = true;
if (phys_enc->split_role == ENC_ROLE_MASTER) {
DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
if (hw_mdptop->ops.setup_split_pipe)
hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
}
}
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
int i, intf_count = 0, num_dsc = 0;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
if (dpu_enc->phys_encs[i])
intf_count++;
/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
if (dpu_enc->dsc)
num_dsc = 2;
return (num_dsc > 0) && (num_dsc > intf_count);
}
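/*
 * The DSC configuration is supplied by the DSI host driver; non-DSI
 * interfaces get no DSC config from this helper.
 */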
static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
struct msm_drm_private *priv = drm_enc->dev->dev_private;
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
int index = dpu_enc->disp_info.h_tile_instance[0];
if (dpu_enc->disp_info.intf_type == INTF_DSI)
return msm_dsi_get_dsc_config(priv->dsi[index]);
return NULL;
}
static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode,
struct drm_crtc_state *crtc_state,
struct drm_dsc_config *dsc)
{
struct msm_display_topology topology = {0};
int i, intf_count = 0;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
if (dpu_enc->phys_encs[i])
intf_count++;
/* Datapath topology selection
*
* Dual display
* 2 LM, 2 INTF ( Split display using 2 interfaces)
*
* Single display
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
* Add dspps to the reservation requirements if ctm is requested
*/
if (intf_count == 2)
topology.num_lm = 2;
else if (!dpu_kms->catalog->caps->has_3d_merge)
topology.num_lm = 1;
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
if (crtc_state->ctm)
topology.num_dspp = topology.num_lm;
topology.num_intf = intf_count;
if (dsc) {
/*
* In case of Display Stream Compression (DSC), we would use
* 2 DSC encoders, 2 layer mixers and 1 interface
* this is power optimal and can drive up to (including) 4k
* screens
*/
topology.num_dsc = 2;
topology.num_lm = 2;
topology.num_intf = 1;
}
return topology;
}
static int dpu_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
struct drm_dsc_config *dsc;
int i = 0;
int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) {
DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
return -EINVAL;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_global_state(crtc_state->state);
if (IS_ERR(global_state))
return PTR_ERR(global_state);
trace_dpu_enc_atomic_check(DRMID(drm_enc));
/* perform atomic check on each physical encoder */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.atomic_check)
ret = phys->ops.atomic_check(phys, crtc_state,
conn_state);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
"mode unsupported, phys idx %d\n", i);
return ret;
}
}
dsc = dpu_encoder_get_dsc_config(drm_enc);
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
/*
 * Release and allocate resources on every modeset.
 * Don't allocate when active is false.
 */
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
dpu_rm_release(global_state, drm_enc);
if (!crtc_state->active_changed || crtc_state->enable)
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
drm_enc, crtc_state, topology);
}
trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
return ret;
}
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
struct msm_display_info *disp_info)
{
struct dpu_vsync_source_cfg vsync_cfg = { 0 };
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct dpu_hw_mdp *hw_mdptop;
struct drm_encoder *drm_enc;
struct dpu_encoder_phys *phys_enc;
int i;
if (!dpu_enc || !disp_info) {
DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
dpu_enc != NULL, disp_info != NULL);
return;
} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
DPU_ERROR("invalid num phys enc %d/%d\n",
dpu_enc->num_phys_encs,
(int) ARRAY_SIZE(dpu_enc->hw_pp));
return;
}
drm_enc = &dpu_enc->base;
/* these pointers are checked in virt_enable_helper */
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
hw_mdptop = dpu_kms->hw_mdp;
if (!hw_mdptop) {
DPU_ERROR("invalid mdptop\n");
return;
}
if (hw_mdptop->ops.setup_vsync_source &&
disp_info->is_cmd_mode) {
for (i = 0; i < dpu_enc->num_phys_encs; i++)
vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
vsync_cfg.pp_count = dpu_enc->num_phys_encs;
vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);
if (disp_info->is_te_using_watchdog_timer)
vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
else
vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys_enc = dpu_enc->phys_encs[i];
if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
vsync_cfg.vsync_source);
}
}
}
static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
{
struct dpu_encoder_virt *dpu_enc;
int i;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.irq_control)
phys->ops.irq_control(phys, enable);
}
}
static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
bool enable)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct dpu_encoder_virt *dpu_enc;
dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
if (!dpu_enc->cur_master) {
DPU_ERROR("encoder master not set\n");
return;
}
if (enable) {
/* enable DPU core clks */
pm_runtime_get_sync(&dpu_kms->pdev->dev);
/* enable all the irq */
_dpu_encoder_irq_control(drm_enc, true);
} else {
/* disable all the irq */
_dpu_encoder_irq_control(drm_enc, false);
/* disable DPU core clks */
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
}
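/*
 * Resource-control event flow (sketch, as implemented by the handlers
 * below):
 *
 *   OFF/IDLE --KICKOFF--> ON
 *   ON --FRAME_DONE--> ON (delayed off work queued when no frame pends)
 *   ON --ENTER_IDLE--> IDLE (irqs gated for video mode, full power-down
 *                            otherwise)
 *   ON/IDLE --PRE_STOP--> PRE_OFF --STOP--> OFF
 */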
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
u32 sw_event)
{
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
bool is_vid_mode = false;
if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
DPU_ERROR("invalid parameters\n");
return -EINVAL;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;
/*
 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
 * STOP events and return early for other events (i.e. wb display).
 */
if (!dpu_enc->idle_pc_supported &&
(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
sw_event != DPU_ENC_RC_EVENT_STOP &&
sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
return 0;
trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
dpu_enc->rc_state, "begin");
switch (sw_event) {
case DPU_ENC_RC_EVENT_KICKOFF:
/* cancel delayed off work, if any */
if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
mutex_lock(&dpu_enc->rc_lock);
/* return if the resource control is already in ON state */
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return -EINVAL;
}
if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
_dpu_encoder_irq_control(drm_enc, true);
else
_dpu_encoder_resource_control_helper(drm_enc, true);
dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
"kickoff");
mutex_unlock(&dpu_enc->rc_lock);
break;
case DPU_ENC_RC_EVENT_FRAME_DONE:
/*
 * the mutex lock is not used as this event happens at interrupt
 * context, and locking is not required as the other events
 * like KICKOFF and STOP do a wait-for-idle before executing
 * the resource_control
 */
if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state);
return -EINVAL;
}
/*
* schedule off work item only when there are no
* frames pending
*/
if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
DRM_DEBUG_KMS("id:%d skip schedule work\n",
DRMID(drm_enc));
return 0;
}
queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
"frame done");
break;
case DPU_ENC_RC_EVENT_PRE_STOP:
/* cancel delayed off work, if any */
if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
mutex_lock(&dpu_enc->rc_lock);
if (is_vid_mode &&
dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
_dpu_encoder_irq_control(drm_enc, true);
}
/* skip if is already OFF or IDLE, resources are off already */
else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
}
dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
"pre stop");
mutex_unlock(&dpu_enc->rc_lock);
break;
case DPU_ENC_RC_EVENT_STOP:
mutex_lock(&dpu_enc->rc_lock);
/* return if the resource control is already in OFF state */
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return -EINVAL;
}
/*
 * we expect to arrive here only from the PRE_OFF or IDLE state,
 * and in the IDLE state the resources are already disabled
 */
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
_dpu_encoder_resource_control_helper(drm_enc, false);
dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
"stop");
mutex_unlock(&dpu_enc->rc_lock);
break;
case DPU_ENC_RC_EVENT_ENTER_IDLE:
mutex_lock(&dpu_enc->rc_lock);
if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
}
/*
* if we are in ON but a frame was just kicked off,
* ignore the IDLE event, it's probably a stale timer event
*/
if (dpu_enc->frame_busy_mask[0]) {
DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
}
if (is_vid_mode)
_dpu_encoder_irq_control(drm_enc, false);
else
_dpu_encoder_resource_control_helper(drm_enc, false);
dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
"idle");
mutex_unlock(&dpu_enc->rc_lock);
break;
default:
DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
sw_event);
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
"error");
break;
}
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
"end");
return 0;
}
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job)
{
struct dpu_encoder_virt *dpu_enc;
int i;
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.prepare_wb_job)
phys->ops.prepare_wb_job(phys, job);
}
}
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job)
{
struct dpu_encoder_virt *dpu_enc;
int i;
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.cleanup_wb_job)
phys->ops.cleanup_wb_job(phys, job);
}
}
static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct dpu_crtc_state *cstate;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
int num_lm, num_ctl, num_pp, num_dsc;
unsigned int dsc_mask = 0;
int i;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
DPU_ERROR("Failed to get global state");
return;
}
trace_dpu_enc_mode_set(DRMID(drm_enc));
/* Query resources that were reserved in the atomic check step. */
num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
ARRAY_SIZE(hw_pp));
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
ARRAY_SIZE(hw_dspp));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_DSC,
hw_dsc, ARRAY_SIZE(hw_dsc));
for (i = 0; i < num_dsc; i++) {
dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
}
dpu_enc->dsc_mask = dsc_mask;
cstate = to_dpu_crtc_state(crtc_state);
for (i = 0; i < num_lm; i++) {
int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
}
cstate->num_mixers = num_lm;
dpu_enc->connector = conn_state->connector;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
return;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
return;
}
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
phys->cached_mode = crtc_state->adjusted_mode;
if (phys->ops.atomic_mode_set)
phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
}
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i;
if (!drm_enc || !drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!dpu_enc || !dpu_enc->cur_master) {
DPU_ERROR("invalid dpu encoder/master\n");
return;
}
if (dpu_enc->disp_info.intf_type == INTF_DP &&
dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
dpu_enc->cur_master->hw_mdptop);
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
if (dpu_enc->disp_info.intf_type == INTF_DSI &&
!WARN_ON(dpu_enc->num_phys_encs == 0)) {
unsigned bpc = dpu_enc->connector->display_info.bpc;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
if (!dpu_enc->hw_pp[i])
continue;
_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
}
}
}
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
mutex_lock(&dpu_enc->enc_lock);
if (!dpu_enc->enabled)
goto out;
if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
_dpu_encoder_virt_enable_helper(drm_enc);
out:
mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
struct drm_atomic_state *state)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
struct drm_display_mode *cur_mode = NULL;
dpu_enc = to_dpu_encoder_virt(drm_enc);
dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
/* always enable slave encoder before master */
if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
ret);
goto out;
}
_dpu_encoder_virt_enable_helper(drm_enc);
dpu_enc->enabled = true;
out:
mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
struct drm_atomic_state *state)
{
struct dpu_encoder_virt *dpu_enc = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *old_state = NULL;
int i = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
if (crtc)
old_state = drm_atomic_get_old_crtc_state(state, crtc);
/*
* The encoder is already disabled if self refresh mode was set earlier,
* in the old_state for the corresponding crtc.
*/
if (old_state && old_state->self_refresh_active)
return;
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->enabled = false;
trace_dpu_enc_disable(DRMID(drm_enc));
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.disable)
phys->ops.disable(phys);
}
/* after phys waits for frame-done, should be no more frames pending */
if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
del_timer_sync(&dpu_enc->frame_done_timer);
}
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
dpu_enc->connector = NULL;
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
mutex_unlock(&dpu_enc->enc_lock);
}
static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
struct dpu_rm *dpu_rm,
enum dpu_intf_type type, u32 controller_id)
{
int i = 0;
if (type == INTF_WB)
return NULL;
for (i = 0; i < catalog->intf_count; i++) {
if (catalog->intf[i].type == type
&& catalog->intf[i].controller_id == controller_id) {
return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
}
}
return NULL;
}
void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
unsigned long lock_flags;
if (!drm_enc || !phy_enc)
return;
DPU_ATRACE_BEGIN("encoder_vblank_callback");
dpu_enc = to_dpu_encoder_virt(drm_enc);
atomic_inc(&phy_enc->vsync_cnt);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
if (dpu_enc->crtc)
dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
DPU_ATRACE_END("encoder_vblank_callback");
}
void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
if (!phy_enc)
return;
DPU_ATRACE_BEGIN("encoder_underrun_callback");
atomic_inc(&phy_enc->underrun_cnt);
/* trigger dump only on the first underrun */
if (atomic_read(&phy_enc->underrun_cnt) == 1)
msm_disp_snapshot_state(drm_enc->dev);
trace_dpu_enc_underrun_cb(DRMID(drm_enc),
atomic_read(&phy_enc->underrun_cnt));
DPU_ATRACE_END("encoder_underrun_callback");
}
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
/* crtc should always be cleared before re-assigning */
WARN_ON(crtc && dpu_enc->crtc);
dpu_enc->crtc = crtc;
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
struct drm_crtc *crtc, bool enable)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
int i;
trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
if (dpu_enc->crtc != crtc) {
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
return;
}
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.control_vblank_irq)
phys->ops.control_vblank_irq(phys, enable);
}
}
void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
void (*frame_event_cb)(void *, u32 event),
void *frame_event_cb_data)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
bool enable;
enable = frame_event_cb ? true : false;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
dpu_enc->crtc_frame_event_cb = frame_event_cb;
dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned int i;
if (event & (DPU_ENCODER_FRAME_EVENT_DONE
| DPU_ENCODER_FRAME_EVENT_ERROR
| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
if (!dpu_enc->frame_busy_mask[0]) {
/*
 * suppress frame_done without waiter,
 * likely autorefresh
 */
trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
return;
}
/* One of the physical encoders has become idle */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] == ready_phys) {
trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
dpu_enc->frame_busy_mask[0]);
clear_bit(i, dpu_enc->frame_busy_mask);
}
}
if (!dpu_enc->frame_busy_mask[0]) {
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
del_timer(&dpu_enc->frame_done_timer);
dpu_encoder_resource_control(drm_enc,
DPU_ENC_RC_EVENT_FRAME_DONE);
if (dpu_enc->crtc_frame_event_cb)
dpu_enc->crtc_frame_event_cb(
dpu_enc->crtc_frame_event_cb_data,
event);
}
} else {
if (dpu_enc->crtc_frame_event_cb)
dpu_enc->crtc_frame_event_cb(
dpu_enc->crtc_frame_event_cb_data, event);
}
}
static void dpu_encoder_off_work(struct work_struct *work)
{
struct dpu_encoder_virt *dpu_enc = container_of(work,
struct dpu_encoder_virt, delayed_off_work.work);
dpu_encoder_resource_control(&dpu_enc->base,
DPU_ENC_RC_EVENT_ENTER_IDLE);
dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
DPU_ENCODER_FRAME_EVENT_IDLE);
}
/**
* _dpu_encoder_trigger_flush - trigger flush for a physical encoder
* @drm_enc: Pointer to drm encoder structure
* @phys: Pointer to physical encoder structure
* @extra_flush_bits: Additional bit mask to include in flush trigger
*/
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
u32 ret = UINT_MAX;
if (!phys->hw_pp) {
DPU_ERROR("invalid pingpong hw\n");
return;
}
ctl = phys->hw_ctl;
if (!ctl->ops.trigger_flush) {
DPU_ERROR("missing trigger cb\n");
return;
}
pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
ctl->ops.trigger_flush(ctl);
if (ctl->ops.get_pending_flush)
ret = ctl->ops.get_pending_flush(ctl);
trace_dpu_enc_trigger_flush(DRMID(drm_enc),
dpu_encoder_helper_get_intf_type(phys->intf_mode),
phys->hw_intf ? phys->hw_intf->idx : -1,
phys->hw_wb ? phys->hw_wb->idx : -1,
pending_kickoff_cnt, ctl->idx,
extra_flush_bits, ret);
}
/**
* _dpu_encoder_trigger_start - trigger start for a physical encoder
* @phys: Pointer to physical encoder structure
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
return;
}
if (!phys->hw_pp) {
DPU_ERROR("invalid pingpong hw\n");
return;
}
if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
phys->ops.trigger_start(phys);
}
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
ctl = phys_enc->hw_ctl;
if (ctl->ops.trigger_start) {
ctl->ops.trigger_start(ctl);
trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
}
}
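/*
 * Wait until the pending-interrupt counter drops to zero, looping on
 * spurious wakeups until the wall-clock deadline computed from
 * info->timeout_ms has passed.
 */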
static int dpu_encoder_helper_wait_event_timeout(
int32_t drm_id,
u32 irq_idx,
struct dpu_encoder_wait_info *info)
{
int rc = 0;
s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
s64 jiffies = msecs_to_jiffies(info->timeout_ms);
s64 time;
do {
rc = wait_event_timeout(*(info->wq),
atomic_read(info->atomic_cnt) == 0, jiffies);
time = ktime_to_ms(ktime_get());
trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
expected_time,
atomic_read(info->atomic_cnt));
/* If we timed out but the counter is still set and the deadline has not passed, it was a spurious wakeup: wait again */
} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
(time < expected_time));
return rc;
}
static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_hw_ctl *ctl;
int rc;
struct drm_encoder *drm_enc;
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
ctl = phys_enc->hw_ctl;
drm_enc = phys_enc->parent;
if (!ctl->ops.reset)
return;
DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
ctl->idx);
rc = ctl->ops.reset(ctl);
if (rc) {
DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
msm_disp_snapshot_state(drm_enc->dev);
}
phys_enc->enable_state = DPU_ENC_ENABLED;
}
/**
* _dpu_encoder_kickoff_phys - handle physical encoder kickoff
* Iterate through the physical encoders and perform consolidated flush
* and/or control start triggering as needed. This is done in the virtual
* encoder rather than the individual physical ones in order to handle
* use cases that require visibility into multiple physical encoders at
* a time.
* @dpu_enc: Pointer to virtual encoder structure
*/
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
struct dpu_hw_ctl *ctl;
uint32_t i, pending_flush;
unsigned long lock_flags;
pending_flush = 0x0;
/* update pending counts and trigger kickoff ctl flush atomically */
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
/* don't perform flush/start operations for slave encoders */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->enable_state == DPU_ENC_DISABLED)
continue;
ctl = phys->hw_ctl;
/*
* This is cleared in frame_done worker, which isn't invoked
* for async commits. So don't set this for async, since it'll
* roll over to the next commit.
*/
if (phys->split_role != ENC_ROLE_SLAVE)
set_bit(i, dpu_enc->frame_busy_mask);
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
else if (ctl->ops.get_pending_flush)
pending_flush |= ctl->ops.get_pending_flush(ctl);
}
/* for split flush, combine pending flush masks and send to master */
if (pending_flush && dpu_enc->cur_master) {
_dpu_encoder_trigger_flush(
&dpu_enc->base,
dpu_enc->cur_master,
pending_flush);
}
_dpu_encoder_trigger_start(dpu_enc->cur_master);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
unsigned int i;
struct dpu_hw_ctl *ctl;
struct msm_display_info *disp_info;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
disp_info = &dpu_enc->disp_info;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
ctl = phys->hw_ctl;
if (ctl->ops.clear_pending_flush)
ctl->ops.clear_pending_flush(ctl);
/* update only for command mode primary ctl */
if ((phys == dpu_enc->cur_master) &&
disp_info->is_cmd_mode
&& ctl->ops.trigger_pending)
ctl->ops.trigger_pending(ctl);
}
}
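/*
 * Illustrative numbers (not from the source): a 1080p60 mode with
 * htotal = 2200 and a 148500 kHz pixel clock gives
 * pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 ps and
 * line_time = 6735 * 2200 / 1000 = 14817 ns per line.
 */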
static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
struct drm_display_mode *mode)
{
u64 pclk_rate;
u32 pclk_period;
u32 line_time;
/*
* For linetime calculation, only operate on master encoder.
*/
if (!dpu_enc->cur_master)
return 0;
if (!dpu_enc->cur_master->ops.get_line_count) {
DPU_ERROR("get_line_count function not defined\n");
return 0;
}
pclk_rate = mode->clock; /* pixel clock in kHz */
if (pclk_rate == 0) {
DPU_ERROR("pclk is 0, cannot calculate line time\n");
return 0;
}
pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
if (pclk_period == 0) {
DPU_ERROR("pclk period is 0\n");
return 0;
}
/*
 * Line time calculation based on pixel clock and HTOTAL:
 * pclk_period is in picoseconds, so after dividing by 1000 the
 * final line time is in ns.
 */
line_time = (pclk_period * mode->htotal) / 1000;
if (line_time == 0) {
DPU_ERROR("line time calculation is 0\n");
return 0;
}
DPU_DEBUG_ENC(dpu_enc,
"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
pclk_rate, pclk_period, line_time);
return line_time;
}
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
struct drm_display_mode *mode;
struct dpu_encoder_virt *dpu_enc;
u32 cur_line;
u32 line_time;
u32 vtotal, time_to_vsync;
ktime_t cur_time;
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!drm_enc->crtc || !drm_enc->crtc->state) {
DPU_ERROR("crtc/crtc state object is NULL\n");
return -EINVAL;
}
mode = &drm_enc->crtc->state->adjusted_mode;
line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
if (!line_time)
return -EINVAL;
cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
vtotal = mode->vtotal;
if (cur_line >= vtotal)
time_to_vsync = line_time * vtotal;
else
time_to_vsync = line_time * (vtotal - cur_line);
if (time_to_vsync == 0) {
DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
vtotal);
return -EINVAL;
}
cur_time = ktime_get();
*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
DPU_DEBUG_ENC(dpu_enc,
"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
cur_line, vtotal, time_to_vsync,
ktime_to_ms(cur_time),
ktime_to_ms(*wakeup_time));
return 0;
}
static u32
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
u32 enc_ip_width)
{
int ssm_delay, total_pixels, soft_slice_per_enc;
soft_slice_per_enc = enc_ip_width / dsc->slice_width;
/*
* minimum number of initial line pixels is a sum of:
* 1. sub-stream multiplexer delay (83 groups for 8bpc,
* 91 for 10 bpc) * 3
* 2. for two soft slice cases, add extra sub-stream multiplexer * 3
* 3. the initial xmit delay
* 4. total pipeline delay through the "lock step" of encoder (47)
* 5. 6 additional pixels as the output of the rate buffer is
* 48 bits wide
*/
ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
if (soft_slice_per_enc > 1)
total_pixels += (ssm_delay * 3);
return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}
static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
struct dpu_hw_dsc *hw_dsc,
struct dpu_hw_pingpong *hw_pp,
struct drm_dsc_config *dsc,
u32 common_mode,
u32 initial_lines)
{
if (hw_dsc->ops.dsc_config)
hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
if (hw_dsc->ops.dsc_config_thresh)
hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
if (hw_pp->ops.setup_dsc)
hw_pp->ops.setup_dsc(hw_pp);
if (hw_dsc->ops.dsc_bind_pingpong_blk)
hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);
if (hw_pp->ops.enable_dsc)
hw_pp->ops.enable_dsc(hw_pp);
if (ctl->ops.update_pending_flush_dsc)
ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}
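/*
 * DSC merge programming: with the fixed 2 DSC / 2 LM / 1 intf topology,
 * each hardware encoder compresses half of the slices of every line, so
 * the per-encoder input width is half the interface input width.
 */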
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
struct drm_dsc_config *dsc)
{
/* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
int this_frame_slices;
int intf_ip_w, enc_ip_w;
int dsc_common_mode;
int pic_width;
u32 initial_lines;
int i;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = dpu_enc->hw_pp[i];
hw_dsc[i] = dpu_enc->hw_dsc[i];
if (!hw_pp[i] || !hw_dsc[i]) {
DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
return;
}
}
pic_width = dsc->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
this_frame_slices = pic_width / dsc->slice_width;
intf_ip_w = this_frame_slices * dsc->slice_width;
/*
 * dsc merge case: when using 2 encoders for the same stream,
 * the number of slices must be the same on both encoders.
 */
enc_ip_w = intf_ip_w / 2;
initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
dsc, dsc_common_mode, initial_lines);
}
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
bool needs_hw_reset = false;
unsigned int i;
dpu_enc = to_dpu_encoder_virt(drm_enc);
trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
/* prepare for next kickoff, may include waiting on previous kickoff */
DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
if (phys->ops.prepare_for_kickoff)
phys->ops.prepare_for_kickoff(phys);
if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
needs_hw_reset = true;
}
DPU_ATRACE_END("enc_prepare_for_kickoff");
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
/* if any phys needs reset, reset all phys, in-order */
if (needs_hw_reset) {
trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
if (dpu_enc->dsc)
dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
unsigned int i;
struct dpu_encoder_phys *phys;
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
DPU_DEBUG("invalid FB not kicking off\n");
return false;
}
}
}
return true;
}
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
unsigned long timeout_ms;
unsigned int i;
DPU_ATRACE_BEGIN("encoder_kickoff");
dpu_enc = to_dpu_encoder_virt(drm_enc);
trace_dpu_enc_kickoff(DRMID(drm_enc));
timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
mod_timer(&dpu_enc->frame_done_timer,
jiffies + msecs_to_jiffies(timeout_ms));
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
if (phys->ops.handle_post_kickoff)
phys->ops.handle_post_kickoff(phys);
}
DPU_ATRACE_END("encoder_kickoff");
}
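/*
 * Clear every blend stage on the mixers assigned to this encoder and
 * mark the corresponding LM flushes pending, so the next commit starts
 * from a clean mixer state.
 */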
static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_mixer_cfg mixer;
int i, num_lm;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_lm[2];
struct dpu_hw_mixer *hw_mixer[2];
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
memset(&mixer, 0, sizeof(mixer));
/* reset all mixers for this encoder */
if (phys_enc->hw_ctl->ops.clear_all_blendstages)
phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
if (phys_enc->hw_ctl->ops.setup_blendstage)
phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
}
}
static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
struct dpu_hw_dsc *hw_dsc,
struct dpu_hw_pingpong *hw_pp)
{
if (hw_dsc->ops.dsc_disable)
hw_dsc->ops.dsc_disable(hw_dsc);
if (hw_pp->ops.disable_dsc)
hw_pp->ops.disable_dsc(hw_pp);
if (hw_dsc->ops.dsc_bind_pingpong_blk)
hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);
if (ctl->ops.update_pending_flush_dsc)
ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}
static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
{
/* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
int i;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = dpu_enc->hw_pp[i];
hw_dsc[i] = dpu_enc->hw_dsc[i];
if (hw_pp[i] && hw_dsc[i])
dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
}
}
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
int i;
struct dpu_encoder_virt *dpu_enc;
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
phys_enc->hw_ctl->ops.reset(ctl);
dpu_encoder_helper_reset_mixers(phys_enc);
/*
* TODO: move the once-only operation like CTL flush/trigger
* into dpu_encoder_virt_disable() and all operations which need
* to be done per phys encoder into the phys_disable() op.
*/
if (phys_enc->hw_wb) {
/* disable the PP block */
if (phys_enc->hw_wb->ops.bind_pingpong_blk)
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
/* mark WB flush as pending */
if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
} else {
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
dpu_enc->phys_encs[i]->hw_intf,
PINGPONG_NONE);
/* mark INTF flush as pending */
if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
dpu_enc->phys_encs[i]->hw_intf->idx);
}
}
/* reset the merge 3D HW block */
if (phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
phys_enc->hw_pp->merge_3d->idx);
}
if (dpu_enc->dsc) {
dpu_encoder_unprep_dsc(dpu_enc);
dpu_enc->dsc = NULL;
}
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
if (phys_enc->hw_intf)
intf_cfg.intf = phys_enc->hw_intf->idx;
if (phys_enc->hw_wb)
intf_cfg.wb = phys_enc->hw_wb->idx;
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
if (ctl->ops.reset_intf_cfg)
ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
ctl->ops.trigger_flush(ctl);
ctl->ops.trigger_start(ctl);
ctl->ops.clear_pending_flush(ctl);
}
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
struct dpu_encoder_virt *dpu_enc = s->private;
int i;
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
atomic_read(&phys->vsync_cnt),
atomic_read(&phys->underrun_cnt));
seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
}
mutex_unlock(&dpu_enc->enc_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
char name[12];
if (!drm_enc->dev) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
/* create overall sub-directory for the encoder */
dpu_enc->debugfs_root = debugfs_create_dir(name,
drm_enc->dev->primary->debugfs_root);
/* don't error check these */
debugfs_create_file("status", 0600,
dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
return 0;
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
return 0;
}
#endif
static int dpu_encoder_late_register(struct drm_encoder *encoder)
{
return _dpu_encoder_init_debugfs(encoder);
}
static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
debugfs_remove_recursive(dpu_enc->debugfs_root);
}
static int dpu_encoder_virt_add_phys_encs(
struct msm_display_info *disp_info,
struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params *params)
{
struct dpu_encoder_phys *enc = NULL;
DPU_DEBUG_ENC(dpu_enc, "\n");
/*
 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders in
 * this function, so check capacity up-front.
 */
if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
ARRAY_SIZE(dpu_enc->phys_encs)) {
DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
dpu_enc->num_phys_encs);
return -EINVAL;
}
if (disp_info->intf_type == INTF_WB) {
enc = dpu_encoder_phys_wb_init(params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
PTR_ERR(enc));
return PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else if (disp_info->is_cmd_mode) {
enc = dpu_encoder_phys_cmd_init(params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
PTR_ERR(enc));
return PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else {
enc = dpu_encoder_phys_vid_init(params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
PTR_ERR(enc));
return PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
}
if (params->split_role == ENC_ROLE_SLAVE)
dpu_enc->cur_slave = enc;
else
dpu_enc->cur_master = enc;
return 0;
}
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info)
{
int ret = 0;
int i = 0;
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) {
DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
return -EINVAL;
}
dpu_enc->cur_master = NULL;
memset(&phys_params, 0, sizeof(phys_params));
phys_params.dpu_kms = dpu_kms;
phys_params.parent = &dpu_enc->base;
phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
WARN_ON(disp_info->num_of_h_tiles < 1);
DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
if (disp_info->intf_type != INTF_WB)
dpu_enc->idle_pc_supported =
dpu_kms->catalog->caps->has_idle_pc;
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
/*
* Left-most tile is at index 0, content is controller id
* h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
* h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
*/
u32 controller_id = disp_info->h_tile_instance[i];
if (disp_info->num_of_h_tiles > 1) {
if (i == 0)
phys_params.split_role = ENC_ROLE_MASTER;
else
phys_params.split_role = ENC_ROLE_SLAVE;
} else {
phys_params.split_role = ENC_ROLE_SOLO;
}
DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
i, controller_id, phys_params.split_role);
phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
disp_info->intf_type,
controller_id);
if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);
if (!phys_params.hw_intf && !phys_params.hw_wb) {
DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
ret = -EINVAL;
break;
}
if (phys_params.hw_intf && phys_params.hw_wb) {
DPU_ERROR_ENC(dpu_enc,
"invalid phys both intf and wb block at idx: %d\n", i);
ret = -EINVAL;
break;
}
ret = dpu_encoder_virt_add_phys_encs(disp_info,
dpu_enc, &phys_params);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
break;
}
}
mutex_unlock(&dpu_enc->enc_lock);
return ret;
}
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
frame_done_timer);
struct drm_encoder *drm_enc = &dpu_enc->base;
u32 event;
if (!drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
return;
} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
return;
}
DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
event = DPU_ENCODER_FRAME_EVENT_ERROR;
trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}
static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
.atomic_disable = dpu_encoder_virt_atomic_disable,
.atomic_enable = dpu_encoder_virt_atomic_enable,
.atomic_check = dpu_encoder_virt_atomic_check,
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
.destroy = dpu_encoder_destroy,
.late_register = dpu_encoder_late_register,
.early_unregister = dpu_encoder_early_unregister,
};
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info)
{
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *drm_enc = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
if (!dpu_enc)
return ERR_PTR(-ENOMEM);
ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
drm_enc_mode, NULL);
if (ret) {
devm_kfree(dev->dev, dpu_enc);
return ERR_PTR(ret);
}
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
mutex_init(&dpu_enc->enc_lock);
mutex_init(&dpu_enc->rc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
if (disp_info->intf_type == INTF_DP)
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
priv->dp[disp_info->h_tile_instance[0]]);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
DPU_DEBUG_ENC(dpu_enc, "created\n");
return &dpu_enc->base;
fail:
DPU_ERROR("failed to create encoder\n");
if (drm_enc)
dpu_encoder_destroy(drm_enc);
return ERR_PTR(ret);
}
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
enum msm_event_wait event)
{
int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
int i, ret = 0;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
switch (event) {
case MSM_ENC_COMMIT_DONE:
fn_wait = phys->ops.wait_for_commit_done;
break;
case MSM_ENC_TX_COMPLETE:
fn_wait = phys->ops.wait_for_tx_complete;
break;
case MSM_ENC_VBLANK:
fn_wait = phys->ops.wait_for_vblank;
break;
default:
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
return -EINVAL;
}
if (fn_wait) {
DPU_ATRACE_BEGIN("wait_for_completion_event");
ret = fn_wait(phys);
DPU_ATRACE_END("wait_for_completion_event");
if (ret)
return ret;
}
}
return ret;
}
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
struct dpu_encoder_virt *dpu_enc = NULL;
if (!encoder) {
DPU_ERROR("invalid encoder\n");
return INTF_MODE_NONE;
}
dpu_enc = to_dpu_encoder_virt(encoder);
if (dpu_enc->cur_master)
return dpu_enc->cur_master->intf_mode;
if (dpu_enc->num_phys_encs)
return dpu_enc->phys_encs[0]->intf_mode;
return INTF_MODE_NONE;
}
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *encoder = phys_enc->parent;
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
return dpu_enc->dsc_mask;
}
void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
struct dpu_enc_phys_init_params *p)
{
int i;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->hw_intf = p->hw_intf;
phys_enc->hw_wb = p->hw_wb;
phys_enc->parent = p->parent;
phys_enc->dpu_kms = p->dpu_kms;
phys_enc->split_role = p->split_role;
phys_enc->enc_spinlock = p->enc_spinlock;
phys_enc->enable_state = DPU_ENC_DISABLED;
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
atomic_set(&phys_enc->vsync_cnt, 0);
atomic_set(&phys_enc->underrun_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2022, Linaro Limited
*/
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_dsc.h"
#define DSC_COMMON_MODE 0x000
#define DSC_ENC 0x004
#define DSC_PICTURE 0x008
#define DSC_SLICE 0x00C
#define DSC_CHUNK_SIZE 0x010
#define DSC_DELAY 0x014
#define DSC_SCALE_INITIAL 0x018
#define DSC_SCALE_DEC_INTERVAL 0x01C
#define DSC_SCALE_INC_INTERVAL 0x020
#define DSC_FIRST_LINE_BPG_OFFSET 0x024
#define DSC_BPG_OFFSET 0x028
#define DSC_DSC_OFFSET 0x02C
#define DSC_FLATNESS 0x030
#define DSC_RC_MODEL_SIZE 0x034
#define DSC_RC 0x038
#define DSC_RC_BUF_THRESH 0x03C
#define DSC_RANGE_MIN_QP 0x074
#define DSC_RANGE_MAX_QP 0x0B0
#define DSC_RANGE_BPG_OFFSET 0x0EC
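/*
 * Per-instance pingpong-mux (CTL) register; note the offsets decrease
 * for higher DSC instances.
 */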
#define DSC_CTL(m) (0x1800 - 0x3FC * (m - DSC_0))
static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
{
struct dpu_hw_blk_reg_map *c = &dsc->hw;
DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
}
static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 data;
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
if (is_cmd_mode)
initial_lines += 1;
slice_last_group_size = (dsc->slice_width + 2) % 3;
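	/*
	 * Worked example (plain arithmetic): slice_width = 540 gives
	 * (540 + 2) % 3 = 2 for the last-group-size field.
	 */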
data = (initial_lines << 20);
data |= (slice_last_group_size << 18);
	/* bpp is in 6.4 fixed-point format; the 4 LSBs carry the fractional part */
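	/*
	 * For example (assuming the drm_dsc_config convention that
	 * bits_per_pixel is already stored in 6.4 fixed point): 8 bpp is
	 * encoded as 8 << 4 = 0x80, and 10.5 bpp as (10 << 4) | 8 = 0xA8.
	 */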
data |= (dsc->bits_per_pixel << 8);
data |= (dsc->block_pred_enable << 7);
data |= (dsc->line_buf_depth << 3);
data |= (dsc->simple_422 << 2);
data |= (dsc->convert_rgb << 1);
data |= dsc->bits_per_component;
DPU_REG_WRITE(c, DSC_ENC, data);
data = dsc->pic_width << 16;
data |= dsc->pic_height;
DPU_REG_WRITE(c, DSC_PICTURE, data);
data = dsc->slice_width << 16;
data |= dsc->slice_height;
DPU_REG_WRITE(c, DSC_SLICE, data);
data = dsc->slice_chunk_size << 16;
DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
data = dsc->initial_dec_delay << 16;
data |= dsc->initial_xmit_delay;
DPU_REG_WRITE(c, DSC_DELAY, data);
data = dsc->initial_scale_value;
DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
data = dsc->scale_decrement_interval;
DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
data = dsc->scale_increment_interval;
DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
data = dsc->first_line_bpg_offset;
DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
data = dsc->nfl_bpg_offset << 16;
data |= dsc->slice_bpg_offset;
DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
data = dsc->initial_offset << 16;
data |= dsc->final_offset;
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
det_thresh_flatness = drm_dsc_flatness_det_thresh(dsc);
data = det_thresh_flatness << 10;
data |= dsc->flatness_max_qp << 5;
data |= dsc->flatness_min_qp;
DPU_REG_WRITE(c, DSC_FLATNESS, data);
data = dsc->rc_model_size;
DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
data = dsc->rc_tgt_offset_low << 18;
data |= dsc->rc_tgt_offset_high << 14;
data |= dsc->rc_quant_incr_limit1 << 9;
data |= dsc->rc_quant_incr_limit0 << 4;
data |= dsc->rc_edge_factor;
DPU_REG_WRITE(c, DSC_RC, data);
}
static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
struct drm_dsc_config *dsc)
{
struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 off;
int i;
off = DSC_RC_BUF_THRESH;
	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
off += 4;
}
off = DSC_RANGE_MIN_QP;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_min_qp);
off += 4;
}
off = DSC_RANGE_MAX_QP;
	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_max_qp);
off += 4;
}
off = DSC_RANGE_BPG_OFFSET;
	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
off += 4;
}
}
static void dpu_hw_dsc_bind_pingpong_blk(
struct dpu_hw_dsc *hw_dsc,
const enum dpu_pingpong pp)
{
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
int mux_cfg = 0xF;
u32 dsc_ctl_offset;
dsc_ctl_offset = DSC_CTL(hw_dsc->idx);
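	/*
	 * 0xF appears to be the "no pingpong bound" mux value; it is
	 * overwritten below whenever a valid pingpong index is passed in.
	 */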
if (pp)
mux_cfg = (pp - PINGPONG_0) & 0x7;
if (pp)
DRM_DEBUG_KMS("Binding dsc:%d to pp:%d\n",
hw_dsc->idx - DSC_0, pp - PINGPONG_0);
else
DRM_DEBUG_KMS("Unbinding dsc:%d from any pp\n",
hw_dsc->idx - DSC_0);
DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
}
static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
unsigned long cap)
{
ops->dsc_disable = dpu_hw_dsc_disable;
ops->dsc_config = dpu_hw_dsc_config;
ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
if (cap & BIT(DPU_DSC_OUTPUT_CTRL))
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
}
struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_DSC;
c->idx = cfg->id;
c->caps = cfg;
_setup_dsc_ops(&c->ops, c->caps->features);
return c;
}
void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
{
kfree(dsc);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT4(lm) \
(0xB8 + (((lm) - LM_0) * 0x004))
#define CTL_TOP 0x014
#define CTL_FLUSH 0x018
#define CTL_START 0x01C
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
#define DSC_IDX 22
#define INTF_IDX 31
#define WB_IDX 16
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
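/*
 * The *_IDX values above are bit positions in CTL_FLUSH: on active-CTL
 * hardware, setting e.g. BIT(INTF_IDX) (bit 31) tells the flush logic to
 * also consume the dedicated CTL_INTF_FLUSH register, as can be seen in
 * dpu_hw_ctl_trigger_flush_v1() below.
 */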
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
1, 2, 3, 4, 5};
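/*
 * A reading of fetch_tbl above (a sketch assuming the dpu_sspp enum order
 * SSPP_NONE, VIG0..3, RGB0..3, DMA0..5): the VIG pipes map to
 * CTL_FETCH_PIPE_ACTIVE bits 16..19, the RGB pipes have no fetch-active
 * bit, and the DMA pipes map to bits 0..5. See
 * dpu_hw_ctl_set_fetch_pipe_active() below.
 */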
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
enum dpu_lm lm)
{
int i;
int stages = -EINVAL;
for (i = 0; i < count; i++) {
if (lm == mixer[i].id) {
stages = mixer[i].sblk->maxblendstages;
break;
}
}
return stages;
}
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
return DPU_REG_READ(c, CTL_FLUSH);
}
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
ctx->pending_flush_mask = 0x0;
ctx->pending_intf_flush_mask = 0;
ctx->pending_wb_flush_mask = 0;
ctx->pending_merge_3d_flush_mask = 0;
ctx->pending_dsc_flush_mask = 0;
memset(ctx->pending_dspp_flush_mask, 0,
sizeof(ctx->pending_dspp_flush_mask));
}
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
trace_dpu_hw_ctl_update_pending_flush(flushbits,
ctx->pending_flush_mask);
ctx->pending_flush_mask |= flushbits;
}
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
return ctx->pending_flush_mask;
}
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
int dspp;
if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
ctx->pending_merge_3d_flush_mask);
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
if (ctx->pending_flush_mask & BIT(WB_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
ctx->pending_wb_flush_mask);
if (ctx->pending_flush_mask & BIT(DSPP_IDX))
for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
DPU_REG_WRITE(&ctx->hw,
CTL_DSPP_n_FLUSH(dspp - DSPP_0),
ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
}
if (ctx->pending_flush_mask & BIT(DSC_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
ctx->pending_dsc_flush_mask);
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
switch (sspp) {
case SSPP_VIG0:
ctx->pending_flush_mask |= BIT(0);
break;
case SSPP_VIG1:
ctx->pending_flush_mask |= BIT(1);
break;
case SSPP_VIG2:
ctx->pending_flush_mask |= BIT(2);
break;
case SSPP_VIG3:
ctx->pending_flush_mask |= BIT(18);
break;
case SSPP_RGB0:
ctx->pending_flush_mask |= BIT(3);
break;
case SSPP_RGB1:
ctx->pending_flush_mask |= BIT(4);
break;
case SSPP_RGB2:
ctx->pending_flush_mask |= BIT(5);
break;
case SSPP_RGB3:
ctx->pending_flush_mask |= BIT(19);
break;
case SSPP_DMA0:
ctx->pending_flush_mask |= BIT(11);
break;
case SSPP_DMA1:
ctx->pending_flush_mask |= BIT(12);
break;
case SSPP_DMA2:
ctx->pending_flush_mask |= BIT(24);
break;
case SSPP_DMA3:
ctx->pending_flush_mask |= BIT(25);
break;
case SSPP_DMA4:
ctx->pending_flush_mask |= BIT(13);
break;
case SSPP_DMA5:
ctx->pending_flush_mask |= BIT(14);
break;
case SSPP_CURSOR0:
ctx->pending_flush_mask |= BIT(22);
break;
case SSPP_CURSOR1:
ctx->pending_flush_mask |= BIT(23);
break;
default:
break;
}
}
static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
switch (lm) {
case LM_0:
ctx->pending_flush_mask |= BIT(6);
break;
case LM_1:
ctx->pending_flush_mask |= BIT(7);
break;
case LM_2:
ctx->pending_flush_mask |= BIT(8);
break;
case LM_3:
ctx->pending_flush_mask |= BIT(9);
break;
case LM_4:
ctx->pending_flush_mask |= BIT(10);
break;
case LM_5:
ctx->pending_flush_mask |= BIT(20);
break;
default:
break;
}
ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
switch (intf) {
case INTF_0:
ctx->pending_flush_mask |= BIT(31);
break;
case INTF_1:
ctx->pending_flush_mask |= BIT(30);
break;
case INTF_2:
ctx->pending_flush_mask |= BIT(29);
break;
case INTF_3:
ctx->pending_flush_mask |= BIT(28);
break;
default:
break;
}
}
static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
switch (wb) {
case WB_0:
case WB_1:
case WB_2:
ctx->pending_flush_mask |= BIT(WB_IDX);
break;
default:
break;
}
}
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
ctx->pending_flush_mask |= BIT(WB_IDX);
}
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
ctx->pending_flush_mask |= BIT(INTF_IDX);
}
static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
enum dpu_merge_3d merge_3d)
{
ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}
static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
enum dpu_dsc dsc_num)
{
ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
ctx->pending_flush_mask |= BIT(DSC_IDX);
}
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp, u32 dspp_sub_blk)
{
switch (dspp) {
case DSPP_0:
ctx->pending_flush_mask |= BIT(13);
break;
case DSPP_1:
ctx->pending_flush_mask |= BIT(14);
break;
case DSPP_2:
ctx->pending_flush_mask |= BIT(15);
break;
case DSPP_3:
ctx->pending_flush_mask |= BIT(21);
break;
default:
break;
}
}
static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
if (dspp >= DSPP_MAX)
return;
switch (dspp_sub_blk) {
case DPU_DSPP_PCC:
ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
break;
default:
return;
}
ctx->pending_flush_mask |= BIT(DSPP_IDX);
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
ktime_t timeout;
u32 status;
timeout = ktime_add_us(ktime_get(), timeout_us);
/*
	 * The MDP takes around 30us to finish resetting its CTL path;
	 * sleep 20-50us between polls so the reset is normally complete
	 * by the first re-check.
*/
do {
status = DPU_REG_READ(c, CTL_SW_RESET);
status &= 0x1;
if (status)
usleep_range(20, 50);
} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
return status;
}
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
return -EINVAL;
return 0;
}
static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 status;
status = DPU_REG_READ(c, CTL_SW_RESET);
status &= 0x01;
if (!status)
return 0;
pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
return -EINVAL;
}
return 0;
}
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int i;
for (i = 0; i < ctx->mixer_count; i++) {
enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}
struct ctl_blend_config {
int idx, shift, ext_shift;
};
static const struct ctl_blend_config ctl_blend_config[][2] = {
[SSPP_NONE] = { { -1 }, { -1 } },
[SSPP_MAX] = { { -1 }, { -1 } },
[SSPP_VIG0] = { { 0, 0, 0 }, { 3, 0 } },
[SSPP_VIG1] = { { 0, 3, 2 }, { 3, 4 } },
[SSPP_VIG2] = { { 0, 6, 4 }, { 3, 8 } },
[SSPP_VIG3] = { { 0, 26, 6 }, { 3, 12 } },
[SSPP_RGB0] = { { 0, 9, 8 }, { -1 } },
[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
[SSPP_DMA2] = { { 2, 0 }, { 2, 16 } },
[SSPP_DMA3] = { { 2, 4 }, { 2, 20 } },
[SSPP_DMA4] = { { 4, 0 }, { 4, 8 } },
[SSPP_DMA5] = { { 4, 4 }, { 4, 12 } },
[SSPP_CURSOR0] = { { 1, 20 }, { -1 } },
[SSPP_CURSOR1] = { { 1, 26 }, { -1 } },
};
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 mix, ext, mix_ext;
u32 mixercfg[5] = { 0 };
int i, j;
int stages;
int pipes_per_stage;
stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
if (stages < 0)
return;
if (test_bit(DPU_MIXER_SOURCESPLIT,
&ctx->mixer_hw_caps->features))
pipes_per_stage = PIPES_PER_STAGE;
else
pipes_per_stage = 1;
mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
if (!stage_cfg)
goto exit;
for (i = 0; i <= stages; i++) {
/* overflow to ext register if 'i + 1 > 7' */
mix = (i + 1) & 0x7;
ext = i >= 7;
mix_ext = (i + 1) & 0xf;
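		/*
		 * Worked example: stage i = 7 gives mix = (7 + 1) & 0x7 = 0
		 * with ext = 1, so the 3-bit CTL_LAYER field reads 0 and the
		 * EXT register supplies the overflow bit; registers with
		 * 4-bit fields instead take mix_ext = 8 directly.
		 */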
for (j = 0 ; j < pipes_per_stage; j++) {
enum dpu_sspp_multirect_index rect_index =
stage_cfg->multirect_index[i][j];
enum dpu_sspp pipe = stage_cfg->stage[i][j];
const struct ctl_blend_config *cfg =
&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];
/*
			 * CTL_LAYER has 3-bit fields (plus overflow bits in
			 * the EXT register); all EXT registers have 4-bit
			 * fields.
*/
if (cfg->idx == -1) {
continue;
} else if (cfg->idx == 0) {
mixercfg[0] |= mix << cfg->shift;
mixercfg[1] |= ext << cfg->ext_shift;
} else {
mixercfg[cfg->idx] |= mix_ext << cfg->shift;
}
}
}
exit:
DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
u32 mode_sel = 0;
	/* CTL_TOP[31:28] carries a group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in
	 * SW, since the power-on reset value does not disable it.
	 */
if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
if (cfg->dsc)
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_cfg = 0;
intf_cfg |= (cfg->intf & 0xF) << 4;
if (cfg->mode_3d) {
intf_cfg |= BIT(19);
intf_cfg |= (cfg->mode_3d - 0x1) << 20;
}
if (cfg->wb)
intf_cfg |= (cfg->wb & 0x3) + 2;
switch (cfg->intf_mode_sel) {
case DPU_CTL_MODE_SEL_VID:
intf_cfg &= ~BIT(17);
intf_cfg &= ~(0x3 << 15);
break;
case DPU_CTL_MODE_SEL_CMD:
intf_cfg |= BIT(17);
intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
break;
default:
pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
return;
}
DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
/*
	 * This API resets each portion of the CTL path: it clears the
	 * SSPPs staged on the LM, the merge_3d block, interfaces,
	 * writeback etc. to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to get a proper
	 * teardown of the writeback session, but upon further
	 * validation it can be extended to all interfaces.
*/
if (cfg->merge_3d) {
merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
merge3d_active);
}
dpu_hw_ctl_clear_all_blendstages(ctx);
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
if (cfg->wb) {
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
wb_active &= ~BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
}
if (cfg->dsc) {
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
dsc_active &= ~cfg->dsc;
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
}
}
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active)
{
int i;
u32 val = 0;
if (fetch_active) {
for (i = 0; i < SSPP_MAX; i++) {
if (test_bit(i, fetch_active) &&
fetch_tbl[i] != CTL_INVALID_BIT)
val |= BIT(fetch_tbl[i]);
}
}
DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf_v1;
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->is_started = dpu_hw_ctl_is_started;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
else
ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}
struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
u32 mixer_count,
const struct dpu_lm_cfg *mixer)
{
struct dpu_hw_ctl *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_CTL;
c->caps = cfg;
_setup_ctl_ops(&c->ops, c->caps->features);
c->idx = cfg->id;
c->mixer_count = mixer_count;
c->mixer_hw_caps = mixer;
return c;
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
kfree(ctx);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"
/*
* Register offsets in MDSS register file for the interrupt registers
* w.r.t. the MDP base
*/
#define MDP_INTF_OFF(intf) (0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf) (MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf) (MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf) (MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf) (0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf) (MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf) (MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf) (MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4) (0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf) (0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf) (0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)
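/*
 * Offset sanity check (plain arithmetic on the macros above): on DPU >= 7.x,
 * INTF1 sits at 0x34000 + 0x1000 = 0x35000, so its INTR_EN/STATUS/CLEAR
 * registers are at 0x351c0/0x351c4/0x351c8 and its tear block starts at
 * 0x35800.
 */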
/**
 * struct dpu_intr_reg - a single set of DPU interrupt registers
* @clr_off: offset to CLEAR reg
* @en_off: offset to ENABLE reg
* @status_off: offset to STATUS reg
*/
struct dpu_intr_reg {
u32 clr_off;
u32 en_off;
u32 status_off;
};
/*
* dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
*/
static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
[MDP_SSPP_TOP0_INTR] = {
INTR_CLEAR,
INTR_EN,
INTR_STATUS
},
[MDP_SSPP_TOP0_INTR2] = {
INTR2_CLEAR,
INTR2_EN,
INTR2_STATUS
},
[MDP_SSPP_TOP0_HIST_INTR] = {
HIST_INTR_CLEAR,
HIST_INTR_EN,
HIST_INTR_STATUS
},
[MDP_INTF0_INTR] = {
MDP_INTF_INTR_CLEAR(0),
MDP_INTF_INTR_EN(0),
MDP_INTF_INTR_STATUS(0)
},
[MDP_INTF1_INTR] = {
MDP_INTF_INTR_CLEAR(1),
MDP_INTF_INTR_EN(1),
MDP_INTF_INTR_STATUS(1)
},
[MDP_INTF2_INTR] = {
MDP_INTF_INTR_CLEAR(2),
MDP_INTF_INTR_EN(2),
MDP_INTF_INTR_STATUS(2)
},
[MDP_INTF3_INTR] = {
MDP_INTF_INTR_CLEAR(3),
MDP_INTF_INTR_EN(3),
MDP_INTF_INTR_STATUS(3)
},
[MDP_INTF4_INTR] = {
MDP_INTF_INTR_CLEAR(4),
MDP_INTF_INTR_EN(4),
MDP_INTF_INTR_STATUS(4)
},
[MDP_INTF5_INTR] = {
MDP_INTF_INTR_CLEAR(5),
MDP_INTF_INTR_EN(5),
MDP_INTF_INTR_STATUS(5)
},
[MDP_INTF1_TEAR_INTR] = {
MDP_INTF_INTR_TEAR_CLEAR(1),
MDP_INTF_INTR_TEAR_EN(1),
MDP_INTF_INTR_TEAR_STATUS(1)
},
[MDP_INTF2_TEAR_INTR] = {
MDP_INTF_INTR_TEAR_CLEAR(2),
MDP_INTF_INTR_TEAR_EN(2),
MDP_INTF_INTR_TEAR_STATUS(2)
},
[MDP_AD4_0_INTR] = {
MDP_AD4_INTR_CLEAR_OFF(0),
MDP_AD4_INTR_EN_OFF(0),
MDP_AD4_INTR_STATUS_OFF(0),
},
[MDP_AD4_1_INTR] = {
MDP_AD4_INTR_CLEAR_OFF(1),
MDP_AD4_INTR_EN_OFF(1),
MDP_AD4_INTR_STATUS_OFF(1),
},
};
/*
* dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
*/
static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
[MDP_SSPP_TOP0_INTR] = {
INTR_CLEAR,
INTR_EN,
INTR_STATUS
},
[MDP_SSPP_TOP0_INTR2] = {
INTR2_CLEAR,
INTR2_EN,
INTR2_STATUS
},
[MDP_SSPP_TOP0_HIST_INTR] = {
HIST_INTR_CLEAR,
HIST_INTR_EN,
HIST_INTR_STATUS
},
[MDP_INTF0_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(0),
MDP_INTF_REV_7xxx_INTR_EN(0),
MDP_INTF_REV_7xxx_INTR_STATUS(0)
},
[MDP_INTF1_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(1),
MDP_INTF_REV_7xxx_INTR_EN(1),
MDP_INTF_REV_7xxx_INTR_STATUS(1)
},
[MDP_INTF1_TEAR_INTR] = {
MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
},
[MDP_INTF2_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(2),
MDP_INTF_REV_7xxx_INTR_EN(2),
MDP_INTF_REV_7xxx_INTR_STATUS(2)
},
[MDP_INTF2_TEAR_INTR] = {
MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
},
[MDP_INTF3_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(3),
MDP_INTF_REV_7xxx_INTR_EN(3),
MDP_INTF_REV_7xxx_INTR_STATUS(3)
},
[MDP_INTF4_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(4),
MDP_INTF_REV_7xxx_INTR_EN(4),
MDP_INTF_REV_7xxx_INTR_STATUS(4)
},
[MDP_INTF5_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(5),
MDP_INTF_REV_7xxx_INTR_EN(5),
MDP_INTF_REV_7xxx_INTR_STATUS(5)
},
[MDP_INTF6_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(6),
MDP_INTF_REV_7xxx_INTR_EN(6),
MDP_INTF_REV_7xxx_INTR_STATUS(6)
},
[MDP_INTF7_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(7),
MDP_INTF_REV_7xxx_INTR_EN(7),
MDP_INTF_REV_7xxx_INTR_STATUS(7)
},
[MDP_INTF8_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(8),
MDP_INTF_REV_7xxx_INTR_EN(8),
MDP_INTF_REV_7xxx_INTR_STATUS(8)
},
};
#define DPU_IRQ_REG(irq_idx)	((irq_idx) / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT((irq_idx) % 32))
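/*
 * irq_idx packs (register, bit) as reg_idx * 32 + bit, mirroring
 * DPU_IRQ_IDX(). For example, irq_idx 37 selects interrupt register 1,
 * bit 5, so DPU_IRQ_MASK(37) == BIT(5).
 */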
/**
* dpu_core_irq_callback_handler - dispatch core interrupts
* @dpu_kms: Pointer to DPU's KMS structure
* @irq_idx: interrupt index
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
VERB("irq_idx=%d\n", irq_idx);
	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
		return;
	}
	atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
	/*
	 * Perform the registered callback
	 */
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
u32 irq_status;
u32 enable_mask;
int bit;
unsigned long irq_flags;
if (!intr)
return IRQ_NONE;
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
if (!test_bit(reg_idx, &intr->irq_mask))
continue;
/* Read interrupt status */
irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);
/* Read enable mask */
enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);
/* and clear the interrupt */
if (irq_status)
DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
irq_status);
/* Finally update IRQ status based on enable mask */
irq_status &= enable_mask;
if (!irq_status)
continue;
/*
* Search through matching intr status.
*/
while ((bit = ffs(irq_status)) != 0) {
irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
dpu_core_irq_callback_handler(dpu_kms, irq_idx);
/*
			 * When the callback finishes, clear the irq_status
			 * bit with the matching mask. Once irq_status is
			 * fully cleared, the search can stop.
*/
irq_status &= ~BIT(bit - 1);
}
}
/* ensure register writes go through */
wmb();
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
return IRQ_HANDLED;
}
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
int reg_idx;
const struct dpu_intr_reg *reg;
const char *dbgstr = NULL;
uint32_t cache_irq_mask;
if (!intr)
return -EINVAL;
if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
*/
assert_spin_locked(&intr->irq_lock);
reg_idx = DPU_IRQ_REG(irq_idx);
reg = &intr->intr_set[reg_idx];
/* Is this interrupt register supported on the platform */
if (WARN_ON(!reg->en_off))
return -EINVAL;
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
dbgstr = "already ";
} else {
dbgstr = "";
cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupts */
DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
/* Enabling interrupts with the new mask */
DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
/* ensure register write goes through */
wmb();
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
}
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
int reg_idx;
const struct dpu_intr_reg *reg;
const char *dbgstr = NULL;
uint32_t cache_irq_mask;
if (!intr)
return -EINVAL;
if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
*/
assert_spin_locked(&intr->irq_lock);
reg_idx = DPU_IRQ_REG(irq_idx);
reg = &intr->intr_set[reg_idx];
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
dbgstr = "already ";
} else {
dbgstr = "";
cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
/* Disable interrupts based on the new mask */
DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupts */
DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
/* ensure register write goes through */
wmb();
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
}
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int i;
if (!intr)
return;
for (i = 0; i < MDP_INTR_MAX; i++) {
if (test_bit(i, &intr->irq_mask))
DPU_REG_WRITE(&intr->hw,
intr->intr_set[i].clr_off, 0xffffffff);
}
/* ensure register writes go through */
wmb();
}
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int i;
if (!intr)
return;
for (i = 0; i < MDP_INTR_MAX; i++) {
if (test_bit(i, &intr->irq_mask))
DPU_REG_WRITE(&intr->hw,
intr->intr_set[i].en_off, 0x00000000);
}
/* ensure register writes go through */
wmb();
}
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
unsigned long irq_flags;
u32 intr_status;
if (!intr)
return 0;
	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}
spin_lock_irqsave(&intr->irq_lock, irq_flags);
reg_idx = DPU_IRQ_REG(irq_idx);
intr_status = DPU_REG_READ(&intr->hw,
intr->intr_set[reg_idx].status_off) &
DPU_IRQ_MASK(irq_idx);
if (intr_status)
DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
intr_status);
/* ensure register writes go through */
wmb();
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
return intr_status;
}
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
int nirq = MDP_INTR_MAX * 32;
unsigned int i;
if (!addr || !m)
return ERR_PTR(-EINVAL);
intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
if (m->mdss_ver->core_major_ver >= 7)
intr->intr_set = dpu_intr_set_7xxx;
else
intr->intr_set = dpu_intr_set_legacy;
intr->hw.blk_addr = addr + m->mdp[0].base;
intr->total_irqs = nirq;
intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
BIT(MDP_SSPP_TOP0_INTR2) |
BIT(MDP_SSPP_TOP0_HIST_INTR);
for (i = 0; i < m->intf_count; i++) {
const struct dpu_intf_cfg *intf = &m->intf[i];
if (intf->type == INTF_NONE)
continue;
intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));
if (intf->intr_tear_rd_ptr != -1)
intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
}
spin_lock_init(&intr->irq_lock);
return intr;
}
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
kfree(intr);
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
void (*irq_cb)(void *arg, int irq_idx),
void *irq_arg)
{
unsigned long irq_flags;
int ret;
if (!irq_cb) {
DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
return -EBUSY;
}
trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_irq_register_success(irq_idx);
return 0;
}
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;
int ret;
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_core_irq_unregister_callback(irq_idx);
ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
if (ret)
DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
irq_idx, ret);
dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_irq_unregister_success(irq_idx);
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
unsigned long irq_flags;
int i, irq_count;
void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
cb = dpu_kms->hw_intr->irq_tbl[i].cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
if (irq_count || cb)
			seq_printf(s, "idx:%d count:%d cb:%ps\n", i, irq_count, cb);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
debugfs_create_file("core_irq", 0600, parent, dpu_kms,
&dpu_debugfs_core_irq_fops);
}
#endif
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_clear_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
if (!dpu_kms->hw_intr)
return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_dspp.h"
#include "dpu_kms.h"
/* DSPP_PCC */
#define PCC_EN BIT(0)
#define PCC_DIS 0
#define PCC_RED_R_OFF 0x10
#define PCC_RED_G_OFF 0x1C
#define PCC_RED_B_OFF 0x28
#define PCC_GREEN_R_OFF 0x14
#define PCC_GREEN_G_OFF 0x20
#define PCC_GREEN_B_OFF 0x2C
#define PCC_BLUE_R_OFF 0x18
#define PCC_BLUE_G_OFF 0x24
#define PCC_BLUE_B_OFF 0x30
static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
struct dpu_hw_pcc_cfg *cfg)
{
u32 base;
if (!ctx) {
DRM_ERROR("invalid ctx %pK\n", ctx);
return;
}
base = ctx->cap->sblk->pcc.base;
if (!base) {
DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
return;
}
if (!cfg) {
DRM_DEBUG_DRIVER("disable pcc feature\n");
DPU_REG_WRITE(&ctx->hw, base, PCC_DIS);
return;
}
DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r);
DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g);
DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b);
DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r);
DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g);
DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b);
DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r);
DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g);
DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b);
DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
}
static void _setup_dspp_ops(struct dpu_hw_dspp *c,
unsigned long features)
{
if (test_bit(DPU_DSPP_PCC, &features))
c->ops.setup_pcc = dpu_setup_dspp_pcc;
}
struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dspp *c;
if (!addr)
return ERR_PTR(-EINVAL);
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_DSPP;
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
_setup_dspp_ops(c, c->cap->features);
return c;
}
void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
{
kfree(dspp);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "dpu_hw_mdss.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_catalog.h"
#include "dpu_kms.h"
#define VIG_BASE_MASK \
(BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_CDP) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_MASK \
(VIG_BASE_MASK | \
BIT(DPU_SSPP_CSC_10BIT))
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
#define VIG_SDM845_MASK \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
#define VIG_SDM845_MASK_SDMA \
(VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define VIG_SC7180_MASK \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
#define VIG_SM6125_MASK \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
#define VIG_SC7180_MASK_SDMA \
(VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
#define DMA_MSM8998_MASK \
(BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
(VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define DMA_CURSOR_SDM845_MASK \
(DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
#define DMA_SDM845_MASK_SDMA \
(DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_SDM845_MASK_SDMA \
(DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_MSM8998_MASK \
(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
#define MIXER_MSM8998_MASK \
(BIT(DPU_MIXER_SOURCESPLIT))
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
#define MIXER_QCM2290_MASK \
(BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
#define PINGPONG_SDM845_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_TE) | BIT(DPU_PINGPONG_DSC))
#define PINGPONG_SDM845_TE2_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
#define PINGPONG_SM8150_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
#define CTL_SC7280_MASK \
(BIT(DPU_CTL_ACTIVE_CFG) | \
BIT(DPU_CTL_FETCH_ACTIVE) | \
BIT(DPU_CTL_VM_CFG) | \
BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
#define CTL_SM8550_MASK \
(CTL_SC7280_MASK | BIT(DPU_CTL_HAS_LAYER_EXT4))
#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
#define INTF_SC7180_MASK \
(BIT(DPU_INTF_INPUT_CTRL) | \
BIT(DPU_INTF_TE) | \
BIT(DPU_INTF_STATUS_SUPPORTED) | \
BIT(DPU_DATA_HCTL_EN))
#define INTF_SC7280_MASK (INTF_SC7180_MASK)
#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
BIT(DPU_WB_PIPE_ALPHA) | \
BIT(DPU_WB_XY_ROI_OFFSET) | \
BIT(DPU_WB_QOS) | \
BIT(DPU_WB_QOS_8LVL) | \
BIT(DPU_WB_CDP) | \
BIT(DPU_WB_INPUT_CTRL))
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
#define MAX_HORZ_DECIMATION 4
#define MAX_VERT_DECIMATION 4
#define MAX_UPSCALE_RATIO 20
#define MAX_DOWNSCALE_RATIO 4
#define SSPP_UNITY_SCALE 1
#define STRCAT(X, Y) (X Y)
static const uint32_t plane_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XBGR4444,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_BGRX4444,
};
static const uint32_t plane_formats_yuv[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XBGR4444,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_BGRX4444,
DRM_FORMAT_P010,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV16,
DRM_FORMAT_NV61,
DRM_FORMAT_VYUY,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
};
static const u32 rotation_v2_formats[] = {
DRM_FORMAT_NV12,
/* TODO add formats after validation */
};
static const uint32_t wb2_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_RGB888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_XRGB4444,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_BGRX8888,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRX4444,
DRM_FORMAT_XBGR4444,
};
/*************************************************************
* SSPP sub blocks config
*************************************************************/
/* SSPP common configuration */
#define _VIG_SBLK(sdma_pri, qseed_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
.smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
.id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}
#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
.smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
.id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = rot_cfg, \
}
#define _DMA_SBLK(sdma_pri) \
{ \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
.smart_dma_priority = sdma_pri, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_maxheight = 1088,
.rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
.rot_format_list = rotation_v2_formats,
};
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
_VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
_VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
_VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
_VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
_VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
_VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
#define _VIG_SBLK_NOSCALE(sdma_pri) \
{ \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
.smart_dma_priority = sdma_pri, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2);
static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1);
/*************************************************************
* MIXER sub blocks config
*************************************************************/
/* MSM8998 */
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x50, 0x80, 0xb0, 0x230,
0x260, 0x290
},
};
/* SDM845 */
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
0xb0, 0xc8, 0xe0, 0xf8, 0x110
},
};
/* SC7180 */
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
},
};
/* QCM2290 */
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
.maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68
},
};
/*************************************************************
* DSPP sub blocks config
*************************************************************/
static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
.pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
.len = 0x90, .version = 0x10007},
};
static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
.pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
.len = 0x90, .version = 0x40000},
};
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
.te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
.version = 0x1},
.dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
.dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0,
.len = 0x20, .version = 0x20000},
};
/*************************************************************
* DSC sub blocks config
*************************************************************/
static const struct dpu_dsc_sub_blks dsc_sblk_0 = {
.enc = {.name = "enc", .base = 0x100, .len = 0x9c},
.ctl = {.name = "ctl", .base = 0xF00, .len = 0x10},
};
static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
.enc = {.name = "enc", .base = 0x200, .len = 0x9c},
.ctl = {.name = "ctl", .base = 0xF80, .len = 0x10},
};
/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
static const u32 msm8998_rt_pri_lvl[] = {1, 2, 2, 2};
static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1};
static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
{
.pps = 1920 * 1080 * 30,
.ot_limit = 2,
},
{
.pps = 1920 * 1080 * 60,
.ot_limit = 4,
},
{
.pps = 3840 * 2160 * 30,
.ot_limit = 16,
},
};
static const struct dpu_vbif_cfg msm8998_vbif[] = {
{
.name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.default_ot_rd_limit = 32,
.default_ot_wr_limit = 32,
.features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM),
.xin_halt_timeout = 0x4000,
.qos_rp_remap_size = 0x20,
.dynamic_ot_rd_tbl = {
.count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
.cfg = msm8998_ot_rdwr_cfg,
},
.dynamic_ot_wr_tbl = {
.count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
.cfg = msm8998_ot_rdwr_cfg,
},
.qos_rt_tbl = {
.npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl),
.priority_lvl = msm8998_rt_pri_lvl,
},
.qos_nrt_tbl = {
.npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl),
.priority_lvl = msm8998_nrt_pri_lvl,
},
.memtype_count = 14,
.memtype = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
},
};
static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
.name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
.qos_rp_remap_size = 0x40,
.qos_rt_tbl = {
.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
.priority_lvl = sdm845_rt_pri_lvl,
},
.qos_nrt_tbl = {
.npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
.priority_lvl = sdm845_nrt_pri_lvl,
},
.memtype_count = 14,
.memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
},
};
static const struct dpu_vbif_cfg sm8550_vbif[] = {
{
.name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
.qos_rp_remap_size = 0x40,
.qos_rt_tbl = {
.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
.priority_lvl = sdm845_rt_pri_lvl,
},
.qos_nrt_tbl = {
.npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
.priority_lvl = sdm845_nrt_pri_lvl,
},
.memtype_count = 16,
.memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
},
};
/*************************************************************
* PERF data config
*************************************************************/
/* SSPP QOS LUTs */
static const struct dpu_qos_lut_entry msm8998_qos_linear[] = {
{.fl = 4, .lut = 0x1b},
{.fl = 5, .lut = 0x5b},
{.fl = 6, .lut = 0x15b},
{.fl = 7, .lut = 0x55b},
{.fl = 8, .lut = 0x155b},
{.fl = 9, .lut = 0x555b},
{.fl = 10, .lut = 0x1555b},
{.fl = 11, .lut = 0x5555b},
{.fl = 12, .lut = 0x15555b},
{.fl = 0, .lut = 0x55555b}
};
static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 4, .lut = 0x357},
{.fl = 5, .lut = 0x3357},
{.fl = 6, .lut = 0x23357},
{.fl = 7, .lut = 0x223357},
{.fl = 8, .lut = 0x2223357},
{.fl = 9, .lut = 0x22223357},
{.fl = 10, .lut = 0x222223357},
{.fl = 11, .lut = 0x2222223357},
{.fl = 12, .lut = 0x22222223357},
{.fl = 13, .lut = 0x222222223357},
{.fl = 14, .lut = 0x1222222223357},
{.fl = 0, .lut = 0x11222222223357}
};
static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = {
{.fl = 10, .lut = 0x1aaff},
{.fl = 11, .lut = 0x5aaff},
{.fl = 12, .lut = 0x15aaff},
{.fl = 0, .lut = 0x55aaff},
};
static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
{.fl = 0, .lut = 0x0011222222335777},
};
static const struct dpu_qos_lut_entry sm6350_qos_linear_macrotile[] = {
{.fl = 0, .lut = 0x0011223445566777 },
};
static const struct dpu_qos_lut_entry sm8150_qos_linear[] = {
{.fl = 0, .lut = 0x0011222222223357 },
};
static const struct dpu_qos_lut_entry sc8180x_qos_linear[] = {
{.fl = 4, .lut = 0x0000000000000357 },
};
static const struct dpu_qos_lut_entry qcm2290_qos_linear[] = {
{.fl = 0, .lut = 0x0011222222335777},
};
static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 10, .lut = 0x344556677},
{.fl = 11, .lut = 0x3344556677},
{.fl = 12, .lut = 0x23344556677},
{.fl = 13, .lut = 0x223344556677},
{.fl = 14, .lut = 0x1223344556677},
{.fl = 0, .lut = 0x112233344556677},
};
static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = {
{.fl = 0, .lut = 0x0011223344556677},
};
static const struct dpu_qos_lut_entry sc8180x_qos_macrotile[] = {
{.fl = 10, .lut = 0x0000000344556677},
};
static const struct dpu_qos_lut_entry msm8998_qos_nrt[] = {
{.fl = 0, .lut = 0x0},
};
static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
{.fl = 0, .lut = 0x0},
};
static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
{.fl = 0, .lut = 0x0},
};
/*************************************************************
* Hardware catalog
*************************************************************/
#include "catalog/dpu_3_0_msm8998.h"
#include "catalog/dpu_4_0_sdm845.h"
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
#include "catalog/dpu_5_4_sm6125.h"
#include "catalog/dpu_6_0_sm8250.h"
#include "catalog/dpu_6_2_sc7180.h"
#include "catalog/dpu_6_3_sm6115.h"
#include "catalog/dpu_6_4_sm6350.h"
#include "catalog/dpu_6_5_qcm2290.h"
#include "catalog/dpu_6_9_sm6375.h"
#include "catalog/dpu_7_0_sm8350.h"
#include "catalog/dpu_7_2_sc7280.h"
#include "catalog/dpu_8_0_sc8280xp.h"
#include "catalog/dpu_8_1_sm8450.h"
#include "catalog/dpu_9_0_sm8550.h"
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
*/
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_dsc.h"
#define DSC_CMN_MAIN_CNF 0x00
/* DPU_DSC_ENC register offsets */
#define ENC_DF_CTRL 0x00
#define ENC_GENERAL_STATUS 0x04
#define ENC_HSLICE_STATUS 0x08
#define ENC_OUT_STATUS 0x0C
#define ENC_INT_STAT 0x10
#define ENC_INT_CLR 0x14
#define ENC_INT_MASK 0x18
#define DSC_MAIN_CONF 0x30
#define DSC_PICTURE_SIZE 0x34
#define DSC_SLICE_SIZE 0x38
#define DSC_MISC_SIZE 0x3C
#define DSC_HRD_DELAYS 0x40
#define DSC_RC_SCALE 0x44
#define DSC_RC_SCALE_INC_DEC 0x48
#define DSC_RC_OFFSETS_1 0x4C
#define DSC_RC_OFFSETS_2 0x50
#define DSC_RC_OFFSETS_3 0x54
#define DSC_RC_OFFSETS_4 0x58
#define DSC_FLATNESS_QP 0x5C
#define DSC_RC_MODEL_SIZE 0x60
#define DSC_RC_CONFIG 0x64
#define DSC_RC_BUF_THRESH_0 0x68
#define DSC_RC_BUF_THRESH_1 0x6C
#define DSC_RC_BUF_THRESH_2 0x70
#define DSC_RC_BUF_THRESH_3 0x74
#define DSC_RC_MIN_QP_0 0x78
#define DSC_RC_MIN_QP_1 0x7C
#define DSC_RC_MIN_QP_2 0x80
#define DSC_RC_MAX_QP_0 0x84
#define DSC_RC_MAX_QP_1 0x88
#define DSC_RC_MAX_QP_2 0x8C
#define DSC_RC_RANGE_BPG_OFFSETS_0 0x90
#define DSC_RC_RANGE_BPG_OFFSETS_1 0x94
#define DSC_RC_RANGE_BPG_OFFSETS_2 0x98
/* DPU_DSC_CTL register offsets */
#define DSC_CTL 0x00
#define DSC_CFG 0x04
#define DSC_DATA_IN_SWAP 0x08
#define DSC_CLK_CTRL 0x0C
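/*
 * A sketch of the output buffer math below: the encoder output buffer
 * (2400 entries, per the constant used here) is divided evenly between
 * the soft slices; native 4:2:x modes appear to consume two entries'
 * worth per address, halving the usable depth. The register takes the
 * last valid address, hence the trailing -1.
 * Example: 2 soft slices, native 4:2:0 -> 2400 / 2 / 2 - 1 = 599.
 */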
static int _dsc_calc_output_buf_max_addr(struct dpu_hw_dsc *hw_dsc, int num_softslice)
{
int max_addr = 2400 / num_softslice;
if (hw_dsc->caps->features & BIT(DPU_DSC_NATIVE_42x_EN))
max_addr /= 2;
return max_addr - 1;
}
static void dpu_hw_dsc_disable_1_2(struct dpu_hw_dsc *hw_dsc)
{
struct dpu_hw_blk_reg_map *hw;
const struct dpu_dsc_sub_blks *sblk;
if (!hw_dsc)
return;
hw = &hw_dsc->hw;
sblk = hw_dsc->caps->sblk;
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, 0);
DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, 0);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, 0);
}
static void dpu_hw_dsc_config_1_2(struct dpu_hw_dsc *hw_dsc,
struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
struct dpu_hw_blk_reg_map *hw;
const struct dpu_dsc_sub_blks *sblk;
u32 data = 0;
u32 det_thresh_flatness;
u32 num_active_slice_per_enc;
u32 bpp;
if (!hw_dsc || !dsc)
return;
hw = &hw_dsc->hw;
sblk = hw_dsc->caps->sblk;
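/*
 * DSC_CMN_MAIN_CNF layout, as programmed below: bit 0 selects
 * split-panel operation, bit 1 multiplexes two slices through one
 * encoder, and bits [8:7] carry the active soft-slice count.
 */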
if (mode & DSC_MODE_SPLIT_PANEL)
data |= BIT(0);
if (mode & DSC_MODE_MULTIPLEX)
data |= BIT(1);
num_active_slice_per_enc = dsc->slice_count;
if (mode & DSC_MODE_MULTIPLEX)
num_active_slice_per_enc = dsc->slice_count / 2;
data |= (num_active_slice_per_enc & 0x3) << 7;
DPU_REG_WRITE(hw, DSC_CMN_MAIN_CNF, data);
data = (initial_lines & 0xff);
if (mode & DSC_MODE_VIDEO)
data |= BIT(9);
data |= (_dsc_calc_output_buf_max_addr(hw_dsc, num_active_slice_per_enc) << 18);
DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, data);
data = (dsc->dsc_version_minor & 0xf) << 28;
if (dsc->dsc_version_minor == 0x2) {
if (dsc->native_422)
data |= BIT(22);
if (dsc->native_420)
data |= BIT(21);
}
bpp = dsc->bits_per_pixel;
/* As per HW requirement, bpp must be programmed at twice the actual
 * value for native 4:2:0 or 4:2:2 encoding
 */
if (dsc->native_422 || dsc->native_420)
bpp = 2 * bpp;
data |= bpp << 10;
if (dsc->block_pred_enable)
data |= BIT(20);
if (dsc->convert_rgb)
data |= BIT(4);
data |= (dsc->line_buf_depth & 0xf) << 6;
data |= dsc->bits_per_component & 0xf;
DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, data);
data = (dsc->pic_width & 0xffff) |
((dsc->pic_height & 0xffff) << 16);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_PICTURE_SIZE, data);
data = (dsc->slice_width & 0xffff) |
((dsc->slice_height & 0xffff) << 16);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_SLICE_SIZE, data);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_MISC_SIZE,
(dsc->slice_chunk_size) & 0xffff);
data = (dsc->initial_xmit_delay & 0xffff) |
((dsc->initial_dec_delay & 0xffff) << 16);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_HRD_DELAYS, data);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE,
dsc->initial_scale_value & 0x3f);
data = (dsc->scale_increment_interval & 0xffff) |
((dsc->scale_decrement_interval & 0x7ff) << 16);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE_INC_DEC, data);
data = (dsc->first_line_bpg_offset & 0x1f) |
((dsc->second_line_bpg_offset & 0x1f) << 5);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_1, data);
data = (dsc->nfl_bpg_offset & 0xffff) |
((dsc->slice_bpg_offset & 0xffff) << 16);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_2, data);
data = (dsc->initial_offset & 0xffff) |
((dsc->final_offset & 0xffff) << 16);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_3, data);
data = (dsc->nsl_bpg_offset & 0xffff) |
((dsc->second_line_offset_adj & 0xffff) << 16);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_4, data);
det_thresh_flatness = drm_dsc_flatness_det_thresh(dsc);
data = (dsc->flatness_min_qp & 0x1f) |
((dsc->flatness_max_qp & 0x1f) << 5) |
((det_thresh_flatness & 0xff) << 10);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_FLATNESS_QP, data);
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MODEL_SIZE,
(dsc->rc_model_size) & 0xffff);
data = dsc->rc_edge_factor & 0xf;
data |= (dsc->rc_quant_incr_limit0 & 0x1f) << 8;
data |= (dsc->rc_quant_incr_limit1 & 0x1f) << 13;
data |= (dsc->rc_tgt_offset_high & 0xf) << 20;
data |= (dsc->rc_tgt_offset_low & 0xf) << 24;
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_CONFIG, data);
/* program the dsc wrapper */
data = BIT(0); /* encoder enable */
if (dsc->native_422)
data |= BIT(8);
else if (dsc->native_420)
data |= BIT(9);
if (!dsc->convert_rgb)
data |= BIT(10);
if (dsc->bits_per_component == 8)
data |= BIT(11);
if (mode & DSC_MODE_SPLIT_PANEL)
data |= BIT(12);
if (mode & DSC_MODE_MULTIPLEX)
data |= BIT(13);
if (!(mode & DSC_MODE_VIDEO))
data |= BIT(17);
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, data);
}
static void dpu_hw_dsc_config_thresh_1_2(struct dpu_hw_dsc *hw_dsc,
struct drm_dsc_config *dsc)
{
struct dpu_hw_blk_reg_map *hw;
const struct dpu_dsc_sub_blks *sblk;
struct drm_dsc_rc_range_parameters *rc;
if (!hw_dsc || !dsc)
return;
hw = &hw_dsc->hw;
sblk = hw_dsc->caps->sblk;
rc = dsc->rc_range_params;
/*
 * There are 14 BUF_THRESH values in total; each register holds four,
 * with the last register holding only two.
 */
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_0,
(dsc->rc_buf_thresh[0] << 0) |
(dsc->rc_buf_thresh[1] << 8) |
(dsc->rc_buf_thresh[2] << 16) |
(dsc->rc_buf_thresh[3] << 24));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_1,
(dsc->rc_buf_thresh[4] << 0) |
(dsc->rc_buf_thresh[5] << 8) |
(dsc->rc_buf_thresh[6] << 16) |
(dsc->rc_buf_thresh[7] << 24));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_2,
(dsc->rc_buf_thresh[8] << 0) |
(dsc->rc_buf_thresh[9] << 8) |
(dsc->rc_buf_thresh[10] << 16) |
(dsc->rc_buf_thresh[11] << 24));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_3,
(dsc->rc_buf_thresh[12] << 0) |
(dsc->rc_buf_thresh[13] << 8));
/*
 * min/max QP values are 5 bits each; each register holds five of the
 * 15 values in total.
 *
 * BPG offsets are 6 bits each; each register likewise holds five of
 * the 15 values in total.
 */
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_0,
(rc[0].range_min_qp << 0) |
(rc[1].range_min_qp << 5) |
(rc[2].range_min_qp << 10) |
(rc[3].range_min_qp << 15) |
(rc[4].range_min_qp << 20));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_0,
(rc[0].range_max_qp << 0) |
(rc[1].range_max_qp << 5) |
(rc[2].range_max_qp << 10) |
(rc[3].range_max_qp << 15) |
(rc[4].range_max_qp << 20));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_0,
(rc[0].range_bpg_offset << 0) |
(rc[1].range_bpg_offset << 6) |
(rc[2].range_bpg_offset << 12) |
(rc[3].range_bpg_offset << 18) |
(rc[4].range_bpg_offset << 24));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_1,
(rc[5].range_min_qp << 0) |
(rc[6].range_min_qp << 5) |
(rc[7].range_min_qp << 10) |
(rc[8].range_min_qp << 15) |
(rc[9].range_min_qp << 20));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_1,
(rc[5].range_max_qp << 0) |
(rc[6].range_max_qp << 5) |
(rc[7].range_max_qp << 10) |
(rc[8].range_max_qp << 15) |
(rc[9].range_max_qp << 20));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_1,
(rc[5].range_bpg_offset << 0) |
(rc[6].range_bpg_offset << 6) |
(rc[7].range_bpg_offset << 12) |
(rc[8].range_bpg_offset << 18) |
(rc[9].range_bpg_offset << 24));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_2,
(rc[10].range_min_qp << 0) |
(rc[11].range_min_qp << 5) |
(rc[12].range_min_qp << 10) |
(rc[13].range_min_qp << 15) |
(rc[14].range_min_qp << 20));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_2,
(rc[10].range_max_qp << 0) |
(rc[11].range_max_qp << 5) |
(rc[12].range_max_qp << 10) |
(rc[13].range_max_qp << 15) |
(rc[14].range_max_qp << 20));
DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_2,
(rc[10].range_bpg_offset << 0) |
(rc[11].range_bpg_offset << 6) |
(rc[12].range_bpg_offset << 12) |
(rc[13].range_bpg_offset << 18) |
(rc[14].range_bpg_offset << 24));
}
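/*
 * Route the DSC encoder to a PINGPONG block through the wrapper mux:
 * PINGPONG_0 maps to mux value 0, and 0xf leaves the mux disconnected.
 */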
static void dpu_hw_dsc_bind_pingpong_blk_1_2(struct dpu_hw_dsc *hw_dsc,
const enum dpu_pingpong pp)
{
struct dpu_hw_blk_reg_map *hw;
const struct dpu_dsc_sub_blks *sblk;
int mux_cfg = 0xf; /* Disabled */
hw = &hw_dsc->hw;
sblk = hw_dsc->caps->sblk;
if (pp)
mux_cfg = (pp - PINGPONG_0) & 0x7;
DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CTL, mux_cfg);
}
static void _setup_dsc_ops_1_2(struct dpu_hw_dsc_ops *ops,
const unsigned long features)
{
ops->dsc_disable = dpu_hw_dsc_disable_1_2;
ops->dsc_config = dpu_hw_dsc_config_1_2;
ops->dsc_config_thresh = dpu_hw_dsc_config_thresh_1_2;
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
}
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_DSC;
c->idx = cfg->id;
c->caps = cfg;
_setup_dsc_ops_1_2(&c->ops, c->caps->features);
return c;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <drm/drm_edid.h>
#include "dpu_writeback.h"
static int dpu_wb_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
/*
 * Ideally we would limit the modes only to the max line width, but on
 * some chipsets that allows even 4k modes to be added, which then fail
 * the per-SSPP bandwidth checks. So, until dual-SSPP and source-split
 * support are added, limit the modes based on max_mixer_width, after
 * which 4K modes can be supported.
 */
return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
dev->mode_config.max_height);
}
static const struct drm_connector_funcs dpu_wb_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int dpu_wb_conn_prepare_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
if (!job->fb)
return 0;
dpu_encoder_prepare_wb_job(dpu_wb_conn->wb_enc, job);
return 0;
}
static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
if (!job->fb)
return;
dpu_encoder_cleanup_wb_job(dpu_wb_conn->wb_enc, job);
}
static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
.get_modes = dpu_wb_conn_get_modes,
.prepare_writeback_job = dpu_wb_conn_prepare_job,
.cleanup_writeback_job = dpu_wb_conn_cleanup_job,
};
int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
const u32 *format_list, u32 num_formats)
{
struct dpu_wb_connector *dpu_wb_conn;
int rc = 0;
dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
if (!dpu_wb_conn)
return -ENOMEM;
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
/*
 * DPU initializes and fully sets up the encoder for the writeback
 * case, hence the writeback connector must be initialized with the
 * drm_writeback_connector_init_with_encoder() API.
 */
rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
&dpu_wb_conn_funcs, format_list, num_formats);
if (!rc)
dpu_wb_conn->wb_enc = enc;
return rc;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>
#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_self_refresh_helper.h>
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"
/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
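/*
 * Truncate a drm_color_ctm coefficient (S31.32 fixed point, sign in
 * bit 63) to the 18-bit 3.15 format the DSPP PCC block expects: the
 * sign bit is masked off and the 17 least significant fractional bits
 * are dropped.
 */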
#define CONVERT_S3_15(val) \
(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
return to_dpu_kms(priv->kms);
}
static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
if (!crtc)
return;
drm_crtc_cleanup(crtc);
kfree(dpu_crtc);
}
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc)
return encoder;
return NULL;
}
static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
if (!src_name ||
!strcmp(src_name, "none"))
return DPU_CRTC_CRC_SOURCE_NONE;
if (!strcmp(src_name, "auto") ||
!strcmp(src_name, "lm"))
return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
if (!strcmp(src_name, "encoder"))
return DPU_CRTC_CRC_SOURCE_ENCODER;
return DPU_CRTC_CRC_SOURCE_INVALID;
}
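/*
 * These sources back the generic DRM CRC debugfs interface; usage is
 * along the lines of (path may vary per device):
 *   echo lm > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *   cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 */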
static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *src_name, size_t *values_cnt)
{
enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
if (source < 0) {
DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
return -EINVAL;
}
if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
*values_cnt = crtc_state->num_mixers;
} else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
struct drm_encoder *drm_enc;
*values_cnt = 0;
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
*values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
}
return 0;
}
static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
{
struct dpu_crtc_mixer *m;
int i;
for (i = 0; i < crtc_state->num_mixers; ++i) {
m = &crtc_state->mixers[i];
if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
continue;
/* Calculate MISR over 1 frame */
m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
}
}
static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
{
struct drm_encoder *drm_enc;
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_setup_misr(drm_enc);
}
static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
enum dpu_crtc_crc_source current_source;
struct dpu_crtc_state *crtc_state;
struct drm_device *drm_dev = crtc->dev;
bool was_enabled;
bool enable = false;
int ret = 0;
if (source < 0) {
DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
return -EINVAL;
}
ret = drm_modeset_lock(&crtc->mutex, NULL);
if (ret)
return ret;
enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
crtc_state = to_dpu_crtc_state(crtc->state);
spin_lock_irq(&drm_dev->event_lock);
current_source = crtc_state->crc_source;
spin_unlock_irq(&drm_dev->event_lock);
was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);
if (!was_enabled && enable) {
ret = drm_crtc_vblank_get(crtc);
if (ret)
goto cleanup;
} else if (was_enabled && !enable) {
drm_crtc_vblank_put(crtc);
}
spin_lock_irq(&drm_dev->event_lock);
crtc_state->crc_source = source;
spin_unlock_irq(&drm_dev->event_lock);
crtc_state->crc_frame_skip_count = 0;
if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
dpu_crtc_setup_lm_misr(crtc_state);
else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
dpu_crtc_setup_encoder_misr(crtc);
else
ret = -EINVAL;
cleanup:
drm_modeset_unlock(&crtc->mutex);
return ret;
}
static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
struct drm_encoder *encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
return 0;
}
return dpu_encoder_get_vsync_count(encoder);
}
static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
struct dpu_crtc_state *crtc_state)
{
struct dpu_crtc_mixer *m;
u32 crcs[CRTC_DUAL_MIXERS];
int rc = 0;
int i;
BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
for (i = 0; i < crtc_state->num_mixers; ++i) {
m = &crtc_state->mixers[i];
if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
continue;
rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
if (rc) {
if (rc != -ENODATA)
DRM_DEBUG_DRIVER("MISR read failed\n");
return rc;
}
}
return drm_crtc_add_crc_entry(crtc, true,
drm_crtc_accurate_vblank_count(crtc), crcs);
}
static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
{
struct drm_encoder *drm_enc;
int rc, pos = 0;
u32 crcs[INTF_MAX];
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
if (rc < 0) {
if (rc != -ENODATA)
DRM_DEBUG_DRIVER("MISR read failed\n");
return rc;
}
pos += rc;
}
return drm_crtc_add_crc_entry(crtc, true,
drm_crtc_accurate_vblank_count(crtc), crcs);
}
static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
/* Skip first 2 frames in case of "uncooked" CRCs */
if (crtc_state->crc_frame_skip_count < 2) {
crtc_state->crc_frame_skip_count++;
return 0;
}
if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
return dpu_crtc_get_lm_crc(crtc, crtc_state);
else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
return dpu_crtc_get_encoder_crc(crtc);
return -EINVAL;
}
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
unsigned int pipe = crtc->index;
struct drm_encoder *encoder;
int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", pipe);
return false;
}
vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
/*
* the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
* the end of VFP. Translate the porch values relative to the line
* counter positions.
*/
vactive_start = vsw + vbp + 1;
vactive_end = vactive_start + mode->crtc_vdisplay;
/* last scan line before VSYNC */
vfp_end = mode->crtc_vtotal;
if (stime)
*stime = ktime_get();
line = dpu_encoder_get_linecount(encoder);
if (line < vactive_start)
line -= vactive_start;
else if (line > vactive_end)
line = line - vfp_end - vactive_start;
else
line -= vactive_start;
*vpos = line;
*hpos = 0;
if (etime)
*etime = ktime_get();
return true;
}
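/*
 * Blend equation sketch: out = fg * fg_alpha + bg * bg_alpha. For
 * premultiplied content the foreground pixel already carries its
 * alpha, so only the background term is modulated; coverage blending
 * modulates both terms by the pixel alpha.
 */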
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
struct dpu_plane_state *pstate, struct dpu_format *format)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
uint32_t blend_op;
uint32_t fg_alpha, bg_alpha;
fg_alpha = pstate->base.alpha >> 8;
bg_alpha = 0xff - fg_alpha;
/* default to opaque blending */
if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
!format->alpha_enable) {
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
DPU_BLEND_BG_ALPHA_BG_CONST;
} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_BG_MOD_ALPHA |
DPU_BLEND_BG_INV_MOD_ALPHA;
} else {
blend_op |= DPU_BLEND_BG_INV_ALPHA;
}
} else {
/* coverage blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_FG_MOD_ALPHA |
DPU_BLEND_FG_INV_MOD_ALPHA |
DPU_BLEND_BG_MOD_ALPHA |
DPU_BLEND_BG_INV_MOD_ALPHA;
} else {
blend_op |= DPU_BLEND_BG_INV_ALPHA;
}
}
lm->ops.setup_blend_config(lm, pstate->stage,
fg_alpha, bg_alpha, blend_op);
DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
&format->base.pixel_format, format->alpha_enable, blend_op);
}
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
struct dpu_crtc_state *crtc_state;
int lm_idx, lm_horiz_position;
crtc_state = to_dpu_crtc_state(crtc->state);
lm_horiz_position = 0;
for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
struct dpu_hw_mixer_cfg cfg;
if (!lm_roi || !drm_rect_visible(lm_roi))
continue;
cfg.out_width = drm_rect_width(lm_roi);
cfg.out_height = drm_rect_height(lm_roi);
cfg.right_mixer = lm_horiz_position++;
cfg.flags = 0;
hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
}
}
static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
struct drm_plane *plane,
struct dpu_crtc_mixer *mixer,
u32 num_mixers,
enum dpu_stage stage,
struct dpu_format *format,
uint64_t modifier,
struct dpu_sw_pipe *pipe,
unsigned int stage_idx,
struct dpu_hw_stage_cfg *stage_cfg
)
{
uint32_t lm_idx;
enum dpu_sspp sspp_idx;
struct drm_plane_state *state;
sspp_idx = pipe->sspp->idx;
state = plane->state;
trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
state, to_dpu_plane_state(state), stage_idx,
format->base.pixel_format,
modifier);
DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
crtc->base.id,
stage,
plane->base.id,
sspp_idx - SSPP_NONE,
state->fb ? state->fb->base.id : -1,
pipe->multirect_index);
stage_cfg->stage[stage][stage_idx] = sspp_idx;
stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;
/* blend config update */
for (lm_idx = 0; lm_idx < num_mixers; lm_idx++)
mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
}
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
struct dpu_hw_stage_cfg *stage_cfg)
{
struct drm_plane *plane;
struct drm_framebuffer *fb;
struct drm_plane_state *state;
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_plane_state *pstate = NULL;
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
uint32_t lm_idx;
bool bg_alpha_enable = false;
DECLARE_BITMAP(fetch_active, SSPP_MAX);
memset(fetch_active, 0, sizeof(fetch_active));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
continue;
if (!state->visible)
continue;
pstate = to_dpu_plane_state(state);
fb = state->fb;
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
set_bit(pstate->pipe.sspp->idx, fetch_active);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
format, fb ? fb->modifier : 0,
&pstate->pipe, 0, stage_cfg);
if (pstate->r_pipe.sspp) {
set_bit(pstate->r_pipe.sspp->idx, fetch_active);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
format, fb ? fb->modifier : 0,
&pstate->r_pipe, 1, stage_cfg);
}
/* blend config update */
for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
else
mixer[lm_idx].mixer_op_mode |=
1 << pstate->stage;
}
}
if (ctl->ops.set_active_pipes)
ctl->ops.set_active_pipes(ctl, fetch_active);
_dpu_crtc_program_lm_output_roi(crtc);
}
/**
* _dpu_crtc_blend_setup - configure crtc mixers
* @crtc: Pointer to drm crtc structure
*/
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
struct dpu_hw_stage_cfg stage_cfg;
int i;
DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
}
/* initialize stage cfg */
memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
lm = mixer[i].hw_lm;
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
/* stage config flush mask */
ctl->ops.update_pending_flush_mixer(ctl,
mixer[i].hw_lm->idx);
DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
ctl->idx - CTL_0);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&stage_cfg);
}
}
/**
* _dpu_crtc_complete_flip - signal pending page_flip events
* Any pending vblank events are added to the vblank_event_list
* so that the next vblank interrupt shall signal them.
* However PAGE_FLIP events are not handled through the vblank_event_list.
* This API signals any pending PAGE_FLIP events requested through
* DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
* @crtc: Pointer to drm crtc structure
*/
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (dpu_crtc->event) {
DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
dpu_crtc->event);
trace_dpu_crtc_complete_flip(DRMID(crtc));
drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
dpu_crtc->event = NULL;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
/*
* TODO: This function is called from dpu debugfs and as part of atomic
* check. When called from debugfs, the crtc->mutex must be held to
* read crtc->state. However reading crtc->state from atomic check isn't
* allowed (unless you have a good reason, a big comment, and a deep
* understanding of how the atomic/modeset locks work (<- and this is
* probably not possible)). So we'll keep the WARN_ON here for now, but
* really we need to figure out a better way to track our operating mode
*/
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
/* TODO: Returns the first INTF_MODE, could there be multiple values? */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
return dpu_encoder_get_intf_mode(encoder);
return INTF_MODE_NONE;
}
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
/* keep statistics on vblank callback - with auto reset via debugfs */
if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
dpu_crtc->vblank_cb_time = ktime_get();
else
dpu_crtc->vblank_cb_count++;
dpu_crtc_get_crc(crtc);
drm_crtc_handle_vblank(crtc);
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
struct dpu_crtc_frame_event *fevent = container_of(work,
struct dpu_crtc_frame_event, work);
struct drm_crtc *crtc = fevent->crtc;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
unsigned long flags;
bool frame_done = false;
DPU_ATRACE_BEGIN("crtc_frame_event");
DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
ktime_to_ns(fevent->ts));
if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
| DPU_ENCODER_FRAME_EVENT_ERROR
| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
if (atomic_read(&dpu_crtc->frame_pending) < 1) {
/* ignore vblank when not pending */
} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
dpu_core_perf_crtc_release_bw(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
}
if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
| DPU_ENCODER_FRAME_EVENT_ERROR))
frame_done = true;
}
if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
crtc->base.id, ktime_to_ns(fevent->ts));
if (frame_done)
complete_all(&dpu_crtc->frame_done_comp);
spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
DPU_ATRACE_END("crtc_frame_event");
}
/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this callback with the encoder for all frame events such as
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke it
 * from different contexts - IRQ, user thread, commit_thread, etc. Each
 * event should be carefully reviewed and processed in the proper task
 * context to avoid scheduling delay or to properly manage the IRQ
 * context's bottom-half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
struct drm_crtc *crtc = (struct drm_crtc *)data;
struct dpu_crtc *dpu_crtc;
struct msm_drm_private *priv;
struct dpu_crtc_frame_event *fevent;
unsigned long flags;
u32 crtc_id;
/* Nothing to do on idle event */
if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
return;
dpu_crtc = to_dpu_crtc(crtc);
priv = crtc->dev->dev_private;
crtc_id = drm_crtc_index(crtc);
trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
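/*
 * This may run in IRQ context, so pull a preallocated event from
 * the free pool rather than allocating; the worker returns it to
 * the list once the event has been processed.
 */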
spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
struct dpu_crtc_frame_event, list);
if (fevent)
list_del_init(&fevent->list);
spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
if (!fevent) {
DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
return;
}
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
trace_dpu_crtc_complete_commit(DRMID(crtc));
dpu_core_perf_crtc_update(crtc, 0);
_dpu_crtc_complete_flip(crtc);
}
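/*
 * Split the CRTC output into equal-width vertical strips, one per
 * layer mixer: e.g. with two mixers and a 2160-px wide mode, each LM
 * covers a 1080 x vdisplay rectangle.
 */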
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct drm_display_mode *adj_mode = &state->adjusted_mode;
u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
int i;
for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
r->y1 = 0;
r->x2 = r->x1 + crtc_split_width;
r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
}
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
struct dpu_hw_pcc_cfg *cfg)
{
struct drm_color_ctm *ctm;
memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
ctm = (struct drm_color_ctm *)state->ctm->data;
if (!ctm)
return;
cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}
static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
struct drm_crtc_state *state = crtc->state;
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_pcc_cfg cfg;
struct dpu_hw_ctl *ctl;
struct dpu_hw_dspp *dspp;
int i;
if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
return;
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
dspp = mixer[i].hw_dspp;
if (!dspp || !dspp->ops.setup_pcc)
continue;
if (!state->ctm) {
dspp->ops.setup_pcc(dspp, NULL);
} else {
_dpu_crtc_get_pcc_coeff(state, &cfg);
dspp->ops.setup_pcc(dspp, &cfg);
}
/* stage config flush mask */
ctl->ops.update_pending_flush_dspp(ctl,
mixer[i].hw_dspp->idx, DPU_DSPP_PCC);
}
}
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
if (!crtc->state->enable) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
crtc->base.id, crtc->state->enable);
return;
}
DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
/* encoder will trigger pending mask now */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
/*
* If no mixers have been allocated in dpu_crtc_atomic_check(),
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
if (unlikely(!cstate->num_mixers))
return;
_dpu_crtc_blend_setup(crtc);
_dpu_crtc_setup_cp_blocks(crtc);
/*
* PP_DONE irq is only used by command mode for now.
* It is better to request pending before FLUSH and START trigger
* to make sure no pp_done irq missed.
* This is safe because no pp_done will happen before SW trigger
* in command mode.
*/
}
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct dpu_crtc *dpu_crtc;
struct drm_device *dev;
struct drm_plane *plane;
struct msm_drm_private *priv;
unsigned long flags;
struct dpu_crtc_state *cstate;
if (!crtc->state->enable) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
crtc->base.id, crtc->state->enable);
return;
}
DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
priv = dev->dev_private;
if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
return;
}
WARN_ON(dpu_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
dpu_crtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
/*
 * If no mixers have been allocated in dpu_crtc_atomic_check(),
 * it means we are trying to flush a CRTC whose state is disabled:
 * nothing else needs to be done.
 */
if (unlikely(!cstate->num_mixers))
return;
/* update performance setting before crtc kickoff */
dpu_core_perf_crtc_update(crtc, 1);
/*
* Final plane updates: Give each plane a chance to complete all
* required writes/flushing before crtc's "flush
* everything" call below.
*/
drm_atomic_crtc_for_each_plane(plane, crtc) {
if (dpu_crtc->smmu_state.transition_error)
dpu_plane_set_error(plane, true);
dpu_plane_flush(plane);
}
/* Kickoff will be scheduled by outer layer */
}
/**
* dpu_crtc_destroy_state - state destroy hook
* @crtc: drm CRTC
* @state: CRTC state object to release
*/
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
__drm_atomic_helper_crtc_destroy_state(state);
kfree(cstate);
}
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
int ret, rc = 0;
if (!atomic_read(&dpu_crtc->frame_pending)) {
DRM_DEBUG_ATOMIC("no frames pending\n");
return 0;
}
DPU_ATRACE_BEGIN("frame done completion wait");
ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
if (!ret) {
DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
rc = -ETIMEDOUT;
}
DPU_ATRACE_END("frame done completion wait");
return rc;
}
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
/*
 * If no mixers have been allocated in dpu_crtc_atomic_check(),
 * it means we are trying to start a CRTC whose state is disabled:
 * nothing else needs to be done.
 */
if (unlikely(!cstate->num_mixers))
return;
DPU_ATRACE_BEGIN("crtc_commit");
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask) {
if (!dpu_encoder_is_valid_for_commit(encoder)) {
DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
goto end;
}
}
/*
* Encoder will flush/start now, unless it has a tx pending. If so, it
* may delay and flush at an irq event (e.g. ppdone)
*/
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask)
dpu_encoder_prepare_for_kickoff(encoder);
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
} else
DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
dpu_crtc->play_count++;
dpu_vbif_clear_errors(dpu_kms);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_kickoff(encoder);
reinit_completion(&dpu_crtc->frame_done_comp);
end:
DPU_ATRACE_END("crtc_commit");
}
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (crtc->state)
dpu_crtc_destroy_state(crtc, crtc->state);
if (cstate)
__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
/**
* dpu_crtc_duplicate_state - state duplicate hook
* @crtc: Pointer to drm crtc structure
*/
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
if (!cstate) {
DPU_ERROR("failed to allocate state\n");
return NULL;
}
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
return &cstate->base;
}
static void dpu_crtc_atomic_print_state(struct drm_printer *p,
const struct drm_crtc_state *state)
{
const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
int i;
for (i = 0; i < cstate->num_mixers; i++) {
drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
if (cstate->mixers[i].hw_dspp)
drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
}
}
static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
unsigned long flags;
bool release_bandwidth = false;
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
/* If disable is triggered while in self refresh mode,
* reset the encoder software state so that in enable
* it won't trigger a warn while assigning crtc.
*/
if (old_crtc_state->self_refresh_active) {
drm_for_each_encoder_mask(encoder, crtc->dev,
old_crtc_state->encoder_mask) {
dpu_encoder_assign_crtc(encoder, NULL);
}
return;
}
/* Disable/save vblank irq handling */
drm_crtc_vblank_off(crtc);
drm_for_each_encoder_mask(encoder, crtc->dev,
old_crtc_state->encoder_mask) {
/* in video mode, we hold an extra bandwidth reference
* as we cannot drop bandwidth at frame-done if any
* crtc is being used in video mode.
*/
if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
release_bandwidth = true;
/*
 * If disable is triggered while PSR is active (e.g. screen dim in
 * PSR), we need the encoder->crtc connection to process the device
 * sleep, so preserve it for the PSR sequence.
 */
if (!crtc->state->self_refresh_active)
dpu_encoder_assign_crtc(encoder, NULL);
}
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
crtc->base.id,
atomic_read(&dpu_crtc->frame_pending));
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
dpu_crtc->enabled = false;
if (atomic_read(&dpu_crtc->frame_pending)) {
trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
atomic_read(&dpu_crtc->frame_pending));
if (release_bandwidth)
dpu_core_perf_crtc_release_bw(crtc);
atomic_set(&dpu_crtc->frame_pending, 0);
}
dpu_core_perf_crtc_update(crtc, 0);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
memset(cstate->mixers, 0, sizeof(cstate->mixers));
cstate->num_mixers = 0;
/* disable clk & bw control until clk & bw properties are set */
cstate->bw_control = false;
cstate->bw_split_vote = false;
if (crtc->state->event && !crtc->state->active) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
bool request_bandwidth = false;
struct drm_crtc_state *old_crtc_state;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
pm_runtime_get_sync(crtc->dev->dev);
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
/* in video mode, we hold an extra bandwidth reference
* as we cannot drop bandwidth at frame-done if any
* crtc is being used in video mode.
*/
if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
request_bandwidth = true;
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
}
if (request_bandwidth)
atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
dpu_crtc->enabled = true;
if (!old_crtc_state->self_refresh_active) {
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_assign_crtc(encoder, crtc);
}
/* Enable/restore vblank irq handling */
drm_crtc_vblank_on(crtc);
}
static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
struct drm_crtc *crtc = cstate->crtc;
struct drm_encoder *encoder;
if (cstate->self_refresh_active)
return true;
drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) {
if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) {
return true;
}
}
return false;
}
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
const struct drm_plane_state *pstate;
struct drm_plane *plane;
int rc = 0;
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
crtc_state->active);
memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
return 0;
}
DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
/* force a full mode set if active state changed */
if (crtc_state->active_changed)
crtc_state->mode_changed = true;
if (cstate->num_mixers)
_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
/* FIXME: move this to dpu_plane_atomic_check? */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
if (IS_ERR_OR_NULL(pstate)) {
rc = PTR_ERR(pstate);
DPU_ERROR("%s: failed to get plane%d state, %d\n",
dpu_crtc->name, plane->base.id, rc);
return rc;
}
if (!pstate->visible)
continue;
dpu_pstate->needs_dirtyfb = needs_dirtyfb;
}
atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
rc = dpu_core_perf_crtc_check(crtc, crtc_state);
if (rc) {
DPU_ERROR("crtc%d failed performance check %d\n",
crtc->base.id, rc);
return rc;
}
return 0;
}
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
/*
* Normally we would iterate through encoder_mask in crtc state to find
* attached encoders. In this case, we might be disabling vblank _after_
* encoder_mask has been cleared.
*
* Instead, we "assign" a crtc to the encoder in enable and clear it in
* disable (which is also after encoder_mask is cleared). So instead of
* using encoder mask, we'll ask the encoder to toggle itself iff it's
* currently assigned to our crtc.
*
* Note also that this function cannot be called while crtc is disabled
* since we use drm_crtc_vblank_on/off. So we don't need to worry
* about the assigned crtcs being inconsistent with the current state
* (which means no need to worry about modeset locks).
*/
list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
dpu_crtc);
dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
}
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
struct dpu_crtc *dpu_crtc;
struct dpu_plane_state *pstate = NULL;
struct dpu_crtc_mixer *m;
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
struct drm_plane_state *state;
struct dpu_crtc_state *cstate;
int i, out_width;
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
mode = &crtc->state->adjusted_mode;
out_width = mode->hdisplay / cstate->num_mixers;
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
seq_puts(s, "\n");
for (i = 0; i < cstate->num_mixers; ++i) {
m = &cstate->mixers[i];
seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
out_width, mode->vdisplay);
}
seq_puts(s, "\n");
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
state = plane->state;
if (!pstate || !state)
continue;
seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
pstate->stage);
if (plane->state->fb) {
fb = plane->state->fb;
seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
fb->base.id, (char *) &fb->format->format,
fb->width, fb->height);
for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
seq_printf(s, "cpp[%d]:%u ",
i, fb->format->cpp[i]);
seq_puts(s, "\n\t");
seq_printf(s, "modifier:%8llu ", fb->modifier);
seq_puts(s, "\n");
seq_puts(s, "\t");
for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
seq_printf(s, "pitches[%d]:%8u ", i,
fb->pitches[i]);
seq_puts(s, "\n");
seq_puts(s, "\t");
for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
seq_printf(s, "offsets[%d]:%8u ", i,
fb->offsets[i]);
seq_puts(s, "\n");
}
seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
state->src_x, state->src_y, state->src_w, state->src_h);
seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
state->crtc_x, state->crtc_y, state->crtc_w,
state->crtc_h);
seq_printf(s, "\tsspp[0]:%s\n",
pstate->pipe.sspp->cap->name);
seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n",
pstate->pipe.multirect_mode, pstate->pipe.multirect_index);
if (pstate->r_pipe.sspp) {
seq_printf(s, "\tsspp[1]:%s\n",
pstate->r_pipe.sspp->cap->name);
seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n",
pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index);
}
seq_puts(s, "\n");
}
if (dpu_crtc->vblank_cb_count) {
ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
s64 diff_ms = ktime_to_ms(diff);
s64 fps = diff_ms ? div_s64(
dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
seq_printf(s,
"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
fps, dpu_crtc->vblank_cb_count,
ktime_to_ms(diff), dpu_crtc->play_count);
/* reset time & count for next measurement */
dpu_crtc->vblank_cb_count = 0;
dpu_crtc->vblank_cb_time = ktime_set(0, 0);
}
drm_modeset_unlock_all(crtc->dev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = s->private;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
seq_printf(s, "max_per_pipe_ib: %llu\n",
dpu_crtc->cur_perf.max_per_pipe_ib);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
debugfs_create_file("status", 0400,
crtc->debugfs_entry,
dpu_crtc, &_dpu_debugfs_status_fops);
debugfs_create_file("state", 0600,
crtc->debugfs_entry,
&dpu_crtc->base,
&dpu_crtc_debugfs_state_fops);
return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
return 0;
}
#endif /* CONFIG_DEBUG_FS */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
return _dpu_crtc_init_debugfs(crtc);
}
static const struct drm_crtc_funcs dpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = dpu_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = dpu_crtc_reset,
.atomic_duplicate_state = dpu_crtc_duplicate_state,
.atomic_destroy_state = dpu_crtc_destroy_state,
.atomic_print_state = dpu_crtc_atomic_print_state,
.late_register = dpu_crtc_late_register,
.verify_crc_source = dpu_crtc_verify_crc_source,
.set_crc_source = dpu_crtc_set_crc_source,
.enable_vblank = msm_crtc_enable_vblank,
.disable_vblank = msm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.get_vblank_counter = dpu_crtc_get_vblank_counter,
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
.atomic_disable = dpu_crtc_disable,
.atomic_enable = dpu_crtc_enable,
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
.atomic_flush = dpu_crtc_atomic_flush,
.get_scanout_position = dpu_crtc_get_scanout_position,
};
/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct drm_plane *cursor)
{
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
int i, ret;
dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
if (!dpu_crtc)
return ERR_PTR(-ENOMEM);
crtc = &dpu_crtc->base;
crtc->dev = dev;
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
init_completion(&dpu_crtc->frame_done_comp);
INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
list_add(&dpu_crtc->frame_events[i].list,
&dpu_crtc->frame_event_list);
kthread_init_work(&dpu_crtc->frame_events[i].work,
dpu_crtc_frame_event_work);
}
drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
if (dpu_kms->catalog->dspp_count)
drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
ret = drm_self_refresh_helper_init(crtc);
if (ret) {
DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
crtc->name, ret);
return ERR_PTR(ret);
}
DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
#include <linux/iopoll.h>
#define INTF_TIMING_ENGINE_EN 0x000
#define INTF_CONFIG 0x004
#define INTF_HSYNC_CTL 0x008
#define INTF_VSYNC_PERIOD_F0 0x00C
#define INTF_VSYNC_PERIOD_F1 0x010
#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
#define INTF_DISPLAY_V_START_F0 0x01C
#define INTF_DISPLAY_V_START_F1 0x020
#define INTF_DISPLAY_V_END_F0 0x024
#define INTF_DISPLAY_V_END_F1 0x028
#define INTF_ACTIVE_V_START_F0 0x02C
#define INTF_ACTIVE_V_START_F1 0x030
#define INTF_ACTIVE_V_END_F0 0x034
#define INTF_ACTIVE_V_END_F1 0x038
#define INTF_DISPLAY_HCTL 0x03C
#define INTF_ACTIVE_HCTL 0x040
#define INTF_BORDER_COLOR 0x044
#define INTF_UNDERFLOW_COLOR 0x048
#define INTF_HSYNC_SKEW 0x04C
#define INTF_POLARITY_CTL 0x050
#define INTF_TEST_CTL 0x054
#define INTF_TP_COLOR0 0x058
#define INTF_TP_COLOR1 0x05C
#define INTF_CONFIG2 0x060
#define INTF_DISPLAY_DATA_HCTL 0x064
#define INTF_ACTIVE_DATA_HCTL 0x068
#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
#define INTF_PANEL_FORMAT 0x090
#define INTF_FRAME_LINE_COUNT_EN 0x0A8
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
#define INTF_DEFLICKER_CONFIG 0x0F0
#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
#define INTF_TPG_ENABLE 0x100
#define INTF_TPG_MAIN_CONTROL 0x104
#define INTF_TPG_VIDEO_CONFIG 0x108
#define INTF_TPG_COMPONENT_LIMITS 0x10C
#define INTF_TPG_RECTANGLE 0x110
#define INTF_TPG_INITIAL_VALUE 0x114
#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
#define INTF_TPG_RGB_MAPPING 0x11C
#define INTF_PROG_FETCH_START 0x170
#define INTF_PROG_ROT_START 0x174
#define INTF_MISR_CTRL 0x180
#define INTF_MISR_SIGNATURE 0x184
#define INTF_MUX 0x25C
#define INTF_STATUS 0x26C
#define INTF_AVR_CONTROL 0x270
#define INTF_AVR_MODE 0x274
#define INTF_AVR_TRIGGER 0x278
#define INTF_AVR_VTOTAL 0x27C
#define INTF_TEAR_MDP_VSYNC_SEL 0x280
#define INTF_TEAR_TEAR_CHECK_EN 0x284
#define INTF_TEAR_SYNC_CONFIG_VSYNC 0x288
#define INTF_TEAR_SYNC_CONFIG_HEIGHT 0x28C
#define INTF_TEAR_SYNC_WRCOUNT 0x290
#define INTF_TEAR_VSYNC_INIT_VAL 0x294
#define INTF_TEAR_INT_COUNT_VAL 0x298
#define INTF_TEAR_SYNC_THRESH 0x29C
#define INTF_TEAR_START_POS 0x2A0
#define INTF_TEAR_RD_PTR_IRQ 0x2A4
#define INTF_TEAR_WR_PTR_IRQ 0x2A8
#define INTF_TEAR_OUT_LINE_COUNT 0x2AC
#define INTF_TEAR_LINE_COUNT 0x2B0
#define INTF_TEAR_AUTOREFRESH_CONFIG 0x2B4
#define INTF_CFG_ACTIVE_H_EN BIT(29)
#define INTF_CFG_ACTIVE_V_EN BIT(30)
#define INTF_CFG2_DATABUS_WIDEN BIT(0)
#define INTF_CFG2_DATA_HCTL_EN BIT(4)
#define INTF_CFG2_DCE_DATA_COMPRESS BIT(12)
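/*
 * Timing programming sketch: the engine counts in pixels, so vertical
 * positions are scaled by the line length in pixels (hsync_period),
 * e.g. display_v_start = (vsync_pulse_width + v_back_porch) *
 * hsync_period + hsync_skew.
 */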
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
const struct dpu_hw_intf_timing_params *p,
const struct dpu_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
u32 hsync_data_start_x, hsync_data_end_x;
u32 active_h_start, active_h_end;
u32 active_v_start, active_v_end;
u32 active_hctl, display_hctl, hsync_ctl;
u32 polarity_ctl, den_polarity;
u32 panel_format;
u32 intf_cfg, intf_cfg2 = 0;
u32 display_data_hctl = 0, active_data_hctl = 0;
u32 data_width;
bool dp_intf = false;
/* read interface_cfg */
intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
if (ctx->cap->type == INTF_DP)
dp_intf = true;
hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
p->h_front_porch;
vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
p->v_front_porch;
display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
hsync_period) + p->hsync_skew;
display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
p->hsync_skew - 1;
hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
hsync_end_x = hsync_period - p->h_front_porch - 1;
if (p->width != p->xres) { /* border fill added */
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
} else {
active_h_start = 0;
active_h_end = 0;
}
if (p->height != p->yres) { /* border fill added */
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
} else {
active_v_start = 0;
active_v_end = 0;
}
if (active_h_end) {
active_hctl = (active_h_end << 16) | active_h_start;
intf_cfg |= INTF_CFG_ACTIVE_H_EN;
} else {
active_hctl = 0;
}
if (active_v_end)
intf_cfg |= INTF_CFG_ACTIVE_V_EN;
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
/*
* DATA_HCTL_EN controls data timing which can be different from
* video timing. It is recommended to enable it for all cases, except
* if compression is enabled in 1 pixel per clock mode
*/
if (p->wide_bus_en)
intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
data_width = p->width;
hsync_data_start_x = hsync_start_x;
hsync_data_end_x = hsync_start_x + data_width - 1;
display_data_hctl = (hsync_data_end_x << 16) | hsync_data_start_x;
if (dp_intf) {
/* DP timing adjustment */
display_v_start += p->hsync_pulse_width + p->h_back_porch;
display_v_end -= p->h_front_porch;
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
active_hctl = (active_h_end << 16) | active_h_start;
display_hctl = active_hctl;
intf_cfg |= INTF_CFG_ACTIVE_H_EN | INTF_CFG_ACTIVE_V_EN;
}
den_polarity = 0;
polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
(p->vsync_polarity << 1) | /* VSYNC Polarity */
(p->hsync_polarity << 0); /* HSYNC Polarity */
if (!DPU_FORMAT_IS_YUV(fmt))
panel_format = (fmt->bits[C0_G_Y] |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C2_R_Cr] << 4) |
(0x21 << 8));
else
/* Interface treats all the pixel data in RGB888 format */
panel_format = (COLOR_8BIT |
(COLOR_8BIT << 2) |
(COLOR_8BIT << 4) |
(0x21 << 8));
DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
p->vsync_pulse_width * hsync_period);
DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
}
}
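/*
 * Illustrative worked example of the timing math above, using a
 * hypothetical 1920x1080@60 CEA-style mode (hfp=88, hpw=44, hbp=148,
 * vfp=4, vpw=5, vbp=36, hsync_skew=0) -- these numbers are assumed for
 * illustration, not taken from any catalog or panel:
 *
 *	hsync_period    = 44 + 148 + 1920 + 88       = 2200 pixels
 *	vsync_period    = 5 + 36 + 1080 + 4          = 1125 lines
 *	display_v_start = (5 + 36) * 2200 + 0        = 90200
 *	display_v_end   = (1125 - 4) * 2200 + 0 - 1  = 2466199
 *	hsync_start_x   = 148 + 44                   = 192
 *	hsync_end_x     = 2200 - 88 - 1              = 2111
 *	VSYNC_PERIOD_F0 = 1125 * 2200                = 2475000 pixel clocks
 */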
static void dpu_hw_intf_enable_timing_engine(
struct dpu_hw_intf *intf,
u8 enable)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
/* Note: Display interface select is handled in top block hw layer */
DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
}
static void dpu_hw_intf_setup_prg_fetch(
struct dpu_hw_intf *intf,
const struct dpu_hw_intf_prog_fetch *fetch)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
int fetch_enable;
/*
* Fetch should always be outside the active lines. If the fetching
* is programmed within active region, hardware behavior is unknown.
*/
fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
if (fetch->enable) {
fetch_enable |= BIT(31);
DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
fetch->fetch_start);
} else {
fetch_enable &= ~BIT(31);
}
DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
}
static void dpu_hw_intf_bind_pingpong_blk(
struct dpu_hw_intf *intf,
const enum dpu_pingpong pp)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 mux_cfg;
mux_cfg = DPU_REG_READ(c, INTF_MUX);
mux_cfg &= ~0xf;
if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
}
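/*
 * Worked example of the mux encoding above (values hypothetical): binding
 * this interface to PINGPONG_2 clears the low nibble of INTF_MUX and
 * programs (PINGPONG_2 - PINGPONG_0) & 0x7 = 0x2; passing pp == 0 instead
 * programs 0xf, which leaves the interface unbound.
 */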
static void dpu_hw_intf_get_status(
struct dpu_hw_intf *intf,
struct dpu_hw_intf_status *s)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
unsigned long cap = intf->cap->features;
if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
else
s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31));
if (s->is_en) {
s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
} else {
s->line_count = 0;
s->frame_count = 0;
}
}
static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c;
if (!intf)
return 0;
c = &intf->hw;
return DPU_REG_READ(c, INTF_LINE_COUNT);
}
static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count)
{
dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count);
}
static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
{
return dpu_hw_collect_misr(&intf->hw, INTF_MISR_CTRL, INTF_MISR_SIGNATURE, misr_value);
}
static int dpu_hw_intf_enable_te(struct dpu_hw_intf *intf,
struct dpu_hw_tear_check *te)
{
struct dpu_hw_blk_reg_map *c;
int cfg;
if (!intf)
return -EINVAL;
c = &intf->hw;
cfg = BIT(19); /* VSYNC_COUNTER_EN */
if (te->hw_vsync_mode)
cfg |= BIT(20);
cfg |= te->vsync_count;
DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
DPU_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
DPU_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
DPU_REG_WRITE(c, INTF_TEAR_START_POS, te->start_pos);
DPU_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
((te->sync_threshold_continue << 16) |
te->sync_threshold_start));
DPU_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
(te->start_pos + te->sync_threshold_start + 1));
DPU_REG_WRITE(c, INTF_TEAR_TEAR_CHECK_EN, 1);
return 0;
}
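/*
 * Illustrative tearcheck programming, assuming hypothetical values
 * hw_vsync_mode = 1 and vsync_count = 0x3e8:
 *
 *	cfg = BIT(19) | BIT(20) | 0x3e8 = 0x1803e8
 *
 * and the write pointer trigger is seeded just past the start window:
 *
 *	INTF_TEAR_SYNC_WRCOUNT = start_pos + sync_threshold_start + 1
 */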
static void dpu_hw_intf_setup_autorefresh_config(struct dpu_hw_intf *intf,
u32 frame_count, bool enable)
{
struct dpu_hw_blk_reg_map *c;
u32 refresh_cfg;
c = &intf->hw;
refresh_cfg = DPU_REG_READ(c, INTF_TEAR_AUTOREFRESH_CONFIG);
if (enable)
refresh_cfg = BIT(31) | frame_count;
else
refresh_cfg &= ~BIT(31);
DPU_REG_WRITE(c, INTF_TEAR_AUTOREFRESH_CONFIG, refresh_cfg);
}
/*
* dpu_hw_intf_get_autorefresh_config - Get autorefresh config from HW
* @intf: DPU intf structure
* @frame_count: Used to return the current frame count from hw
*
* Returns: True if autorefresh enabled, false if disabled.
*/
static bool dpu_hw_intf_get_autorefresh_config(struct dpu_hw_intf *intf,
u32 *frame_count)
{
u32 val = DPU_REG_READ(&intf->hw, INTF_TEAR_AUTOREFRESH_CONFIG);
if (frame_count != NULL)
*frame_count = val & 0xffff;
return !!((val & BIT(31)) >> 31);
}
static int dpu_hw_intf_disable_te(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c;
if (!intf)
return -EINVAL;
c = &intf->hw;
DPU_REG_WRITE(c, INTF_TEAR_TEAR_CHECK_EN, 0);
return 0;
}
static int dpu_hw_intf_connect_external_te(struct dpu_hw_intf *intf,
bool enable_external_te)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 cfg;
int orig;
if (!intf)
return -EINVAL;
c = &intf->hw;
cfg = DPU_REG_READ(c, INTF_TEAR_SYNC_CONFIG_VSYNC);
orig = (bool)(cfg & BIT(20));
if (enable_external_te)
cfg |= BIT(20);
else
cfg &= ~BIT(20);
DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
trace_dpu_intf_connect_ext_te(intf->idx - INTF_0, cfg);
return orig;
}
static int dpu_hw_intf_get_vsync_info(struct dpu_hw_intf *intf,
struct dpu_hw_pp_vsync_info *info)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 val;
if (!intf || !info)
return -EINVAL;
c = &intf->hw;
val = DPU_REG_READ(c, INTF_TEAR_VSYNC_INIT_VAL);
info->rd_ptr_init_val = val & 0xffff;
val = DPU_REG_READ(c, INTF_TEAR_INT_COUNT_VAL);
info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
info->rd_ptr_line_count = val & 0xffff;
val = DPU_REG_READ(c, INTF_TEAR_LINE_COUNT);
info->wr_ptr_line_count = val & 0xffff;
val = DPU_REG_READ(c, INTF_FRAME_COUNT);
info->intf_frame_count = val;
return 0;
}
static void dpu_hw_intf_vsync_sel(struct dpu_hw_intf *intf,
u32 vsync_source)
{
struct dpu_hw_blk_reg_map *c;
if (!intf)
return;
c = &intf->hw;
DPU_REG_WRITE(c, INTF_TEAR_MDP_VSYNC_SEL, (vsync_source & 0xf));
}
static void dpu_hw_intf_disable_autorefresh(struct dpu_hw_intf *intf,
uint32_t encoder_id, u16 vdisplay)
{
struct dpu_hw_pp_vsync_info info;
int trial = 0;
/* If autorefresh is already disabled, we have nothing to do */
if (!dpu_hw_intf_get_autorefresh_config(intf, NULL))
return;
/*
 * If autorefresh is enabled, disable it and make sure it is safe to
 * proceed with the current frame commit/push. The sequence followed is:
 * 1. Disable TE
 * 2. Disable autorefresh config
 * 3. Poll for frame transfer ongoing to be false
 * 4. Enable TE back
*/
dpu_hw_intf_connect_external_te(intf, false);
dpu_hw_intf_setup_autorefresh_config(intf, 0, false);
do {
udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
DPU_ERROR("enc%d intf%d disable autorefresh failed\n",
encoder_id, intf->idx - INTF_0);
break;
}
trial++;
dpu_hw_intf_get_vsync_info(intf, &info);
} while (info.wr_ptr_line_count > 0 &&
info.wr_ptr_line_count < vdisplay);
dpu_hw_intf_connect_external_te(intf, true);
DPU_DEBUG("enc%d intf%d disabled autorefresh\n",
encoder_id, intf->idx - INTF_0);
}
static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
struct dpu_hw_intf_cmd_mode_cfg *cmd_mode_cfg)
{
u32 intf_cfg2 = DPU_REG_READ(&ctx->hw, INTF_CONFIG2);
if (cmd_mode_cfg->data_compress)
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
}
static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
unsigned long cap, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->get_line_count = dpu_hw_intf_get_line_count;
if (cap & BIT(DPU_INTF_INPUT_CTRL))
ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
ops->setup_misr = dpu_hw_intf_setup_misr;
ops->collect_misr = dpu_hw_intf_collect_misr;
if (cap & BIT(DPU_INTF_TE)) {
ops->enable_tearcheck = dpu_hw_intf_enable_te;
ops->disable_tearcheck = dpu_hw_intf_disable_te;
ops->connect_external_te = dpu_hw_intf_connect_external_te;
ops->vsync_sel = dpu_hw_intf_vsync_sel;
ops->disable_autorefresh = dpu_hw_intf_disable_autorefresh;
}
if (mdss_rev->core_major_ver >= 7)
ops->program_intf_cmd_cfg = dpu_hw_intf_program_intf_cmd_cfg;
}
struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_intf *c;
if (cfg->type == INTF_NONE) {
DPU_DEBUG("Skip intf %d with type NONE\n", cfg->id - INTF_0);
return NULL;
}
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_INTF;
/*
* Assign ops
*/
c->idx = cfg->id;
c->cap = cfg;
_setup_intf_ops(&c->ops, c->cap->features, mdss_rev);
return c;
}
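/*
 * Sketch of the expected caller pattern (illustrative, not a verbatim
 * upstream caller): dpu_hw_intf_init() returns NULL for INTF_NONE and an
 * ERR_PTR() on allocation failure, so both cases must be handled:
 *
 *	struct dpu_hw_intf *intf;
 *
 *	intf = dpu_hw_intf_init(cfg, mmio, mdss_rev);
 *	if (IS_ERR(intf))
 *		return PTR_ERR(intf);
 *	if (!intf)
 *		return 0;	(INTF_NONE, nothing to do)
 *	...
 *	dpu_hw_intf_destroy(intf);
 */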
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
{
kfree(intf);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_sspp.h"
#include "dpu_kms.h"
#include "msm_mdss.h"
#include <drm/drm_file.h>
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
/* SSPP registers */
#define SSPP_SRC_SIZE 0x00
#define SSPP_SRC_XY 0x08
#define SSPP_OUT_SIZE 0x0c
#define SSPP_OUT_XY 0x10
#define SSPP_SRC0_ADDR 0x14
#define SSPP_SRC1_ADDR 0x18
#define SSPP_SRC2_ADDR 0x1C
#define SSPP_SRC3_ADDR 0x20
#define SSPP_SRC_YSTRIDE0 0x24
#define SSPP_SRC_YSTRIDE1 0x28
#define SSPP_SRC_FORMAT 0x30
#define SSPP_SRC_UNPACK_PATTERN 0x34
#define SSPP_SRC_OP_MODE 0x38
#define SSPP_SRC_CONSTANT_COLOR 0x3c
#define SSPP_EXCL_REC_CTL 0x40
#define SSPP_UBWC_STATIC_CTRL 0x44
#define SSPP_FETCH_CONFIG 0x48
#define SSPP_DANGER_LUT 0x60
#define SSPP_SAFE_LUT 0x64
#define SSPP_CREQ_LUT 0x68
#define SSPP_QOS_CTRL 0x6C
#define SSPP_SRC_ADDR_SW_STATUS 0x70
#define SSPP_CREQ_LUT_0 0x74
#define SSPP_CREQ_LUT_1 0x78
#define SSPP_DECIMATION_CONFIG 0xB4
#define SSPP_SW_PIX_EXT_C0_LR 0x100
#define SSPP_SW_PIX_EXT_C0_TB 0x104
#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
#define SSPP_SW_PIX_EXT_C3_LR 0x120
#define SSPP_SW_PIX_EXT_C3_TB 0x124
#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
#define SSPP_TRAFFIC_SHAPER 0x130
#define SSPP_CDP_CNTL 0x134
#define SSPP_UBWC_ERROR_STATUS 0x138
#define SSPP_CDP_CNTL_REC1 0x13c
#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
#define SSPP_TRAFFIC_SHAPER_REC1 0x158
#define SSPP_OUT_SIZE_REC1 0x160
#define SSPP_OUT_XY_REC1 0x164
#define SSPP_SRC_XY_REC1 0x168
#define SSPP_SRC_SIZE_REC1 0x16C
#define SSPP_MULTIRECT_OPMODE 0x170
#define SSPP_SRC_FORMAT_REC1 0x174
#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
#define SSPP_SRC_OP_MODE_REC1 0x17C
#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
#define SSPP_EXCL_REC_SIZE_REC1 0x184
#define SSPP_EXCL_REC_XY_REC1 0x188
#define SSPP_EXCL_REC_SIZE 0x1B4
#define SSPP_EXCL_REC_XY 0x1B8
/* SSPP_SRC_OP_MODE & OP_MODE_REC1 */
#define MDSS_MDP_OP_DEINTERLACE BIT(22)
#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
#define MDSS_MDP_OP_IGC_EN BIT(16)
#define MDSS_MDP_OP_FLIP_UD BIT(14)
#define MDSS_MDP_OP_FLIP_LR BIT(13)
#define MDSS_MDP_OP_BWC_EN BIT(0)
#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
/* SSPP_QOS_CTRL */
#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
/* DPU_SSPP_SCALER_QSEED2 */
#define SSPP_VIG_OP_MODE 0x0
#define SCALE_CONFIG 0x04
#define COMP0_3_PHASE_STEP_X 0x10
#define COMP0_3_PHASE_STEP_Y 0x14
#define COMP1_2_PHASE_STEP_X 0x18
#define COMP1_2_PHASE_STEP_Y 0x1c
#define COMP0_3_INIT_PHASE_X 0x20
#define COMP0_3_INIT_PHASE_Y 0x24
#define COMP1_2_INIT_PHASE_X 0x28
#define COMP1_2_INIT_PHASE_Y 0x2C
#define VIG_0_QSEED2_SHARP 0x30
/* SSPP_TRAFFIC_SHAPER and _REC1 */
#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
/*
* Definitions for ViG op modes
*/
#define VIG_OP_CSC_DST_DATAFMT BIT(19)
#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
#define VIG_OP_CSC_EN BIT(17)
#define VIG_OP_MEM_PROT_CONT BIT(15)
#define VIG_OP_MEM_PROT_VAL BIT(14)
#define VIG_OP_MEM_PROT_SAT BIT(13)
#define VIG_OP_MEM_PROT_HUE BIT(12)
#define VIG_OP_HIST BIT(8)
#define VIG_OP_SKY_COL BIT(7)
#define VIG_OP_FOIL BIT(6)
#define VIG_OP_SKIN_COL BIT(5)
#define VIG_OP_PA_EN BIT(4)
#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
#define VIG_OP_MEM_PROT_BLEND BIT(1)
/*
* Definitions for CSC 10 op modes
*/
#define SSPP_VIG_CSC_10_OP_MODE 0x0
#define VIG_CSC_10_SRC_DATAFMT BIT(1)
#define VIG_CSC_10_EN BIT(0)
#define CSC_10BIT_OFFSET 4
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
static void dpu_hw_sspp_setup_multirect(struct dpu_sw_pipe *pipe)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
u32 mode_mask;
if (!ctx)
return;
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
		/*
		 * If the rect index is RECT_SOLO, no virtual plane can be
		 * sharing the same SSPP id, so disable multirect entirely.
		 */
mode_mask = 0;
} else {
mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE);
mode_mask |= pipe->multirect_index;
if (pipe->multirect_mode == DPU_SSPP_MULTIRECT_TIME_MX)
mode_mask |= BIT(2);
else
mode_mask &= ~BIT(2);
}
DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE, mode_mask);
}
static void _sspp_setup_opmode(struct dpu_hw_sspp *ctx,
u32 mask, u8 en)
{
const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
u32 opmode;
if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
!test_bit(DPU_SSPP_CSC, &ctx->cap->features))
return;
opmode = DPU_REG_READ(&ctx->hw, sblk->scaler_blk.base + SSPP_VIG_OP_MODE);
if (en)
opmode |= mask;
else
opmode &= ~mask;
DPU_REG_WRITE(&ctx->hw, sblk->scaler_blk.base + SSPP_VIG_OP_MODE, opmode);
}
static void _sspp_setup_csc10_opmode(struct dpu_hw_sspp *ctx,
u32 mask, u8 en)
{
const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
u32 opmode;
opmode = DPU_REG_READ(&ctx->hw, sblk->csc_blk.base + SSPP_VIG_CSC_10_OP_MODE);
if (en)
opmode |= mask;
else
opmode &= ~mask;
DPU_REG_WRITE(&ctx->hw, sblk->csc_blk.base + SSPP_VIG_CSC_10_OP_MODE, opmode);
}
/*
 * Set up the source pixel format, flips and related fetch configuration
 */
static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
const struct dpu_format *fmt, u32 flags)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
struct dpu_hw_blk_reg_map *c;
u32 chroma_samp, unpack, src_format;
u32 opmode = 0;
u32 fast_clear = 0;
u32 op_mode_off, unpack_pat_off, format_off;
if (!ctx || !fmt)
return;
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
pipe->multirect_index == DPU_SSPP_RECT_0) {
op_mode_off = SSPP_SRC_OP_MODE;
unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
format_off = SSPP_SRC_FORMAT;
} else {
op_mode_off = SSPP_SRC_OP_MODE_REC1;
unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
format_off = SSPP_SRC_FORMAT_REC1;
}
c = &ctx->hw;
opmode = DPU_REG_READ(c, op_mode_off);
opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
if (flags & DPU_SSPP_FLIP_LR)
opmode |= MDSS_MDP_OP_FLIP_LR;
if (flags & DPU_SSPP_FLIP_UD)
opmode |= MDSS_MDP_OP_FLIP_UD;
chroma_samp = fmt->chroma_sample;
if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
if (chroma_samp == DPU_CHROMA_H2V1)
chroma_samp = DPU_CHROMA_H1V2;
else if (chroma_samp == DPU_CHROMA_H1V2)
chroma_samp = DPU_CHROMA_H2V1;
}
src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
if (flags & DPU_SSPP_ROT_90)
src_format |= BIT(11); /* ROT90 */
if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
src_format |= BIT(8); /* SRCC3_EN */
if (flags & DPU_SSPP_SOLID_FILL)
src_format |= BIT(22);
unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
(fmt->element[1] << 8) | (fmt->element[0] << 0);
src_format |= ((fmt->unpack_count - 1) << 12) |
(fmt->unpack_tight << 17) |
(fmt->unpack_align_msb << 18) |
((fmt->bpp - 1) << 9);
if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
if (DPU_FORMAT_IS_UBWC(fmt))
opmode |= MDSS_MDP_OP_BWC_EN;
src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
DPU_FETCH_CONFIG_RESET_VALUE |
ctx->ubwc->highest_bank_bit << 18);
switch (ctx->ubwc->ubwc_enc_version) {
case UBWC_1_0:
fast_clear = fmt->alpha_enable ? BIT(31) : 0;
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
fast_clear | (ctx->ubwc->ubwc_swizzle & 0x1) |
BIT(8) |
(ctx->ubwc->highest_bank_bit << 4));
break;
case UBWC_2_0:
fast_clear = fmt->alpha_enable ? BIT(31) : 0;
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
fast_clear | (ctx->ubwc->ubwc_swizzle) |
(ctx->ubwc->highest_bank_bit << 4));
break;
case UBWC_3_0:
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
BIT(30) | (ctx->ubwc->ubwc_swizzle) |
(ctx->ubwc->highest_bank_bit << 4));
break;
case UBWC_4_0:
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
break;
}
}
opmode |= MDSS_MDP_OP_PE_OVERRIDE;
/* if this is YUV pixel format, enable CSC */
if (DPU_FORMAT_IS_YUV(fmt))
src_format |= BIT(15);
if (DPU_FORMAT_IS_DX(fmt))
src_format |= BIT(14);
/* update scaler opmode, if appropriate */
if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
DPU_FORMAT_IS_YUV(fmt));
else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
_sspp_setup_csc10_opmode(ctx,
VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
DPU_FORMAT_IS_YUV(fmt));
DPU_REG_WRITE(c, format_off, src_format);
DPU_REG_WRITE(c, unpack_pat_off, unpack);
DPU_REG_WRITE(c, op_mode_off, opmode);
/* clear previous UBWC error */
DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS, BIT(31));
}
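/*
 * Worked example of the SSPP_SRC_FORMAT packing above for a hypothetical
 * interleaved 32bpp RGBA format (unpack_count = 4, bpp = 4; all values
 * assumed for illustration):
 *
 *	(fmt->unpack_count - 1) << 12  ->  3 << 12 = 0x3000
 *	(fmt->bpp - 1) << 9            ->  3 << 9  = 0x0600
 *
 * plus the per-component bit-depth fields in bits 0..7 and, for an alpha
 * format, SRCC3_EN (BIT(8)).
 */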
static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_sspp *ctx,
struct dpu_hw_pixel_ext *pe_ext)
{
struct dpu_hw_blk_reg_map *c;
u8 color;
u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
const u32 bytemask = 0xff;
const u32 shortmask = 0xffff;
if (!ctx || !pe_ext)
return;
c = &ctx->hw;
/* program SW pixel extension override for all pipes*/
for (color = 0; color < DPU_MAX_PLANES; color++) {
/* color 2 has the same set of registers as color 1 */
if (color == 2)
continue;
lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
((pe_ext->right_rpt[color] & bytemask) << 16)|
((pe_ext->left_ftch[color] & bytemask) << 8)|
(pe_ext->left_rpt[color] & bytemask);
tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
((pe_ext->btm_rpt[color] & bytemask) << 16)|
((pe_ext->top_ftch[color] & bytemask) << 8)|
(pe_ext->top_rpt[color] & bytemask);
tot_req_pixels[color] = (((pe_ext->roi_h[color] +
pe_ext->num_ext_pxls_top[color] +
pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
((pe_ext->roi_w[color] +
pe_ext->num_ext_pxls_left[color] +
pe_ext->num_ext_pxls_right[color]) & shortmask);
}
/* color 0 */
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR, lr_pe[0]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB, tb_pe[0]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS,
tot_req_pixels[0]);
/* color 1 and color 2 */
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR, lr_pe[1]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB, tb_pe[1]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS,
tot_req_pixels[1]);
/* color 3 */
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR, lr_pe[3]);
	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB, tb_pe[3]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS,
tot_req_pixels[3]);
}
static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
const struct dpu_format *format)
{
if (!ctx || !scaler3_cfg)
return;
dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg,
ctx->cap->sblk->scaler_blk.base,
ctx->cap->sblk->scaler_blk.version,
format);
}
static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx)
{
if (!ctx)
return 0;
return dpu_hw_get_scaler3_ver(&ctx->hw,
ctx->cap->sblk->scaler_blk.base);
}
/*
 * dpu_hw_sspp_setup_rects() - program source and destination rectangles
 * for the given software pipe
 */
static void dpu_hw_sspp_setup_rects(struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *cfg)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
struct dpu_hw_blk_reg_map *c;
u32 src_size, src_xy, dst_size, dst_xy;
u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
if (!ctx || !cfg)
return;
c = &ctx->hw;
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
pipe->multirect_index == DPU_SSPP_RECT_0) {
src_size_off = SSPP_SRC_SIZE;
src_xy_off = SSPP_SRC_XY;
out_size_off = SSPP_OUT_SIZE;
out_xy_off = SSPP_OUT_XY;
} else {
src_size_off = SSPP_SRC_SIZE_REC1;
src_xy_off = SSPP_SRC_XY_REC1;
out_size_off = SSPP_OUT_SIZE_REC1;
out_xy_off = SSPP_OUT_XY_REC1;
}
/* src and dest rect programming */
src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
src_size = (drm_rect_height(&cfg->src_rect) << 16) |
drm_rect_width(&cfg->src_rect);
dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
drm_rect_width(&cfg->dst_rect);
/* rectangle register programming */
DPU_REG_WRITE(c, src_size_off, src_size);
DPU_REG_WRITE(c, src_xy_off, src_xy);
DPU_REG_WRITE(c, out_size_off, dst_size);
DPU_REG_WRITE(c, out_xy_off, dst_xy);
}
static void dpu_hw_sspp_setup_sourceaddress(struct dpu_sw_pipe *pipe,
struct dpu_hw_fmt_layout *layout)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
u32 ystride0, ystride1;
int i;
if (!ctx)
return;
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
for (i = 0; i < ARRAY_SIZE(layout->plane_addr); i++)
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + i * 0x4,
layout->plane_addr[i]);
} else if (pipe->multirect_index == DPU_SSPP_RECT_0) {
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR,
layout->plane_addr[0]);
DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR,
layout->plane_addr[2]);
} else {
DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR,
layout->plane_addr[0]);
DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR,
layout->plane_addr[2]);
}
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
ystride0 = (layout->plane_pitch[0]) |
(layout->plane_pitch[1] << 16);
ystride1 = (layout->plane_pitch[2]) |
(layout->plane_pitch[3] << 16);
} else {
ystride0 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE0);
ystride1 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE1);
if (pipe->multirect_index == DPU_SSPP_RECT_0) {
ystride0 = (ystride0 & 0xFFFF0000) |
(layout->plane_pitch[0] & 0x0000FFFF);
ystride1 = (ystride1 & 0xFFFF0000)|
(layout->plane_pitch[2] & 0x0000FFFF);
} else {
ystride0 = (ystride0 & 0x0000FFFF) |
((layout->plane_pitch[0] << 16) &
0xFFFF0000);
ystride1 = (ystride1 & 0x0000FFFF) |
((layout->plane_pitch[2] << 16) &
0xFFFF0000);
}
}
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE0, ystride0);
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE1, ystride1);
}
static void dpu_hw_sspp_setup_csc(struct dpu_hw_sspp *ctx,
const struct dpu_csc_cfg *data)
{
u32 offset;
bool csc10 = false;
if (!ctx || !data)
return;
offset = ctx->cap->sblk->csc_blk.base;
if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
offset += CSC_10BIT_OFFSET;
csc10 = true;
}
dpu_hw_csc_setup(&ctx->hw, offset, data, csc10);
}
static void dpu_hw_sspp_setup_solidfill(struct dpu_sw_pipe *pipe, u32 color)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
struct dpu_hw_fmt_layout cfg;
if (!ctx)
return;
/* cleanup source addresses */
memset(&cfg, 0, sizeof(cfg));
ctx->ops.setup_sourceaddress(pipe, &cfg);
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
pipe->multirect_index == DPU_SSPP_RECT_0)
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR, color);
else
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1,
color);
}
static void dpu_hw_sspp_setup_qos_lut(struct dpu_hw_sspp *ctx,
struct dpu_hw_qos_cfg *cfg)
{
if (!ctx || !cfg)
return;
_dpu_hw_setup_qos_lut(&ctx->hw, SSPP_DANGER_LUT,
test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features),
cfg);
}
static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_sspp *ctx,
bool danger_safe_en)
{
if (!ctx)
return;
DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL,
danger_safe_en ? SSPP_QOS_CTRL_DANGER_SAFE_EN : 0);
}
static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
const struct dpu_format *fmt,
bool enable)
{
struct dpu_hw_sspp *ctx = pipe->sspp;
u32 cdp_cntl_offset = 0;
if (!ctx)
return;
if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
pipe->multirect_index == DPU_SSPP_RECT_0)
cdp_cntl_offset = SSPP_CDP_CNTL;
else
cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
dpu_setup_cdp(&ctx->hw, cdp_cntl_offset, fmt, enable);
}
static void _setup_layer_ops(struct dpu_hw_sspp *c,
unsigned long features)
{
c->ops.setup_format = dpu_hw_sspp_setup_format;
c->ops.setup_rects = dpu_hw_sspp_setup_rects;
c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
if (test_bit(DPU_SSPP_QOS, &features)) {
c->ops.setup_qos_lut = dpu_hw_sspp_setup_qos_lut;
c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
}
if (test_bit(DPU_SSPP_CSC, &features) ||
test_bit(DPU_SSPP_CSC_10BIT, &features))
c->ops.setup_csc = dpu_hw_sspp_setup_csc;
if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
}
if (test_bit(DPU_SSPP_CDP, &features))
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
#ifdef CONFIG_DEBUG_FS
int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
struct dentry *entry)
{
const struct dpu_sspp_cfg *cfg = hw_pipe->cap;
const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
struct dentry *debugfs_root;
char sspp_name[32];
snprintf(sspp_name, sizeof(sspp_name), "%d", hw_pipe->idx);
/* create overall sub-directory for the pipe */
debugfs_root =
debugfs_create_dir(sspp_name, entry);
/* don't error check these */
debugfs_create_xul("features", 0600,
debugfs_root, (unsigned long *)&hw_pipe->cap->features);
/* add register dump support */
dpu_debugfs_create_regset32("src_blk", 0400,
debugfs_root,
cfg->base,
cfg->len,
kms);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
dpu_debugfs_create_regset32("scaler_blk", 0400,
debugfs_root,
sblk->scaler_blk.base + cfg->base,
sblk->scaler_blk.len,
kms);
if (cfg->features & BIT(DPU_SSPP_CSC) ||
cfg->features & BIT(DPU_SSPP_CSC_10BIT))
dpu_debugfs_create_regset32("csc_blk", 0400,
debugfs_root,
sblk->csc_blk.base + cfg->base,
sblk->csc_blk.len,
kms);
debugfs_create_u32("xin_id",
0400,
debugfs_root,
(u32 *) &cfg->xin_id);
debugfs_create_u32("clk_ctrl",
0400,
debugfs_root,
(u32 *) &cfg->clk_ctrl);
return 0;
}
#endif
struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
void __iomem *addr, const struct msm_mdss_data *mdss_data)
{
struct dpu_hw_sspp *hw_pipe;
if (!addr)
return ERR_PTR(-EINVAL);
hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
if (!hw_pipe)
return ERR_PTR(-ENOMEM);
hw_pipe->hw.blk_addr = addr + cfg->base;
hw_pipe->hw.log_mask = DPU_DBG_MASK_SSPP;
/* Assign ops */
hw_pipe->ubwc = mdss_data;
hw_pipe->idx = cfg->id;
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
return hw_pipe;
}
void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx)
{
kfree(ctx);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
#include "dpu_kms.h"
#define FLD_SPLIT_DISPLAY_CMD BIT(1)
#define FLD_SMART_PANEL_FREE_RUN BIT(2)
#define FLD_INTF_1_SW_TRG_MUX BIT(4)
#define FLD_INTF_2_SW_TRG_MUX BIT(8)
#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
#define TRAFFIC_SHAPER_EN BIT(31)
#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
#define MDP_TICK_COUNT 16
#define XO_CLK_RATE 19200
#define MS_TICKS_IN_SEC 1000
#define CALCULATE_WD_LOAD_VALUE(fps) \
((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
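/*
 * Worked example for CALCULATE_WD_LOAD_VALUE(): the 19.2 MHz XO divided by
 * MDP_TICK_COUNT gives a 1.2 MHz watchdog tick, so for a hypothetical
 * fps = 60:
 *
 *	(1000 * 19200) / (16 * 60) = 20000 ticks per emulated vsync
 */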
static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
struct split_pipe_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 upper_pipe = 0;
u32 lower_pipe = 0;
if (!mdp || !cfg)
return;
c = &mdp->hw;
if (cfg->en) {
if (cfg->mode == INTF_MODE_CMD) {
lower_pipe = FLD_SPLIT_DISPLAY_CMD;
/* interface controlling sw trigger */
if (cfg->intf == INTF_2)
lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
else
lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
upper_pipe = lower_pipe;
} else {
if (cfg->intf == INTF_2) {
lower_pipe = FLD_INTF_1_SW_TRG_MUX;
upper_pipe = FLD_INTF_2_SW_TRG_MUX;
} else {
lower_pipe = FLD_INTF_2_SW_TRG_MUX;
upper_pipe = FLD_INTF_1_SW_TRG_MUX;
}
}
}
DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_off, bit_off;
u32 reg_val, new_val;
bool clk_forced_on;
if (!mdp)
return false;
c = &mdp->hw;
if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
return false;
reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
reg_val = DPU_REG_READ(c, reg_off);
if (enable)
new_val = reg_val | BIT(bit_off);
else
new_val = reg_val & ~BIT(bit_off);
DPU_REG_WRITE(c, reg_off, new_val);
clk_forced_on = !(reg_val & BIT(bit_off));
return clk_forced_on;
}
static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
struct dpu_danger_safe_status *status)
{
struct dpu_hw_blk_reg_map *c;
u32 value;
if (!mdp || !status)
return;
c = &mdp->hw;
value = DPU_REG_READ(c, DANGER_STATUS);
status->mdp = (value >> 0) & 0x3;
status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
}
static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 reg, wd_load_value, wd_ctl, wd_ctl2;
if (!mdp || !cfg)
return;
c = &mdp->hw;
if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
switch (cfg->vsync_source) {
case DPU_VSYNC_SOURCE_WD_TIMER_4:
wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_4_CTL;
wd_ctl2 = MDP_WD_TIMER_4_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_3:
wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_3_CTL;
wd_ctl2 = MDP_WD_TIMER_3_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_2:
wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_2_CTL;
wd_ctl2 = MDP_WD_TIMER_2_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_1:
wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_1_CTL;
wd_ctl2 = MDP_WD_TIMER_1_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_0:
default:
wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_0_CTL;
wd_ctl2 = MDP_WD_TIMER_0_CTL2;
break;
}
DPU_REG_WRITE(c, wd_load_value,
CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
reg = DPU_REG_READ(c, wd_ctl2);
reg |= BIT(8); /* enable heartbeat timer */
reg |= BIT(0); /* enable WD timer */
DPU_REG_WRITE(c, wd_ctl2, reg);
/* make sure that timers are enabled/disabled for vsync state */
wmb();
}
}
static void dpu_hw_setup_vsync_source_and_vsync_sel(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 reg, i;
static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
return;
c = &mdp->hw;
reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
for (i = 0; i < cfg->pp_count; i++) {
int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
if (pp_idx >= ARRAY_SIZE(pp_offset))
continue;
reg &= ~(0xf << pp_offset[pp_idx]);
reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
}
DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
dpu_hw_setup_vsync_source(mdp, cfg);
}
static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
struct dpu_danger_safe_status *status)
{
struct dpu_hw_blk_reg_map *c;
u32 value;
if (!mdp || !status)
return;
c = &mdp->hw;
value = DPU_REG_READ(c, SAFE_STATUS);
status->mdp = (value >> 0) & 0x1;
status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
}
static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
{
struct dpu_hw_blk_reg_map *c;
if (!mdp)
return;
c = &mdp->hw;
DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
}
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
unsigned long cap)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
if (cap & BIT(DPU_MDP_VSYNC_SEL))
ops->setup_vsync_source = dpu_hw_setup_vsync_source_and_vsync_sel;
else
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
ops->get_safe_status = dpu_hw_get_safe_status;
if (cap & BIT(DPU_MDP_AUDIO_SELECT))
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mdp *mdp;
if (!addr)
return ERR_PTR(-EINVAL);
mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
if (!mdp)
return ERR_PTR(-ENOMEM);
mdp->hw.blk_addr = addr + cfg->base;
mdp->hw.log_mask = DPU_DBG_MASK_TOP;
/*
* Assign ops
*/
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
return mdp;
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
{
kfree(mdp);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/delay.h>
#include "dpu_vbif.h"
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
{
if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
return dpu_kms->hw_vbif[vbif_idx];
return NULL;
}
static const char *dpu_vbif_name(enum dpu_vbif idx)
{
switch (idx) {
case VBIF_RT:
return "VBIF_RT";
case VBIF_NRT:
return "VBIF_NRT";
default:
return "??";
}
}
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
* @xin_id: Client interface identifier
* @return: 0 if success; error code otherwise
*/
static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
{
ktime_t timeout;
bool status;
int rc;
if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
for (;;) {
status = vbif->ops.get_halt_ctrl(vbif, xin_id);
if (status)
break;
if (ktime_compare_safe(ktime_get(), timeout) > 0) {
status = vbif->ops.get_halt_ctrl(vbif, xin_id);
break;
}
usleep_range(501, 1000);
}
if (!status) {
rc = -ETIMEDOUT;
DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
dpu_vbif_name(vbif->idx), xin_id);
} else {
rc = 0;
DRM_DEBUG_ATOMIC("%s client %d is halted\n",
dpu_vbif_name(vbif->idx), xin_id);
}
return rc;
}
/**
* _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
* @vbif: Pointer to hardware vbif driver
* @ot_lim: Pointer to OT limit to be modified
* @params: Pointer to usecase parameters
*/
static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
{
u64 pps;
const struct dpu_vbif_dynamic_ot_tbl *tbl;
u32 i;
if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
return;
/* Dynamic OT setting done only for WFD */
if (!params->is_wfd)
return;
pps = params->frame_rate;
pps *= params->width;
pps *= params->height;
tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
&vbif->cap->dynamic_ot_wr_tbl;
for (i = 0; i < tbl->count; i++) {
if (pps <= tbl->cfg[i].pps) {
*ot_lim = tbl->cfg[i].ot_limit;
break;
}
}
DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
dpu_vbif_name(vbif->idx), params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
}
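/*
 * Worked example for the pps computation above (hypothetical WFD usecase):
 * a 1920x1080@60 writeback gives pps = 1920 * 1080 * 60 = 124416000, and
 * the loop then picks the OT limit of the first table entry whose pps
 * threshold is >= that value.
 */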
/**
* _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
* @vbif: Pointer to hardware vbif driver
* @params: Pointer to usecase parameters
* @return: OT limit
*/
static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
struct dpu_vbif_set_ot_params *params)
{
u32 ot_lim = 0;
u32 val;
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
		return 0;
}
if (vbif->cap->default_ot_wr_limit && !params->rd)
ot_lim = vbif->cap->default_ot_wr_limit;
else if (vbif->cap->default_ot_rd_limit && params->rd)
ot_lim = vbif->cap->default_ot_rd_limit;
/*
* If default ot is not set from dt/catalog,
* then do not configure it.
*/
if (ot_lim == 0)
goto exit;
/* Modify the limits if the target and the use case requires it */
_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
if (vbif && vbif->ops.get_limit_conf) {
val = vbif->ops.get_limit_conf(vbif,
params->xin_id, params->rd);
if (val == ot_lim)
ot_lim = 0;
}
exit:
DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
return ot_lim;
}
/**
* dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
* @dpu_kms: DPU handler
* @params: Pointer to usecase parameters
*
* Note this function would block waiting for bus halt.
*/
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
int ret;
mdp = dpu_kms->hw_mdp;
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
return;
}
if (!mdp->ops.setup_clk_force_ctrl ||
!vbif->ops.set_limit_conf ||
!vbif->ops.set_halt_ctrl)
return;
/* set write_gather_en for all write clients */
if (vbif->ops.set_write_gather_en && !params->rd)
vbif->ops.set_write_gather_en(vbif, params->xin_id);
ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
return;
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
if (ret)
trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
int i;
if (!params || !dpu_kms->hw_mdp) {
DPU_ERROR("invalid arguments\n");
return;
}
mdp = dpu_kms->hw_mdp;
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
return;
}
if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
DRM_DEBUG_ATOMIC("qos remap not supported\n");
return;
}
qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
&vbif->cap->qos_nrt_tbl;
if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
DRM_DEBUG_ATOMIC("qos tbl not defined\n");
return;
}
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
dpu_vbif_name(params->vbif_idx), params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
}
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
u32 i, pnd, src;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->ops.clear_errors) {
vbif->ops.clear_errors(vbif, &pnd, &src);
if (pnd || src) {
DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
dpu_vbif_name(vbif->idx), pnd, src);
}
}
}
}
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
int i, j;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->cap && vbif->ops.set_mem_type) {
for (j = 0; j < vbif->cap->memtype_count; j++)
vbif->ops.set_mem_type(
vbif, j, vbif->cap->memtype[j]);
}
}
}
#ifdef CONFIG_DEBUG_FS
void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
char vbif_name[32];
struct dentry *entry, *debugfs_vbif;
int i, j;
entry = debugfs_create_dir("vbif", debugfs_root);
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
debugfs_vbif = debugfs_create_dir(vbif_name, entry);
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);
debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
(u32 *)&vbif->xin_halt_timeout);
debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
(u32 *)&vbif->default_ot_rd_limit);
debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
(u32 *)&vbif->default_ot_wr_limit);
for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_rd_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_rd_%d_pps", j);
debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
(u64 *)&cfg->pps);
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_rd_%d_ot_limit", j);
debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
(u32 *)&cfg->ot_limit);
}
for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_wr_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_wr_%d_pps", j);
debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
(u64 *)&cfg->pps);
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_wr_%d_ot_limit", j);
debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
(u32 *)&cfg->ot_limit);
}
}
}
#endif
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
#include "dpu_trace.h"
#include "dpu_crtc.h"
#include "dpu_vbif.h"
#include "dpu_plane.h"
#define DPU_DEBUG_PLANE(pl, fmt, ...) DRM_DEBUG_ATOMIC("plane%d " fmt,\
(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
#define PHASE_STEP_SHIFT 21
#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
#define PHASE_RESIDUAL 15
#define SHARP_STRENGTH_DEFAULT 32
#define SHARP_EDGE_THR_DEFAULT 112
#define SHARP_SMOOTH_THR_DEFAULT 8
#define SHARP_NOISE_THR_DEFAULT 2
#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
#define DPU_ZPOS_MAX 255
/*
* Default Preload Values
*/
#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
#define DEFAULT_REFRESH_RATE 60
static const uint32_t qcom_compressed_supported_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
DRM_FORMAT_P010,
};
/*
 * struct dpu_plane - local dpu plane structure
 * @base: base drm plane structure
 * @lock: serializes access to plane updates
 * @pipe: SSPP pipe backing this plane
 * @color_fill: color fill value, valid when DPU_PLANE_COLOR_FILL_FLAG is set
 * @is_error: plane is in an error state
 * @is_rt_pipe: plane drives a real-time (display) client
 * @catalog: points to dpu catalog structure
 */
struct dpu_plane {
struct drm_plane base;
struct mutex lock;
enum dpu_sspp pipe;
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
const struct dpu_mdss_cfg *catalog;
};
static const uint64_t supported_format_modifiers[] = {
DRM_FORMAT_MOD_QCOM_COMPRESSED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_dpu_kms(priv->kms);
}
/**
* _dpu_plane_calc_bw - calculate bandwidth required for a plane
* @catalog: Points to dpu catalog structure
* @fmt: Pointer to source buffer format
* @mode: Pointer to drm display mode
* @pipe_cfg: Pointer to pipe configuration
 * Return: calculated bandwidth for the plane, in bytes per second
 * BW equation: src_w * src_h * bpp * fps * (v_total / v_dest)
 * Prefill BW equation: src_w * prefill lines * bpp * fps *
 *	(src_h / dst_h) * (v_total / vertical blanking lines)
*/
static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
const struct dpu_format *fmt,
const struct drm_display_mode *mode,
struct dpu_sw_pipe_cfg *pipe_cfg)
{
int src_width, src_height, dst_height, fps;
u64 plane_prefill_bw;
u64 plane_bw;
u32 hw_latency_lines;
u64 scale_factor;
int vbp, vpw, vfp;
src_width = drm_rect_width(&pipe_cfg->src_rect);
src_height = drm_rect_height(&pipe_cfg->src_rect);
dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
vbp = mode->vtotal - mode->vsync_end;
vpw = mode->vsync_end - mode->vsync_start;
vfp = mode->vsync_start - mode->vdisplay;
hw_latency_lines = catalog->perf->min_prefill_lines;
scale_factor = src_height > dst_height ?
mult_frac(src_height, 1, dst_height) : 1;
plane_bw =
src_width * mode->vtotal * fps * fmt->bpp *
scale_factor;
plane_prefill_bw =
src_width * hw_latency_lines * fps * fmt->bpp *
scale_factor * mode->vtotal;
if ((vbp+vpw) > hw_latency_lines)
do_div(plane_prefill_bw, (vbp+vpw));
else if ((vbp+vpw+vfp) < hw_latency_lines)
do_div(plane_prefill_bw, (vbp+vpw+vfp));
else
do_div(plane_prefill_bw, hw_latency_lines);
return max(plane_bw, plane_prefill_bw);
}
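/*
 * Worked example for the bandwidth math above, with hypothetical values:
 * a 1920-wide 32bpp layer on a vtotal = 1125, 60 fps mode with no
 * downscale (scale_factor = 1) gives
 *
 *	plane_bw = 1920 * 1125 * 60 * 4 = 518400000 bytes/s (~518 MB/s)
 *
 * and the max() with plane_prefill_bw picks whichever demand is higher.
 */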
/**
* _dpu_plane_calc_clk - calculate clock required for a plane
* @mode: Pointer to drm display mode
* @pipe_cfg: Pointer to pipe configuration
 * Return: calculated clock for the plane, in Hz
* Clock equation: dst_w * v_total * fps * (src_h / dst_h)
*/
static u64 _dpu_plane_calc_clk(const struct drm_display_mode *mode,
struct dpu_sw_pipe_cfg *pipe_cfg)
{
int dst_width, src_height, dst_height, fps;
u64 plane_clk;
src_height = drm_rect_height(&pipe_cfg->src_rect);
dst_width = drm_rect_width(&pipe_cfg->dst_rect);
dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
plane_clk =
dst_width * mode->vtotal * fps;
if (src_height > dst_height) {
plane_clk *= src_height;
do_div(plane_clk, dst_height);
}
return plane_clk;
}
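/*
 * Worked example for the clock math above (hypothetical mode): dst_w =
 * 1920 on a vtotal = 1125, 60 fps mode needs 1920 * 1125 * 60 = 129.6 MHz;
 * a 2:1 vertical downscale (src_h = 2 * dst_h) doubles that to 259.2 MHz.
 */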
/**
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
* @plane: Pointer to drm plane
* @pipe: Pointer to software pipe
* @lut_usage: LUT usecase
* @fmt: Pointer to source buffer format
* @src_width: width of source buffer
* Return: fill level corresponding to the source buffer/format or 0 if error
*/
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
enum dpu_qos_lut_usage lut_usage,
const struct dpu_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu;
u32 fixed_buff_size;
u32 total_fl;
if (!fmt || !pipe || !src_width || !fmt->bpp) {
DPU_ERROR("invalid arguments\n");
return 0;
}
if (lut_usage == DPU_QOS_LUT_USAGE_NRT)
return 0;
pdpu = to_dpu_plane(plane);
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
/* FIXME: in multirect case account for the src_width of all the planes */
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
if (fmt->chroma_sample == DPU_CHROMA_420) {
/* NV12 */
total_fl = (fixed_buff_size / 2) /
((src_width + 32) * fmt->bpp);
} else {
/* non NV12 */
total_fl = (fixed_buff_size / 2) * 2 /
((src_width + 32) * fmt->bpp);
}
} else {
if (pipe->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
total_fl = (fixed_buff_size / 2) * 2 /
((src_width + 32) * fmt->bpp);
} else {
total_fl = (fixed_buff_size) * 2 /
((src_width + 32) * fmt->bpp);
}
}
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s w:%u fl:%u\n",
pipe->sspp->idx - SSPP_VIG0,
(char *)&fmt->base.pixel_format,
src_width, total_fl);
return total_fl;
}
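/*
 * Worked example for the fill level math above, assuming a hypothetical
 * 51200-byte pixel RAM (not a real catalog value), a linear 32bpp layer
 * (bpp = 4), src_width = 1920 and a solo (non-multirect) pipe:
 *
 *	total_fl = 51200 * 2 / ((1920 + 32) * 4) = 102400 / 7808 = 13
 */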
/**
* _dpu_plane_set_qos_lut - set QoS LUT of the given plane
* @plane: Pointer to drm plane
* @pipe: Pointer to software pipe
* @fmt: Pointer to source buffer format
* @pipe_cfg: Pointer to pipe configuration
*/
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
const struct dpu_format *fmt, struct dpu_sw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_hw_qos_cfg cfg;
u32 total_fl, lut_usage;
if (!pdpu->is_rt_pipe) {
lut_usage = DPU_QOS_LUT_USAGE_NRT;
} else {
if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
else
lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
}
total_fl = _dpu_plane_calc_fill_level(plane, pipe, lut_usage, fmt,
drm_rect_width(&pipe_cfg->src_rect));
cfg.creq_lut = _dpu_hw_get_qos_lut(&pdpu->catalog->perf->qos_lut_tbl[lut_usage], total_fl);
cfg.danger_lut = pdpu->catalog->perf->danger_lut_tbl[lut_usage];
cfg.safe_lut = pdpu->catalog->perf->safe_lut_tbl[lut_usage];
if (pipe->sspp->idx != SSPP_CURSOR0 &&
pipe->sspp->idx != SSPP_CURSOR1 &&
pdpu->is_rt_pipe)
cfg.danger_safe_en = true;
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d is_rt:%d\n",
pdpu->pipe - SSPP_VIG0,
cfg.danger_safe_en,
pdpu->is_rt_pipe);
trace_dpu_perf_set_qos_luts(pipe->sspp->idx - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
pdpu->is_rt_pipe, total_fl, cfg.creq_lut, lut_usage);
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
pdpu->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
pdpu->is_rt_pipe, total_fl, cfg.creq_lut);
trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
(fmt) ? fmt->fetch_mode : 0,
cfg.danger_lut,
cfg.safe_lut);
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
pdpu->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
fmt ? fmt->fetch_mode : -1,
cfg.danger_lut,
cfg.safe_lut);
pipe->sspp->ops.setup_qos_lut(pipe->sspp, &cfg);
}
/**
* _dpu_plane_set_qos_ctrl - set QoS control of the given plane
* @plane: Pointer to drm plane
* @pipe: Pointer to software pipe
* @enable: true to enable QoS control
*/
static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
bool enable)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
if (!pdpu->is_rt_pipe)
enable = false;
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d is_rt:%d\n",
pdpu->pipe - SSPP_VIG0,
enable,
pdpu->is_rt_pipe);
pipe->sspp->ops.setup_qos_ctrl(pipe->sspp,
enable);
}
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
* @pipe: Pointer to software pipe
* @pipe_cfg: Pointer to pipe configuration
* @frame_rate: CRTC's frame rate
*/
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
int frame_rate)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pipe->sspp->cap->xin_id;
ot_params.num = pipe->sspp->idx - SSPP_NONE;
ot_params.width = drm_rect_width(&pipe_cfg->src_rect);
ot_params.height = drm_rect_height(&pipe_cfg->src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
ot_params.frame_rate = frame_rate;
ot_params.vbif_idx = VBIF_RT;
ot_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
ot_params.rd = true;
dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
}
/**
* _dpu_plane_set_qos_remap - set vbif QoS for the given plane
* @plane: Pointer to drm plane
* @pipe: Pointer to software pipe
*/
static void _dpu_plane_set_qos_remap(struct drm_plane *plane,
struct dpu_sw_pipe *pipe)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_qos_params qos_params;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = VBIF_RT;
qos_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
qos_params.xin_id = pipe->sspp->cap->xin_id;
qos_params.num = pipe->sspp->idx - SSPP_VIG0;
qos_params.is_rt = pdpu->is_rt_pipe;
DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
qos_params.num,
qos_params.vbif_idx,
qos_params.xin_id, qos_params.is_rt,
qos_params.clk_ctrl);
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
}
static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct dpu_hw_scaler3_cfg *scale_cfg,
const struct dpu_format *fmt,
uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v,
unsigned int rotation)
{
uint32_t i;
bool inline_rotation = rotation & DRM_MODE_ROTATE_90;
/*
* For inline rotation cases, scaler config is post-rotation,
* so swap the dimensions here. However, pixel extension will
* need pre-rotation settings.
*/
if (inline_rotation)
swap(src_w, src_h);
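	/*
	 * Phase step is a fixed-point src/dst ratio, scaled by
	 * (1 << PHASE_STEP_SHIFT) and computed per component; the chroma
	 * components additionally divide by the subsampling factors.
	 * E.g. assuming PHASE_STEP_SHIFT is 21, downscaling 1920 -> 960
	 * gives a luma phase step of 2 << 21.
	 */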
scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
for (i = 0; i < DPU_MAX_PLANES; i++) {
scale_cfg->src_width[i] = src_w;
scale_cfg->src_height[i] = src_h;
if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
scale_cfg->src_width[i] /= chroma_subsmpl_h;
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
if (pipe_hw->cap->features &
BIT(DPU_SSPP_SCALER_QSEED4)) {
scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
} else {
scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
}
}
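	/* 1:1 non-YUV needs no scaling; leave the scaler disabled */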
if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
&& (src_w == dst_w))
return;
scale_cfg->dst_width = dst_w;
scale_cfg->dst_height = dst_h;
scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
scale_cfg->lut_flag = 0;
scale_cfg->blend_cfg = 1;
scale_cfg->enable = 1;
}
static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
struct dpu_hw_pixel_ext *pixel_ext,
uint32_t src_w, uint32_t src_h,
uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
{
int i;
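	/*
	 * Default pixel extension: each component plane fetches its full
	 * (chroma-subsampled) source width and height.
	 */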
for (i = 0; i < DPU_MAX_PLANES; i++) {
if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
src_w /= chroma_subsmpl_h;
src_h /= chroma_subsmpl_v;
}
pixel_ext->num_ext_pxls_top[i] = src_h;
pixel_ext->num_ext_pxls_left[i] = src_w;
}
}
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
/* S15.16 format */
0x00012A00, 0x00000000, 0x00019880,
0x00012A00, 0xFFFF9B80, 0xFFFF3000,
0x00012A00, 0x00020480, 0x00000000,
},
/* signed bias */
{ 0xfff0, 0xff80, 0xff80,},
{ 0x0, 0x0, 0x0,},
/* unsigned clamp */
{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
};
static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
{
/* S15.16 format */
0x00012A00, 0x00000000, 0x00019880,
0x00012A00, 0xFFFF9B80, 0xFFFF3000,
0x00012A00, 0x00020480, 0x00000000,
},
/* signed bias */
{ 0xffc0, 0xfe00, 0xfe00,},
{ 0x0, 0x0, 0x0,},
/* unsigned clamp */
{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
};
static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
const struct dpu_format *fmt)
{
const struct dpu_csc_cfg *csc_ptr;
if (!DPU_FORMAT_IS_YUV(fmt))
return NULL;
if (BIT(DPU_SSPP_CSC_10BIT) & pipe->sspp->cap->features)
csc_ptr = &dpu_csc10_YUV2RGB_601L;
else
csc_ptr = &dpu_csc_YUV2RGB_601L;
return csc_ptr;
}
static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
const struct dpu_format *fmt, bool color_fill,
struct dpu_sw_pipe_cfg *pipe_cfg,
unsigned int rotation)
{
struct dpu_hw_sspp *pipe_hw = pipe->sspp;
const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
struct dpu_hw_scaler3_cfg scaler3_cfg;
struct dpu_hw_pixel_ext pixel_ext;
u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
u32 src_height = drm_rect_height(&pipe_cfg->src_rect);
u32 dst_width = drm_rect_width(&pipe_cfg->dst_rect);
u32 dst_height = drm_rect_height(&pipe_cfg->dst_rect);
memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
memset(&pixel_ext, 0, sizeof(pixel_ext));
	/* update scaler: calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pipe_hw,
src_width,
src_height,
dst_width,
dst_height,
&scaler3_cfg, fmt,
info->hsub, info->vsub,
rotation);
	/* configure pixel extension based on scaler config */
_dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
src_width, src_height, info->hsub, info->vsub);
if (pipe_hw->ops.setup_pe)
pipe_hw->ops.setup_pe(pipe_hw,
&pixel_ext);
	/*
	 * When programmed in multirect mode, the scaler block is bypassed.
	 * We still need to update alpha and bitwidth, but ONLY for RECT0.
	 */
if (pipe_hw->ops.setup_scaler &&
pipe->multirect_index != DPU_SSPP_RECT_1)
pipe_hw->ops.setup_scaler(pipe_hw,
&scaler3_cfg,
fmt);
}
static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
struct dpu_sw_pipe *pipe,
struct drm_rect *dst_rect,
u32 fill_color,
const struct dpu_format *fmt)
{
struct dpu_sw_pipe_cfg pipe_cfg;
/* update sspp */
if (!pipe->sspp->ops.setup_solidfill)
return;
pipe->sspp->ops.setup_solidfill(pipe, fill_color);
/* override scaler/decimation if solid fill */
pipe_cfg.dst_rect = *dst_rect;
pipe_cfg.src_rect.x1 = 0;
pipe_cfg.src_rect.y1 = 0;
pipe_cfg.src_rect.x2 =
drm_rect_width(&pipe_cfg.dst_rect);
pipe_cfg.src_rect.y2 =
drm_rect_height(&pipe_cfg.dst_rect);
if (pipe->sspp->ops.setup_format)
pipe->sspp->ops.setup_format(pipe, fmt, DPU_SSPP_SOLID_FILL);
if (pipe->sspp->ops.setup_rects)
pipe->sspp->ops.setup_rects(pipe, &pipe_cfg);
_dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg, pstate->rotation);
}
/**
* _dpu_plane_color_fill - enables color fill on plane
* @pdpu: Pointer to DPU plane object
* @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
* @alpha: 8-bit fill alpha value, 255 selects 100% alpha
*/
static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
uint32_t color, uint32_t alpha)
{
const struct dpu_format *fmt;
const struct drm_plane *plane = &pdpu->base;
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
u32 fill_color = (color & 0xFFFFFF) | ((alpha & 0xFF) << 24);
DPU_DEBUG_PLANE(pdpu, "\n");
/*
* select fill format to match user property expectation,
* h/w only supports RGB variants
*/
fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
	/* should never happen */
if (!fmt)
return;
/* update sspp */
_dpu_plane_color_fill_pipe(pstate, &pstate->pipe, &pstate->pipe_cfg.dst_rect,
fill_color, fmt);
if (pstate->r_pipe.sspp)
_dpu_plane_color_fill_pipe(pstate, &pstate->r_pipe, &pstate->r_pipe_cfg.dst_rect,
fill_color, fmt);
}
static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_framebuffer *fb = new_state->fb;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
return 0;
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
/* cache aspace */
pstate->aspace = kms->base.aspace;
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
* we can use msm_atomic_prepare_fb() instead of doing the
* implicit fence and fb prepare by hand here.
*/
drm_gem_plane_helper_prepare_fb(plane, new_state);
if (pstate->aspace) {
ret = msm_framebuffer_prepare(new_state->fb,
pstate->aspace, pstate->needs_dirtyfb);
if (ret) {
DPU_ERROR("failed to prepare framebuffer\n");
return ret;
}
}
/* validate framebuffer layout before commit */
ret = dpu_format_populate_layout(pstate->aspace,
new_state->fb, &layout);
if (ret) {
DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
return ret;
}
return 0;
}
static void dpu_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *old_pstate;
if (!old_state || !old_state->fb)
return;
old_pstate = to_dpu_plane_state(old_state);
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace,
old_pstate->needs_dirtyfb);
}
static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
const struct dpu_sspp_sub_blks *sblk,
struct drm_rect src, const struct dpu_format *fmt)
{
size_t num_formats;
const u32 *supported_formats;
if (!sblk->rotation_cfg) {
DPU_ERROR("invalid rotation cfg\n");
return -EINVAL;
}
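	/*
	 * Inline rotation swaps width and height, so the source width is
	 * checked against the rotated maximum height.
	 */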
if (drm_rect_width(&src) > sblk->rotation_cfg->rot_maxheight) {
DPU_DEBUG_PLANE(pdpu, "invalid height for inline rot:%d max:%d\n",
src.y2, sblk->rotation_cfg->rot_maxheight);
return -EINVAL;
}
supported_formats = sblk->rotation_cfg->rot_format_list;
num_formats = sblk->rotation_cfg->rot_num_formats;
if (!DPU_FORMAT_IS_UBWC(fmt) ||
!dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats))
return -EINVAL;
return 0;
}
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
const struct dpu_format *fmt)
{
uint32_t min_src_size;
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
(!(pipe->sspp->cap->features & DPU_SSPP_SCALER) ||
!(pipe->sspp->cap->features & DPU_SSPP_CSC_ANY))) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
}
/* check src bounds */
if (drm_rect_width(&pipe_cfg->src_rect) < min_src_size ||
drm_rect_height(&pipe_cfg->src_rect) < min_src_size) {
DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&pipe_cfg->src_rect));
return -E2BIG;
}
/* valid yuv image */
if (DPU_FORMAT_IS_YUV(fmt) &&
(pipe_cfg->src_rect.x1 & 0x1 ||
pipe_cfg->src_rect.y1 & 0x1 ||
drm_rect_width(&pipe_cfg->src_rect) & 0x1 ||
drm_rect_height(&pipe_cfg->src_rect) & 0x1)) {
DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&pipe_cfg->src_rect));
return -EINVAL;
}
/* min dst support */
if (drm_rect_width(&pipe_cfg->dst_rect) < 0x1 ||
drm_rect_height(&pipe_cfg->dst_rect) < 0x1) {
DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&pipe_cfg->dst_rect));
return -EINVAL;
}
return 0;
}
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
int ret = 0, min_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
const struct drm_crtc_state *crtc_state = NULL;
const struct dpu_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
struct drm_rect fb_rect = { 0 };
uint32_t max_linewidth;
unsigned int rotation;
uint32_t supported_rotations;
const struct dpu_sspp_cfg *pipe_hw_caps = pstate->pipe.sspp->cap;
const struct dpu_sspp_sub_blks *sblk = pstate->pipe.sspp->cap->sblk;
if (new_plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
min_scale = FRAC_16_16(1, sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
sblk->maxdwnscale << 16,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
return ret;
}
if (!new_plane_state->visible)
return 0;
pipe->multirect_index = DPU_SSPP_RECT_SOLO;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
r_pipe->sspp = NULL;
pstate->stage = DPU_STAGE_0 + pstate->base.normalized_zpos;
if (pstate->stage >= pdpu->catalog->caps->max_mixer_blendstages) {
DPU_ERROR("> %d plane stages assigned\n",
pdpu->catalog->caps->max_mixer_blendstages - DPU_STAGE_0);
return -EINVAL;
}
pipe_cfg->src_rect = new_plane_state->src;
/* state->src is 16.16, src_rect is not */
pipe_cfg->src_rect.x1 >>= 16;
pipe_cfg->src_rect.x2 >>= 16;
pipe_cfg->src_rect.y1 >>= 16;
pipe_cfg->src_rect.y2 >>= 16;
pipe_cfg->dst_rect = new_plane_state->dst;
fb_rect.x2 = new_plane_state->fb->width;
fb_rect.y2 = new_plane_state->fb->height;
/* Ensure fb size is supported */
if (drm_rect_width(&fb_rect) > MAX_IMG_WIDTH ||
drm_rect_height(&fb_rect) > MAX_IMG_HEIGHT) {
DPU_DEBUG_PLANE(pdpu, "invalid framebuffer " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&fb_rect));
return -E2BIG;
}
fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb));
max_linewidth = pdpu->catalog->caps->max_linewidth;
if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
/*
* In parallel multirect case only the half of the usual width
* is supported for tiled formats. If we are here, we know that
* full width is more than max_linewidth, thus each rect is
* wider than allowed.
*/
if (DPU_FORMAT_IS_UBWC(fmt)) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
}
if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
}
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
(!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
DPU_FORMAT_IS_YUV(fmt)) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
}
/*
* Use multirect for wide plane. We do not support dynamic
* assignment of SSPPs, so we know the configuration.
*/
pipe->multirect_index = DPU_SSPP_RECT_0;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
r_pipe->sspp = pipe->sspp;
r_pipe->multirect_index = DPU_SSPP_RECT_1;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
*r_pipe_cfg = *pipe_cfg;
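	/*
	 * Halve both rects at the horizontal midpoint: the left half stays
	 * on RECT_0, the right half goes to RECT_1.
	 */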
pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
}
ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt);
if (ret)
return ret;
if (r_pipe->sspp) {
ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt);
if (ret)
return ret;
}
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
supported_rotations |= DRM_MODE_ROTATE_90;
rotation = drm_rotation_simplify(new_plane_state->rotation,
supported_rotations);
if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) &&
(rotation & DRM_MODE_ROTATE_90)) {
ret = dpu_plane_check_inline_rotation(pdpu, sblk, pipe_cfg->src_rect, fmt);
if (ret)
return ret;
}
pstate->rotation = rotation;
pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
return 0;
}
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
{
const struct dpu_format *format =
to_dpu_format(msm_framebuffer_format(pdpu->base.state->fb));
const struct dpu_csc_cfg *csc_ptr;
if (!pipe->sspp || !pipe->sspp->ops.setup_csc)
return;
csc_ptr = _dpu_plane_get_csc(pipe, format);
if (!csc_ptr)
return;
DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
csc_ptr->csc_mv[0],
csc_ptr->csc_mv[1],
csc_ptr->csc_mv[2]);
pipe->sspp->ops.setup_csc(pipe->sspp, csc_ptr);
}
void dpu_plane_flush(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
if (!plane || !plane->state) {
DPU_ERROR("invalid plane\n");
return;
}
pdpu = to_dpu_plane(plane);
pstate = to_dpu_plane_state(plane->state);
/*
* These updates have to be done immediately before the plane flush
* timing, and may not be moved to the atomic_update/mode_set functions.
*/
if (pdpu->is_error)
/* force white frame with 100% alpha pipe output on error */
_dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
/* force 100% alpha */
_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
else {
dpu_plane_flush_csc(pdpu, &pstate->pipe);
dpu_plane_flush_csc(pdpu, &pstate->r_pipe);
}
/* flag h/w flush complete */
if (plane->state)
pstate->pending = false;
}
/**
 * dpu_plane_set_error - enable/disable error condition
 * @plane: pointer to drm_plane structure
 * @error: error value to set
 */
void dpu_plane_set_error(struct drm_plane *plane, bool error)
{
struct dpu_plane *pdpu;
if (!plane)
return;
pdpu = to_dpu_plane(plane);
pdpu->is_error = error;
}
static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
const struct dpu_format *fmt,
int frame_rate,
struct dpu_hw_fmt_layout *layout)
{
uint32_t src_flags;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
if (layout && pipe->sspp->ops.setup_sourceaddress) {
trace_dpu_plane_set_scanout(pipe, layout);
pipe->sspp->ops.setup_sourceaddress(pipe, layout);
}
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
_dpu_plane_set_qos_ctrl(plane, pipe, false);
/* skip remaining processing on color fill */
return;
}
if (pipe->sspp->ops.setup_rects) {
pipe->sspp->ops.setup_rects(pipe,
pipe_cfg);
}
_dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg, pstate->rotation);
if (pipe->sspp->ops.setup_multirect)
pipe->sspp->ops.setup_multirect(
pipe);
if (pipe->sspp->ops.setup_format) {
unsigned int rotation = pstate->rotation;
src_flags = 0x0;
if (rotation & DRM_MODE_REFLECT_X)
src_flags |= DPU_SSPP_FLIP_LR;
if (rotation & DRM_MODE_REFLECT_Y)
src_flags |= DPU_SSPP_FLIP_UD;
if (rotation & DRM_MODE_ROTATE_90)
src_flags |= DPU_SSPP_ROT_90;
/* update format */
pipe->sspp->ops.setup_format(pipe, fmt, src_flags);
if (pipe->sspp->ops.setup_cdp) {
const struct dpu_perf_cfg *perf = pdpu->catalog->perf;
pipe->sspp->ops.setup_cdp(pipe, fmt,
perf->cdp_cfg[DPU_PERF_CDP_USAGE_RT].rd_enable);
}
}
_dpu_plane_set_qos_lut(plane, pipe, fmt, pipe_cfg);
if (pipe->sspp->idx != SSPP_CURSOR0 &&
pipe->sspp->idx != SSPP_CURSOR1)
_dpu_plane_set_ot_limit(plane, pipe, pipe_cfg, frame_rate);
if (pstate->needs_qos_remap)
_dpu_plane_set_qos_remap(plane, pipe);
}
static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
bool is_rt_pipe;
const struct dpu_format *fmt =
to_dpu_format(msm_framebuffer_format(fb));
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
struct msm_gem_address_space *aspace = kms->base.aspace;
struct dpu_hw_fmt_layout layout;
bool layout_valid = false;
int ret;
ret = dpu_format_populate_layout(aspace, fb, &layout);
if (ret)
DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
else
layout_valid = true;
pstate->pending = true;
is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
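	/* force a QoS remap if the RT/NRT classification of the pipe changed */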
pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
pdpu->is_rt_pipe = is_rt_pipe;
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
crtc->base.id, DRM_RECT_ARG(&state->dst),
(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt,
drm_mode_vrefresh(&crtc->mode),
layout_valid ? &layout : NULL);
if (r_pipe->sspp) {
dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt,
drm_mode_vrefresh(&crtc->mode),
layout_valid ? &layout : NULL);
}
	pstate->needs_qos_remap = false;
pstate->plane_fetch_bw = _dpu_plane_calc_bw(pdpu->catalog, fmt,
&crtc->mode, pipe_cfg);
pstate->plane_clk = _dpu_plane_calc_clk(&crtc->mode, pipe_cfg);
if (r_pipe->sspp) {
pstate->plane_fetch_bw += _dpu_plane_calc_bw(pdpu->catalog, fmt, &crtc->mode, r_pipe_cfg);
pstate->plane_clk = max(pstate->plane_clk, _dpu_plane_calc_clk(&crtc->mode, r_pipe_cfg));
}
}
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
trace_dpu_plane_disable(DRMID(plane), false,
pstate->pipe.multirect_mode);
if (r_pipe->sspp) {
r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
if (r_pipe->sspp->ops.setup_multirect)
r_pipe->sspp->ops.setup_multirect(r_pipe);
}
pstate->pending = true;
}
static void dpu_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
pdpu->is_error = false;
DPU_DEBUG_PLANE(pdpu, "\n");
if (!new_state->visible) {
_dpu_plane_atomic_disable(plane);
} else {
dpu_plane_sspp_atomic_update(plane);
}
}
static void dpu_plane_destroy(struct drm_plane *plane)
{
struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
struct dpu_plane_state *pstate;
DPU_DEBUG_PLANE(pdpu, "\n");
if (pdpu) {
pstate = to_dpu_plane_state(plane->state);
_dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false);
if (pstate->r_pipe.sspp)
_dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false);
mutex_destroy(&pdpu->lock);
/* this will destroy the states as well */
drm_plane_cleanup(plane);
kfree(pdpu);
}
}
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_dpu_plane_state(state));
}
static struct drm_plane_state *
dpu_plane_duplicate_state(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
struct dpu_plane_state *old_state;
if (!plane) {
DPU_ERROR("invalid plane\n");
return NULL;
} else if (!plane->state) {
DPU_ERROR("invalid plane state\n");
return NULL;
}
old_state = to_dpu_plane_state(plane->state);
pdpu = to_dpu_plane(plane);
pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
if (!pstate) {
DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
return NULL;
}
DPU_DEBUG_PLANE(pdpu, "\n");
pstate->pending = false;
__drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
return &pstate->base;
}
static const char * const multirect_mode_name[] = {
[DPU_SSPP_MULTIRECT_NONE] = "none",
[DPU_SSPP_MULTIRECT_PARALLEL] = "parallel",
[DPU_SSPP_MULTIRECT_TIME_MX] = "time_mx",
};
static const char * const multirect_index_name[] = {
[DPU_SSPP_RECT_SOLO] = "solo",
[DPU_SSPP_RECT_0] = "rect_0",
[DPU_SSPP_RECT_1] = "rect_1",
};
static const char *dpu_get_multirect_mode(enum dpu_sspp_multirect_mode mode)
{
if (WARN_ON(mode >= ARRAY_SIZE(multirect_mode_name)))
return "unknown";
return multirect_mode_name[mode];
}
static const char *dpu_get_multirect_index(enum dpu_sspp_multirect_index index)
{
if (WARN_ON(index >= ARRAY_SIZE(multirect_index_name)))
return "unknown";
return multirect_index_name[index];
}
static void dpu_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
const struct dpu_plane_state *pstate = to_dpu_plane_state(state);
const struct dpu_sw_pipe *pipe = &pstate->pipe;
const struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
const struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
const struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
drm_printf(p, "\tstage=%d\n", pstate->stage);
drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
drm_printf(p, "\tmultirect_mode[0]=%s\n", dpu_get_multirect_mode(pipe->multirect_mode));
drm_printf(p, "\tmultirect_index[0]=%s\n",
dpu_get_multirect_index(pipe->multirect_index));
drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
if (r_pipe->sspp) {
drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
drm_printf(p, "\tmultirect_mode[1]=%s\n",
dpu_get_multirect_mode(r_pipe->multirect_mode));
drm_printf(p, "\tmultirect_index[1]=%s\n",
dpu_get_multirect_index(r_pipe->multirect_index));
drm_printf(p, "\tsrc[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->src_rect));
drm_printf(p, "\tdst[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->dst_rect));
}
}
static void dpu_plane_reset(struct drm_plane *plane)
{
	struct dpu_plane *pdpu;
	struct dpu_plane_state *pstate;
	struct dpu_kms *dpu_kms;
	if (!plane) {
		DPU_ERROR("invalid plane\n");
		return;
	}
	/* fetch the kms only after the NULL check above */
	dpu_kms = _dpu_plane_get_kms(plane);
	pdpu = to_dpu_plane(plane);
DPU_DEBUG_PLANE(pdpu, "\n");
/* remove previous state, if present */
if (plane->state) {
dpu_plane_destroy_state(plane, plane->state);
plane->state = NULL;
}
pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
if (!pstate) {
DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
return;
}
/*
* Set the SSPP here until we have proper virtualized DPU planes.
* This is the place where the state is allocated, so fill it fully.
*/
pstate->pipe.sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
pstate->pipe.multirect_index = DPU_SSPP_RECT_SOLO;
pstate->pipe.multirect_mode = DPU_SSPP_MULTIRECT_NONE;
pstate->r_pipe.sspp = NULL;
__drm_atomic_helper_plane_reset(plane, &pstate->base);
}
#ifdef CONFIG_DEBUG_FS
void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
if (!pdpu->is_rt_pipe)
return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
_dpu_plane_set_qos_ctrl(plane, &pstate->pipe, enable);
if (pstate->r_pipe.sspp)
_dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, enable);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
#endif
static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format, uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED)
return dpu_find_format(format, qcom_compressed_supported_formats,
ARRAY_SIZE(qcom_compressed_supported_formats));
return false;
}
static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = dpu_plane_destroy,
.reset = dpu_plane_reset,
.atomic_duplicate_state = dpu_plane_duplicate_state,
.atomic_destroy_state = dpu_plane_destroy_state,
.atomic_print_state = dpu_plane_atomic_print_state,
.format_mod_supported = dpu_plane_format_mod_supported,
};
static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
.prepare_fb = dpu_plane_prepare_fb,
.cleanup_fb = dpu_plane_cleanup_fb,
.atomic_check = dpu_plane_atomic_check,
.atomic_update = dpu_plane_atomic_update,
};
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs)
{
struct drm_plane *plane = NULL;
const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
struct dpu_hw_sspp *pipe_hw;
uint32_t num_formats;
uint32_t supported_rotations;
int ret = -EINVAL;
/* create and zero local structure */
pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
if (!pdpu) {
DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
ret = -ENOMEM;
return ERR_PTR(ret);
}
/* cache local stuff for later */
plane = &pdpu->base;
pdpu->pipe = pipe;
/* initialize underlying h/w driver */
pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
DPU_ERROR("[%u]SSPP is invalid\n", pipe);
goto clean_plane;
}
format_list = pipe_hw->cap->sblk->format_list;
num_formats = pipe_hw->cap->sblk->num_formats;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
format_list, num_formats,
supported_format_modifiers, type, NULL);
if (ret)
goto clean_plane;
pdpu->catalog = kms->catalog;
ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX);
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
if (pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
supported_rotations |= DRM_MODE_ROTATE_MASK;
drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0, supported_rotations);
drm_plane_enable_fb_damage_clips(plane);
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
mutex_init(&pdpu->lock);
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
return plane;
clean_plane:
kfree(pdpu);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include "msm_media_info.h"
#include "dpu_kms.h"
#include "dpu_formats.h"
#define DPU_UBWC_META_MACRO_W_H 16
#define DPU_UBWC_META_BLOCK_SIZE 256
#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
#define DPU_TILE_HEIGHT_DEFAULT 1
#define DPU_TILE_HEIGHT_TILED 4
#define DPU_TILE_HEIGHT_UBWC 4
#define DPU_TILE_HEIGHT_NV12 8
#define DPU_MAX_IMG_WIDTH 0x3FFF
#define DPU_MAX_IMG_HEIGHT 0x3FFF
/*
 * DPU supported format packing, bpp, and other format
 * information.
 * DPU currently only supports interleaved RGB formats.
 * UBWC support for a pixel format is indicated by the flag;
 * such formats carry an additional metadata plane.
 */
#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
bp, flg, fm, np) \
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_INTERLEAVED, \
.alpha_enable = alpha, \
.element = { (e0), (e1), (e2), (e3) }, \
.bits = { g, b, r, a }, \
.chroma_sample = DPU_CHROMA_RGB, \
.unpack_align_msb = 0, \
.unpack_tight = 1, \
.unpack_count = uc, \
.bpp = bp, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = DPU_TILE_HEIGHT_DEFAULT \
}
#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
alpha, bp, flg, fm, np, th) \
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_INTERLEAVED, \
.alpha_enable = alpha, \
.element = { (e0), (e1), (e2), (e3) }, \
.bits = { g, b, r, a }, \
.chroma_sample = DPU_CHROMA_RGB, \
.unpack_align_msb = 0, \
.unpack_tight = 1, \
.unpack_count = uc, \
.bpp = bp, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = th \
}
#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
alpha, chroma, count, bp, flg, fm, np) \
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_INTERLEAVED, \
.alpha_enable = alpha, \
.element = { (e0), (e1), (e2), (e3)}, \
.bits = { g, b, r, a }, \
.chroma_sample = chroma, \
.unpack_align_msb = 0, \
.unpack_tight = 1, \
.unpack_count = count, \
.bpp = bp, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = DPU_TILE_HEIGHT_DEFAULT \
}
#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
.alpha_enable = false, \
.element = { (e0), (e1), 0, 0 }, \
.bits = { g, b, r, a }, \
.chroma_sample = chroma, \
.unpack_align_msb = 0, \
.unpack_tight = 1, \
.unpack_count = 2, \
.bpp = 2, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = DPU_TILE_HEIGHT_DEFAULT \
}
#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
flg, fm, np, th) \
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
.alpha_enable = false, \
.element = { (e0), (e1), 0, 0 }, \
.bits = { g, b, r, a }, \
.chroma_sample = chroma, \
.unpack_align_msb = 0, \
.unpack_tight = 1, \
.unpack_count = 2, \
.bpp = 2, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = th \
}
#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
.alpha_enable = false, \
.element = { (e0), (e1), 0, 0 }, \
.bits = { g, b, r, a }, \
.chroma_sample = chroma, \
.unpack_align_msb = 1, \
.unpack_tight = 0, \
.unpack_count = 2, \
.bpp = 2, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = DPU_TILE_HEIGHT_DEFAULT \
}
#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
flg, fm, np, th) \
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
.alpha_enable = false, \
.element = { (e0), (e1), 0, 0 }, \
.bits = { g, b, r, a }, \
.chroma_sample = chroma, \
.unpack_align_msb = 1, \
.unpack_tight = 0, \
.unpack_count = 2, \
.bpp = 2, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = th \
}
#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
flg, fm, np) \
{ \
.base.pixel_format = DRM_FORMAT_ ## fmt, \
.fetch_planes = DPU_PLANE_PLANAR, \
.alpha_enable = alpha, \
.element = { (e0), (e1), (e2), 0 }, \
.bits = { g, b, r, a }, \
.chroma_sample = chroma, \
.unpack_align_msb = 0, \
.unpack_tight = 1, \
.unpack_count = 1, \
.bpp = bp, \
.fetch_mode = fm, \
.flag = {(flg)}, \
.num_planes = np, \
.tile_height = DPU_TILE_HEIGHT_DEFAULT \
}
/*
* struct dpu_media_color_map - maps drm format to media format
* @format: DRM base pixel format
* @color: Media API color related to DRM format
*/
struct dpu_media_color_map {
uint32_t format;
uint32_t color;
};
static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 3, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 3, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX1010102,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_LINEAR, 1),
PSEUDO_YUV_FMT(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
PSEUDO_YUV_FMT(NV21,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
PSEUDO_YUV_FMT(NV16,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
PSEUDO_YUV_FMT(NV61,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
PSEUDO_YUV_FMT_LOOSE(P010,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
INTERLEAVED_YUV_FMT(VYUY,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
INTERLEAVED_YUV_FMT(UYVY,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
INTERLEAVED_YUV_FMT(YUYV,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
INTERLEAVED_YUV_FMT(YVYU,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 2),
PLANAR_YUV_FMT(YUV420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb, C0_G_Y,
false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 3),
PLANAR_YUV_FMT(YVU420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr, C0_G_Y,
false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_LINEAR, 3),
};
/*
* UBWC formats table:
* This table holds the UBWC formats supported.
* If a compression ratio needs to be used for this or any other format,
* the data will be passed by user-space.
*/
static const struct dpu_format dpu_format_map_ubwc[] = {
INTERLEAVED_RGB_FMT_TILED(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
/* ARGB8888 and ABGR8888 purposely have the same color
* ordering. The hardware only supports ABGR8888 UBWC
* natively.
*/
INTERLEAVED_RGB_FMT_TILED(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
INTERLEAVED_RGB_FMT_TILED(XRGB2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
/* XRGB2101010 and ARGB2101010 purposely have the same color
* ordering. The hardware only supports ARGB2101010 UBWC
* natively.
*/
INTERLEAVED_RGB_FMT_TILED(ARGB2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
PSEUDO_YUV_FMT_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
PSEUDO_YUV_FMT_TILED(P010,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, DPU_FORMAT_FLAG_DX |
DPU_FORMAT_FLAG_YUV |
DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_UBWC),
};
/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
 * Note: Not using the drm_format_*_subsampling helpers since the DPU format
 * tables track chroma sampling with their own enumeration
 */
static void _dpu_get_v_h_subsample_rate(
enum dpu_chroma_samp_type chroma_sample,
uint32_t *v_sample,
uint32_t *h_sample)
{
if (!v_sample || !h_sample)
return;
switch (chroma_sample) {
case DPU_CHROMA_H2V1:
*v_sample = 1;
*h_sample = 2;
break;
case DPU_CHROMA_H1V2:
*v_sample = 2;
*h_sample = 1;
break;
case DPU_CHROMA_420:
*v_sample = 2;
*h_sample = 2;
break;
default:
*v_sample = 1;
*h_sample = 1;
break;
}
}
static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
{
static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_ARGB2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_XRGB2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
};
int color_fmt = -1;
int i;
if (fmt->base.pixel_format == DRM_FORMAT_NV12 ||
fmt->base.pixel_format == DRM_FORMAT_P010) {
if (DPU_FORMAT_IS_DX(fmt)) {
if (fmt->unpack_tight)
color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
else
color_fmt = COLOR_FMT_P010_UBWC;
} else
color_fmt = COLOR_FMT_NV12_UBWC;
return color_fmt;
}
for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
color_fmt = dpu_media_ubwc_map[i].color;
break;
}
return color_fmt;
}
static int _dpu_format_get_plane_sizes_ubwc(
const struct dpu_format *fmt,
const uint32_t width,
const uint32_t height,
struct dpu_hw_fmt_layout *layout)
{
int i;
int color;
bool meta = DPU_FORMAT_IS_UBWC(fmt);
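	/*
	 * Tiled-but-uncompressed formats also take this path; only true
	 * UBWC formats carry the extra metadata planes.
	 */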
memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
layout->format = fmt;
layout->width = width;
layout->height = height;
layout->num_planes = fmt->num_planes;
color = _dpu_format_get_media_color_ubwc(fmt);
if (color < 0) {
DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
(char *)&fmt->base.pixel_format);
return -EINVAL;
}
if (DPU_FORMAT_IS_YUV(layout->format)) {
uint32_t y_sclines, uv_sclines;
uint32_t y_meta_scanlines = 0;
uint32_t uv_meta_scanlines = 0;
layout->num_planes = 2;
layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
y_sclines = VENUS_Y_SCANLINES(color, height);
layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
uv_sclines = VENUS_UV_SCANLINES(color, height);
layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
if (!meta)
goto done;
layout->num_planes += 2;
layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
} else {
uint32_t rgb_scanlines, rgb_meta_scanlines;
layout->num_planes = 1;
layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
if (!meta)
goto done;
layout->num_planes += 2;
layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
}
done:
for (i = 0; i < DPU_MAX_PLANES; i++)
layout->total_size += layout->plane_size[i];
return 0;
}
static int _dpu_format_get_plane_sizes_linear(
const struct dpu_format *fmt,
const uint32_t width,
const uint32_t height,
struct dpu_hw_fmt_layout *layout,
const uint32_t *pitches)
{
int i;
memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
layout->format = fmt;
layout->width = width;
layout->height = height;
layout->num_planes = fmt->num_planes;
/* Due to memset above, only need to set planes of interest */
if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
layout->num_planes = 1;
layout->plane_size[0] = width * height * layout->format->bpp;
layout->plane_pitch[0] = width * layout->format->bpp;
} else {
uint32_t v_subsample, h_subsample;
uint32_t chroma_samp;
uint32_t bpp = 1;
chroma_samp = fmt->chroma_sample;
_dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
&h_subsample);
if (width % h_subsample || height % v_subsample) {
DRM_ERROR("mismatch in subsample vs dimensions\n");
return -EINVAL;
}
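		/* 10-bit (DX) NV12 variants store two bytes per component */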
if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
(DPU_FORMAT_IS_DX(fmt)))
bpp = 2;
layout->plane_pitch[0] = width * bpp;
layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
layout->plane_size[0] = layout->plane_pitch[0] * height;
layout->plane_size[1] = layout->plane_pitch[1] *
(height / v_subsample);
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
layout->num_planes = 2;
layout->plane_size[1] *= 2;
layout->plane_pitch[1] *= 2;
} else {
/* planar */
layout->num_planes = 3;
layout->plane_size[2] = layout->plane_size[1];
layout->plane_pitch[2] = layout->plane_pitch[1];
}
}
/*
* linear format: allow user allocated pitches if they are greater than
* the requirement.
* ubwc format: pitch values are computed uniformly across
* all the components based on ubwc specifications.
*/
for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
if (pitches && layout->plane_pitch[i] < pitches[i])
layout->plane_pitch[i] = pitches[i];
}
for (i = 0; i < DPU_MAX_PLANES; i++)
layout->total_size += layout->plane_size[i];
return 0;
}
static int dpu_format_get_plane_sizes(
const struct dpu_format *fmt,
const uint32_t w,
const uint32_t h,
struct dpu_hw_fmt_layout *layout,
const uint32_t *pitches)
{
if (!layout || !fmt) {
DRM_ERROR("invalid pointer\n");
return -EINVAL;
}
if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
DRM_ERROR("image dimensions outside max range\n");
return -ERANGE;
}
if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
}
static int _dpu_format_populate_addrs_ubwc(
struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
uint32_t base_addr = 0;
bool meta;
if (!fb || !layout) {
DRM_ERROR("invalid pointers\n");
return -EINVAL;
}
if (aspace)
base_addr = msm_framebuffer_iova(fb, aspace, 0);
if (!base_addr) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
}
meta = DPU_FORMAT_IS_UBWC(layout->format);
/* Per-format logic for verifying active planes */
if (DPU_FORMAT_IS_YUV(layout->format)) {
/************************************************/
/* UBWC ** */
/* buffer ** DPU PLANE */
/* format ** */
/************************************************/
/* ------------------- ** -------------------- */
/* | Y meta | ** | Y bitstream | */
/* | data | ** | plane | */
/* ------------------- ** -------------------- */
/* | Y bitstream | ** | CbCr bitstream | */
/* | data | ** | plane | */
/* ------------------- ** -------------------- */
/* | Cbcr metadata | ** | Y meta | */
/* | data | ** | plane | */
/* ------------------- ** -------------------- */
/* | CbCr bitstream | ** | CbCr meta | */
/* | data | ** | plane | */
/* ------------------- ** -------------------- */
/************************************************/
/* configure Y bitstream plane */
layout->plane_addr[0] = base_addr + layout->plane_size[2];
/* configure CbCr bitstream plane */
layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
return 0;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
/* configure CbCr metadata plane */
layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ layout->plane_size[2];
} else {
/************************************************/
/* UBWC ** */
/* buffer ** DPU PLANE */
/* format ** */
/************************************************/
/* ------------------- ** -------------------- */
/* | RGB meta | ** | RGB bitstream | */
/* | data | ** | plane | */
/* ------------------- ** -------------------- */
/* | RGB bitstream | ** | NONE | */
/* | data | ** | | */
/* ------------------- ** -------------------- */
/* ** | RGB meta | */
/* ** | plane | */
/* ** -------------------- */
/************************************************/
layout->plane_addr[0] = base_addr + layout->plane_size[2];
layout->plane_addr[1] = 0;
if (!meta)
return 0;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
return 0;
}
static int _dpu_format_populate_addrs_linear(
struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
unsigned int i;
/* Can now check the pitches given vs pitches expected */
for (i = 0; i < layout->num_planes; ++i) {
if (layout->plane_pitch[i] > fb->pitches[i]) {
DRM_ERROR("plane %u expected pitch %u, fb %u\n",
i, layout->plane_pitch[i], fb->pitches[i]);
return -EINVAL;
}
}
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i) {
if (aspace)
layout->plane_addr[i] =
msm_framebuffer_iova(fb, aspace, i);
if (!layout->plane_addr[i]) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
}
}
return 0;
}
int dpu_format_populate_layout(
struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
int ret;
if (!fb || !layout) {
DRM_ERROR("invalid arguments\n");
return -EINVAL;
}
if ((fb->width > DPU_MAX_IMG_WIDTH) ||
(fb->height > DPU_MAX_IMG_HEIGHT)) {
DRM_ERROR("image dimensions outside max range\n");
return -ERANGE;
}
layout->format = to_dpu_format(msm_framebuffer_format(fb));
/* Populate the plane sizes etc via get_format */
ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
layout, fb->pitches);
if (ret)
return ret;
/* Populate the addresses given the fb */
if (DPU_FORMAT_IS_UBWC(layout->format) ||
DPU_FORMAT_IS_TILE(layout->format))
ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
else
ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
return ret;
}
int dpu_format_check_modified_format(
const struct msm_kms *kms,
const struct msm_format *msm_fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos)
{
const struct drm_format_info *info;
const struct dpu_format *fmt;
struct dpu_hw_fmt_layout layout;
uint32_t bos_total_size = 0;
int ret, i;
if (!msm_fmt || !cmd || !bos) {
DRM_ERROR("invalid arguments\n");
return -EINVAL;
}
fmt = to_dpu_format(msm_fmt);
info = drm_format_info(fmt->base.pixel_format);
if (!info)
return -EINVAL;
ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
&layout, cmd->pitches);
if (ret)
return ret;
for (i = 0; i < info->num_planes; i++) {
if (!bos[i]) {
DRM_ERROR("invalid handle for plane %d\n", i);
return -EINVAL;
}
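		/* count each distinct buffer object once; planes may share one BO */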
if ((i == 0) || (bos[i] != bos[0]))
bos_total_size += bos[i]->size;
}
if (bos_total_size < layout.total_size) {
DRM_ERROR("buffers total size too small %u expected %u\n",
bos_total_size, layout.total_size);
return -EINVAL;
}
return 0;
}
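/*
 * Illustrative sketch (not part of the driver; names are hypothetical):
 * the size check above counts a GEM object only once when later planes
 * reuse the plane-0 object, then requires the pooled size to cover the
 * computed layout. Restated over plain sizes:
 */
static inline bool __maybe_unused
dpu_sketch_bos_fit(const void * const *bo, const size_t *bo_size,
		   int num_planes, size_t required)
{
	size_t total = 0;
	int i;

	for (i = 0; i < num_planes; i++)
		if (i == 0 || bo[i] != bo[0])	/* skip a re-used plane-0 bo */
			total += bo_size[i];

	return total >= required;
}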
const struct dpu_format *dpu_get_dpu_format_ext(
const uint32_t format,
const uint64_t modifier)
{
uint32_t i = 0;
const struct dpu_format *fmt = NULL;
const struct dpu_format *map = NULL;
ssize_t map_size = 0;
/*
	 * Currently we support exactly zero or one modifier.
* All planes use the same modifier.
*/
DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
switch (modifier) {
case 0:
map = dpu_format_map;
map_size = ARRAY_SIZE(dpu_format_map);
break;
case DRM_FORMAT_MOD_QCOM_COMPRESSED:
map = dpu_format_map_ubwc;
map_size = ARRAY_SIZE(dpu_format_map_ubwc);
DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
(char *)&format);
break;
default:
DPU_ERROR("unsupported format modifier %llX\n", modifier);
return NULL;
}
for (i = 0; i < map_size; i++) {
if (format == map[i].base.pixel_format) {
fmt = &map[i];
break;
}
}
if (fmt == NULL)
DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
(char *)&format, modifier);
else
DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
(char *)&format, modifier,
DPU_FORMAT_IS_UBWC(fmt),
DPU_FORMAT_IS_YUV(fmt));
return fmt;
}
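/*
 * Illustrative usage sketch (not part of the driver): resolving a linear
 * XRGB8888 framebuffer format; DRM_FORMAT_XRGB8888 is assumed here purely
 * for illustration.
 *
 *	const struct dpu_format *fmt =
 *		dpu_get_dpu_format_ext(DRM_FORMAT_XRGB8888, 0);
 *	if (!fmt)
 *		return -EINVAL;	// (fourcc, modifier) pair not supported
 */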
const struct msm_format *dpu_get_msm_format(
struct msm_kms *kms,
const uint32_t format,
const uint64_t modifiers)
{
const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
modifiers);
if (fmt)
return &fmt->base;
return NULL;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_wb.h"
#include "dpu_formats.h"
#include "dpu_kms.h"
#define WB_DST_FORMAT 0x000
#define WB_DST_OP_MODE 0x004
#define WB_DST_PACK_PATTERN 0x008
#define WB_DST0_ADDR 0x00C
#define WB_DST1_ADDR 0x010
#define WB_DST2_ADDR 0x014
#define WB_DST3_ADDR 0x018
#define WB_DST_YSTRIDE0 0x01C
#define WB_DST_YSTRIDE1 0x020
#define WB_DST_DITHER_BITDEPTH 0x024
#define WB_DST_MATRIX_ROW0 0x030
#define WB_DST_MATRIX_ROW1 0x034
#define WB_DST_MATRIX_ROW2 0x038
#define WB_DST_MATRIX_ROW3 0x03C
#define WB_DST_WRITE_CONFIG 0x048
#define WB_ROTATION_DNSCALER 0x050
#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
#define WB_N16_INIT_PHASE_X_C03 0x060
#define WB_N16_INIT_PHASE_X_C12 0x064
#define WB_N16_INIT_PHASE_Y_C03 0x068
#define WB_N16_INIT_PHASE_Y_C12 0x06C
#define WB_OUT_SIZE 0x074
#define WB_ALPHA_X_VALUE 0x078
#define WB_DANGER_LUT 0x084
#define WB_SAFE_LUT 0x088
#define WB_QOS_CTRL 0x090
#define WB_CREQ_LUT_0 0x098
#define WB_CREQ_LUT_1 0x09C
#define WB_UBWC_STATIC_CTRL 0x144
#define WB_MUX 0x150
#define WB_CROP_CTRL 0x154
#define WB_CROP_OFFSET 0x158
#define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0
#define WB_CDP_CNTL 0x2B4
#define WB_OUT_IMAGE_SIZE 0x2C0
#define WB_OUT_XY 0x2C4
static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *data)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
}
static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *data)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
const struct dpu_format *fmt = data->dest.format;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 write_config = 0;
u32 opmode = 0;
u32 dst_addr_sw = 0;
chroma_samp = fmt->chroma_sample;
dst_format = (chroma_samp << 23) |
(fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) |
(fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C0_G_Y] << 0);
if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
dst_format |= BIT(8); /* DSTC3_EN */
if (!fmt->alpha_enable ||
!(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
dst_format |= BIT(14); /* DST_ALPHA_X */
}
pattern = (fmt->element[3] << 24) |
(fmt->element[2] << 16) |
(fmt->element[1] << 8) |
(fmt->element[0] << 0);
dst_format |= (fmt->unpack_align_msb << 18) |
(fmt->unpack_tight << 17) |
((fmt->unpack_count - 1) << 12) |
((fmt->bpp - 1) << 9);
ystride0 = data->dest.plane_pitch[0] |
(data->dest.plane_pitch[1] << 16);
ystride1 = data->dest.plane_pitch[2] |
(data->dest.plane_pitch[3] << 16);
if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
else
outsize = (data->dest.height << 16) | data->dest.width;
DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format);
DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode);
DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
DPU_REG_WRITE(c, WB_OUT_SIZE, outsize);
DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
}
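/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): WB_DST_FORMAT packs per-component bit-depth codes and
 * unpack parameters into one register word using the shifts seen above.
 * This restates only the component/count/bpp packing; the alpha and
 * unpack-alignment bits are handled separately in the function above.
 */
static inline u32 __maybe_unused
dpu_sketch_wb_dst_format(u32 chroma_samp, u32 fetch_planes,
			 const u32 bits[4], u32 unpack_count, u32 bpp)
{
	return (chroma_samp << 23) | (fetch_planes << 19) |
	       (bits[3] << 6) | (bits[2] << 4) |
	       (bits[1] << 2) | (bits[0] << 0) |
	       ((unpack_count - 1) << 12) | ((bpp - 1) << 9);
}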
static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 image_size, out_size, out_xy;
image_size = (wb->dest.height << 16) | wb->dest.width;
out_xy = 0;
out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
DPU_REG_WRITE(c, WB_OUT_XY, out_xy);
DPU_REG_WRITE(c, WB_OUT_SIZE, out_size);
}
static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
struct dpu_hw_qos_cfg *cfg)
{
if (!ctx || !cfg)
return;
_dpu_hw_setup_qos_lut(&ctx->hw, WB_DANGER_LUT,
test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features),
cfg);
}
static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
const struct dpu_format *fmt,
bool enable)
{
if (!ctx)
return;
dpu_setup_cdp(&ctx->hw, WB_CDP_CNTL, fmt, enable);
}
static void dpu_hw_wb_bind_pingpong_blk(
struct dpu_hw_wb *ctx,
const enum dpu_pingpong pp)
{
struct dpu_hw_blk_reg_map *c;
int mux_cfg;
if (!ctx)
return;
c = &ctx->hw;
mux_cfg = DPU_REG_READ(c, WB_MUX);
mux_cfg &= ~0xf;
if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
DPU_REG_WRITE(c, WB_MUX, mux_cfg);
}
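/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the WB_MUX low nibble selects the feeding pingpong
 * block (0-7, relative to the PINGPONG_0 base passed in), with 0xf
 * meaning "unbound", mirroring the read-modify-write above.
 */
static inline u32 __maybe_unused
dpu_sketch_wb_mux_field(int pp, int pingpong_0_base)
{
	return pp ? (u32)(pp - pingpong_0_base) & 0x7 : 0xf;
}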
static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
unsigned long features)
{
ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
ops->setup_outformat = dpu_hw_wb_setup_format;
if (test_bit(DPU_WB_XY_ROI_OFFSET, &features))
ops->setup_roi = dpu_hw_wb_roi;
if (test_bit(DPU_WB_QOS, &features))
ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut;
if (test_bit(DPU_WB_CDP, &features))
ops->setup_cdp = dpu_hw_wb_setup_cdp;
if (test_bit(DPU_WB_INPUT_CTRL, &features))
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
}
struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_wb *c;
if (!addr)
return ERR_PTR(-EINVAL);
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_WB;
/* Assign ops */
c->idx = cfg->id;
c->caps = cfg;
_setup_wb_ops(&c->ops, c->caps->features);
return c;
}
void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
{
kfree(hw_wb);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define to_dpu_encoder_phys_cmd(x) \
container_of(x, struct dpu_encoder_phys_cmd, base)
#define PP_TIMEOUT_MAX_TRIALS 10
/*
* Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels to
 * override these default values.
*/
#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE);
}
static void _dpu_encoder_phys_cmd_update_intf_cfg(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_hw_ctl *ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};
ctl = phys_enc->hw_ctl;
if (!ctl->ops.setup_intf_cfg)
return;
intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
phys_enc->hw_pp->idx);
if (intf_cfg.dsc != 0)
cmd_mode_cfg.data_compress = true;
if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
unsigned long lock_flags;
int new_cnt;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("pp_done_irq");
/* notify all synchronous clients first, then asynchronous clients */
dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
new_cnt, event);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
DPU_ATRACE_END("pp_done_irq");
}
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
if (phys_enc->has_intf_te) {
if (!phys_enc->hw_intf)
return;
} else {
if (!phys_enc->hw_pp)
return;
}
DPU_ATRACE_BEGIN("rd_ptr_irq");
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
wake_up_all(&cmd_enc->pending_vblank_wq);
DPU_ATRACE_END("rd_ptr_irq");
}
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
DPU_ATRACE_BEGIN("ctl_start_irq");
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
/* Signal any waiting ctl start interrupt */
wake_up_all(&phys_enc->pending_kickoff_wq);
DPU_ATRACE_END("ctl_start_irq");
}
static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}
static void dpu_encoder_phys_cmd_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
if (phys_enc->has_intf_te)
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
else
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
bool do_log = false;
struct drm_encoder *drm_enc;
if (!phys_enc->hw_pp)
return -EINVAL;
drm_enc = phys_enc->parent;
cmd_enc->pp_timeout_report_cnt++;
if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
do_log = true;
} else if (cmd_enc->pp_timeout_report_cnt == 1) {
do_log = true;
}
trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
phys_enc->hw_pp->idx - PINGPONG_0,
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt),
frame_event);
/* to avoid flooding, only log first time, and "dead" time */
if (do_log) {
DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
DRMID(drm_enc),
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->hw_ctl->idx - CTL_0,
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt));
msm_disp_snapshot_state(drm_enc->dev);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_RDPTR]);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
/* request a ctl reset before the next kickoff */
phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);
return -ETIMEDOUT;
}
static int _dpu_encoder_phys_cmd_wait_for_idle(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_encoder_wait_info wait_info;
int ret;
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_PINGPONG],
dpu_encoder_phys_cmd_pp_tx_done_irq,
&wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
else if (!ret)
cmd_enc->pp_timeout_report_cnt = 0;
return ret;
}
static int dpu_encoder_phys_cmd_control_vblank_irq(
struct dpu_encoder_phys *phys_enc,
bool enable)
{
int ret = 0;
int refcount;
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
goto end;
/* protect against negative */
if (!enable && refcount == 0) {
ret = -EINVAL;
goto end;
}
DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable ? "true" : "false", refcount);
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_RDPTR],
dpu_encoder_phys_cmd_te_rd_ptr_irq,
phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_RDPTR]);
end:
if (ret) {
DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, ret,
enable ? "true" : "false", refcount);
}
return ret;
}
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
if (enable) {
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_PINGPONG],
dpu_encoder_phys_cmd_pp_tx_done_irq,
phys_enc);
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN],
dpu_encoder_phys_cmd_underrun_irq,
phys_enc);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_CTL_START],
dpu_encoder_phys_cmd_ctl_start_irq,
phys_enc);
} else {
if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_CTL_START]);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN]);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_PINGPONG]);
}
}
static void dpu_encoder_phys_cmd_tearcheck_config(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_hw_tear_check tc_cfg = { 0 };
struct drm_display_mode *mode;
bool tc_enable = true;
unsigned long vsync_hz;
struct dpu_kms *dpu_kms;
if (phys_enc->has_intf_te) {
if (!phys_enc->hw_intf ||
!phys_enc->hw_intf->ops.enable_tearcheck) {
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "");
} else {
if (!phys_enc->hw_pp ||
!phys_enc->hw_pp->ops.enable_tearcheck) {
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
}
mode = &phys_enc->cached_mode;
dpu_kms = phys_enc->dpu_kms;
/*
	 * TE default: DSI byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr;
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
	 * LCD panel refresh rate, divided by the number of rows (lines) in
	 * the LCD panel.
*/
vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
if (!vsync_hz) {
DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
return;
}
tc_cfg.vsync_count = vsync_hz /
(mode->vtotal * drm_mode_vrefresh(mode));
/*
* Set the sync_cfg_height to twice vtotal so that if we lose a
* TE event coming from the display TE pin we won't stall immediately
*/
tc_cfg.hw_vsync_mode = 1;
tc_cfg.sync_cfg_height = mode->vtotal * 2;
tc_cfg.vsync_init_val = mode->vdisplay;
tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
tc_cfg.start_pos = mode->vdisplay;
tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
DPU_DEBUG_CMDENC(cmd_enc,
"tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
DPU_DEBUG_CMDENC(cmd_enc,
"tc enable %u start_pos %u rd_ptr_irq %u\n",
tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
DPU_DEBUG_CMDENC(cmd_enc,
"tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
tc_cfg.vsync_init_val);
DPU_DEBUG_CMDENC(cmd_enc,
"tc cfgheight %u thresh_start %u thresh_cont %u\n",
tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
tc_cfg.sync_threshold_continue);
if (phys_enc->has_intf_te)
phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
else
phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
}
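/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the tear-check vsync counter divides the "vsync" clock
 * by one full frame period (vtotal lines at the mode's refresh rate),
 * i.e. it counts clock ticks per line, exactly as computed above.
 */
static inline u32 __maybe_unused
dpu_sketch_tc_vsync_count(unsigned long vsync_hz, u32 vtotal, u32 vrefresh)
{
	return vsync_hz / (vtotal * vrefresh);
}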
static void _dpu_encoder_phys_cmd_pingpong_config(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
phys_enc->hw_pp->idx - PINGPONG_0);
drm_mode_debug_printmodeline(&phys_enc->cached_mode);
_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}
static bool dpu_encoder_phys_cmd_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
	/*
* we do separate flush for each CTL and let
* CTL_START synchronize them
*/
return false;
}
static void dpu_encoder_phys_cmd_enable_helper(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return;
ctl = phys_enc->hw_ctl;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid phys encoder\n");
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
if (phys_enc->enable_state == DPU_ENC_ENABLED) {
DPU_ERROR("already enabled\n");
return;
}
dpu_encoder_phys_cmd_enable_helper(phys_enc);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
static void _dpu_encoder_phys_cmd_connect_te(
struct dpu_encoder_phys *phys_enc, bool enable)
{
if (phys_enc->has_intf_te) {
if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
return;
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
} else {
if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
return;
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
}
}
static void dpu_encoder_phys_cmd_prepare_idle_pc(
struct dpu_encoder_phys *phys_enc)
{
_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}
static int dpu_encoder_phys_cmd_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_intf *hw_intf;
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return -EINVAL;
if (phys_enc->has_intf_te) {
hw_intf = phys_enc->hw_intf;
if (!hw_intf || !hw_intf->ops.get_line_count)
return -EINVAL;
return hw_intf->ops.get_line_count(hw_intf);
}
hw_pp = phys_enc->hw_pp;
if (!hw_pp || !hw_pp->ops.get_line_count)
return -EINVAL;
return hw_pp->ops.get_line_count(hw_pp);
}
static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_hw_ctl *ctl;
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
return;
}
if (phys_enc->has_intf_te) {
DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0,
phys_enc->enable_state);
if (phys_enc->hw_intf->ops.disable_tearcheck)
phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
} else {
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->enable_state);
if (phys_enc->hw_pp->ops.disable_tearcheck)
phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
}
if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
PINGPONG_NONE);
ctl = phys_enc->hw_ctl;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}
phys_enc->enable_state = DPU_ENC_DISABLED;
}
static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
kfree(cmd_enc);
}
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
int ret;
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(&phys_enc->pending_kickoff_cnt));
/*
	 * Mark the kickoff request as outstanding. If there is more than one
	 * outstanding request, then we have to wait for the previous one to
	 * complete.
*/
ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (ret) {
/* force pending_kickoff_cnt 0 to discard failed kickoff */
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
DRMID(phys_enc->parent), ret,
phys_enc->hw_pp->idx - PINGPONG_0);
}
dpu_encoder_phys_cmd_enable_te(phys_enc);
DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(&phys_enc->pending_kickoff_cnt));
}
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return;
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return;
if (phys_enc->has_intf_te) {
if (!phys_enc->hw_intf->ops.disable_autorefresh)
return;
phys_enc->hw_intf->ops.disable_autorefresh(
phys_enc->hw_intf,
DRMID(phys_enc->parent),
phys_enc->cached_mode.vdisplay);
} else {
if (!phys_enc->hw_pp ||
!phys_enc->hw_pp->ops.disable_autorefresh)
return;
phys_enc->hw_pp->ops.disable_autorefresh(
phys_enc->hw_pp,
DRMID(phys_enc->parent),
phys_enc->cached_mode.vdisplay);
}
}
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_encoder_wait_info wait_info;
int ret;
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_CTL_START],
dpu_encoder_phys_cmd_ctl_start_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
ret = -EINVAL;
} else if (!ret)
ret = 0;
return ret;
}
static int dpu_encoder_phys_cmd_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
int rc;
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
DRMID(phys_enc->parent), rc,
phys_enc->hw_intf->idx - INTF_0);
}
return rc;
}
static int dpu_encoder_phys_cmd_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
/* only required for master controller */
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return 0;
if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);
return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
static int dpu_encoder_phys_cmd_wait_for_vblank(
struct dpu_encoder_phys *phys_enc)
{
int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
struct dpu_encoder_wait_info wait_info;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return rc;
wait_info.wq = &cmd_enc->pending_vblank_wq;
wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
atomic_inc(&cmd_enc->pending_vblank_cnt);
rc = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_RDPTR],
dpu_encoder_phys_cmd_te_rd_ptr_irq,
&wait_info);
return rc;
}
static void dpu_encoder_phys_cmd_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
	/*
* re-enable external TE, either for the first time after enabling
* or if disabled for Autorefresh
*/
_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}
static void dpu_encoder_phys_cmd_trigger_start(
struct dpu_encoder_phys *phys_enc)
{
dpu_encoder_helper_trigger_start(phys_enc);
}
static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_cmd_is_master;
ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
ops->destroy = dpu_encoder_phys_cmd_destroy;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
DPU_DEBUG("intf\n");
cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
if (!cmd_enc) {
DPU_ERROR("failed to allocate\n");
return ERR_PTR(-ENOMEM);
}
phys_enc = &cmd_enc->base;
dpu_encoder_phys_init(phys_enc, p);
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_CMD;
cmd_enc->stream_sel = 0;
phys_enc->has_intf_te = test_bit(DPU_INTF_TE,
&phys_enc->hw_intf->cap->features);
atomic_set(&cmd_enc->pending_vblank_cnt, 0);
init_waitqueue_head(&cmd_enc->pending_vblank_wq);
DPU_DEBUG_CMDENC(cmd_enc, "created\n");
return phys_enc;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_merge3d.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->parent ? \
(e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
(e) && (e)->parent ? \
(e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define to_dpu_encoder_phys_vid(x) \
container_of(x, struct dpu_encoder_phys_vid, base)
static bool dpu_encoder_phys_vid_is_master(
		struct dpu_encoder_phys *phys_enc)
{
	return phys_enc->split_role != ENC_ROLE_SLAVE;
}
static void drm_mode_to_intf_timing_params(
const struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct dpu_hw_intf_timing_params *timing)
{
memset(timing, 0, sizeof(*timing));
if ((mode->htotal < mode->hsync_end)
|| (mode->hsync_start < mode->hdisplay)
|| (mode->vtotal < mode->vsync_end)
|| (mode->vsync_start < mode->vdisplay)
|| (mode->hsync_end < mode->hsync_start)
|| (mode->vsync_end < mode->vsync_start)) {
DPU_ERROR(
"invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
mode->hsync_start, mode->hsync_end,
mode->htotal, mode->hdisplay);
DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
mode->vsync_start, mode->vsync_end,
mode->vtotal, mode->vdisplay);
return;
}
/*
* https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
* Active Region Front Porch Sync Back Porch
* <-----------------><------------><-----><----------->
* <- [hv]display --->
* <--------- [hv]sync_start ------>
* <----------------- [hv]sync_end ------->
* <---------------------------- [hv]total ------------->
*/
timing->width = mode->hdisplay; /* active width */
timing->height = mode->vdisplay; /* active height */
timing->xres = timing->width;
timing->yres = timing->height;
timing->h_back_porch = mode->htotal - mode->hsync_end;
timing->h_front_porch = mode->hsync_start - mode->hdisplay;
timing->v_back_porch = mode->vtotal - mode->vsync_end;
timing->v_front_porch = mode->vsync_start - mode->vdisplay;
timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
timing->border_clr = 0;
timing->underflow_clr = 0xff;
timing->hsync_skew = mode->hskew;
/* DSI controller cannot handle active-low sync signals. */
if (phys_enc->hw_intf->cap->type == INTF_DSI) {
timing->hsync_polarity = 0;
timing->vsync_polarity = 0;
}
/* for DP/EDP, Shift timings to align it to bottom right */
if (phys_enc->hw_intf->cap->type == INTF_DP) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
/*
	 * for DP, divide the horizontal parameters by 2 when
* widebus is enabled
*/
if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
timing->width = timing->width >> 1;
timing->xres = timing->xres >> 1;
timing->h_back_porch = timing->h_back_porch >> 1;
timing->h_front_porch = timing->h_front_porch >> 1;
timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
}
}
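/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the porch and pulse widths above are plain differences
 * of the DRM mode fields. Restated for the horizontal axis (the vertical
 * axis is analogous):
 */
static inline void __maybe_unused
dpu_sketch_h_timings(const struct drm_display_mode *m,
		     u32 *front, u32 *pulse, u32 *back)
{
	*front = m->hsync_start - m->hdisplay;	/* active end -> sync start */
	*pulse = m->hsync_end - m->hsync_start;	/* sync pulse width */
	*back = m->htotal - m->hsync_end;	/* sync end -> next line */
}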
static u32 get_horizontal_total(const struct dpu_hw_intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
timing->h_back_porch + timing->h_front_porch +
timing->hsync_pulse_width;
return active + inactive;
}
static u32 get_vertical_total(const struct dpu_hw_intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
timing->v_back_porch + timing->v_front_porch +
timing->vsync_pulse_width;
return active + inactive;
}
/*
* programmable_fetch_get_num_lines:
* Number of fetch lines in vertical front porch
* @timing: Pointer to the intf timing information for the requested mode
*
* Returns the number of fetch lines in vertical front porch at which mdp
* can start fetching the next frame.
*
* Number of needed prefetch lines is anything that cannot be absorbed in the
* start of frame time (back porch + vsync pulse width).
*
 * Some panels have a very large VFP; however, we only need enough lines to
 * cover the chip's worst-case latencies.
*/
static u32 programmable_fetch_get_num_lines(
struct dpu_encoder_phys *phys_enc,
const struct dpu_hw_intf_timing_params *timing)
{
u32 worst_case_needed_lines =
phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
u32 start_of_frame_lines =
timing->v_back_porch + timing->vsync_pulse_width;
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
u32 actual_vfp_lines = 0;
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
DPU_DEBUG_VIDENC(phys_enc,
"prog fetch is not needed, large vbp+vsw\n");
actual_vfp_lines = 0;
} else if (timing->v_front_porch < needed_vfp_lines) {
/* Warn fetch needed, but not enough porch in panel config */
pr_warn_once
("low vbp+vfp may lead to perf issues in some cases\n");
DPU_DEBUG_VIDENC(phys_enc,
"less vfp than fetch req, using entire vfp\n");
actual_vfp_lines = timing->v_front_porch;
} else {
DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
actual_vfp_lines = needed_vfp_lines;
}
DPU_DEBUG_VIDENC(phys_enc,
"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
timing->v_front_porch, timing->v_back_porch,
timing->vsync_pulse_width);
DPU_DEBUG_VIDENC(phys_enc,
"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
return actual_vfp_lines;
}
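/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the prefetch-line choice above collapses to a clamp --
 * no prefetch when vbp+vsw already covers the worst case, otherwise the
 * shortfall capped at the available vertical front porch.
 */
static inline u32 __maybe_unused
dpu_sketch_prefetch_lines(u32 worst_case, u32 vbp_plus_vsw, u32 vfp)
{
	if (vbp_plus_vsw >= worst_case)
		return 0;
	return min(worst_case - vbp_plus_vsw, vfp);
}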
/*
* programmable_fetch_config: Programs HW to prefetch lines by offsetting
* the start of fetch into the vertical front porch for cases where the
* vsync pulse width and vertical back porch time is insufficient
*
 * Gets the number of lines to prefetch, then calculates the VSYNC counter
 * value. The HW layer requires the VSYNC counter of the first pixel of the
 * target VFP line.
*
* @timing: Pointer to the intf timing information for the requested mode
*/
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
const struct dpu_hw_intf_timing_params *timing)
{
struct dpu_hw_intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
u32 vert_total = 0;
u32 vfp_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
return;
vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
if (vfp_fetch_lines) {
vert_total = get_vertical_total(timing);
horiz_total = get_horizontal_total(timing);
vfp_fetch_start_vsync_counter =
(vert_total - vfp_fetch_lines) * horiz_total + 1;
f.enable = 1;
f.fetch_start = vfp_fetch_start_vsync_counter;
}
DPU_DEBUG_VIDENC(phys_enc,
"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
vfp_fetch_lines, vfp_fetch_start_vsync_counter);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}
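/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the fetch-start value programmed above is a position in
 * the per-frame pixel counter -- the first pixel of the target VFP line,
 * hence the "+ 1".
 */
static inline u32 __maybe_unused
dpu_sketch_fetch_start(u32 vert_total, u32 horiz_total, u32 prefetch_lines)
{
	return (vert_total - prefetch_lines) * horiz_total + 1;
}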
static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
struct drm_display_mode mode;
struct dpu_hw_intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
u32 fmt_fourcc = DRM_FORMAT_RGB888;
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
drm_mode_init(&mode, &phys_enc->cached_mode);
if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
return;
}
if (!phys_enc->hw_intf->ops.setup_timing_gen) {
DPU_ERROR("timing engine setup is not supported\n");
return;
}
DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
if (phys_enc->split_role != ENC_ROLE_SOLO) {
mode.hdisplay >>= 1;
mode.htotal >>= 1;
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
DPU_DEBUG_VIDENC(phys_enc,
"split_role %d, halve horizontal %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
mode.hsync_start, mode.hsync_end);
}
drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
fmt = dpu_get_dpu_format(fmt_fourcc);
DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
if (phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
phys_enc->hw_pp->idx);
if (phys_enc->hw_pp->merge_3d)
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, intf_cfg.mode_3d);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
}
static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_hw_ctl *hw_ctl;
unsigned long lock_flags;
u32 flush_register = 0;
hw_ctl = phys_enc->hw_ctl;
DPU_ATRACE_BEGIN("vblank_irq");
dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
atomic_read(&phys_enc->pending_kickoff_cnt);
/*
* only decrement the pending flush count if we've actually flushed
* hardware. due to sw irq latency, vblank may have already happened
* so we need to double-check with hw that it accepted the flush bits
*/
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
if (hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
DPU_ENCODER_FRAME_EVENT_DONE);
DPU_ATRACE_END("vblank_irq");
}
static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
return phys_enc->split_role != ENC_ROLE_SOLO;
}
static void dpu_encoder_phys_vid_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int dpu_encoder_phys_vid_control_vblank_irq(
struct dpu_encoder_phys *phys_enc,
bool enable)
{
int ret = 0;
int refcount;
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_vid_is_master(phys_enc))
goto end;
/* protect against negative */
if (!enable && refcount == 0) {
ret = -EINVAL;
goto end;
}
DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
atomic_read(&phys_enc->vblank_refcount));
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_VSYNC],
dpu_encoder_phys_vid_vblank_irq,
phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_VSYNC]);
end:
if (ret) {
DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0, ret, enable,
refcount);
}
return ret;
}
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
ctl = phys_enc->hw_ctl;
DPU_DEBUG_VIDENC(phys_enc, "\n");
if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
/*
* For single flush cases (dual-ctl or pp-split), skip setting the
	 * flush bit for the slave intf, since both intfs use the same ctl
* and HW will only flush the master.
*/
if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
"update pending flush ctl %d intf %d\n",
ctl->idx - CTL_0, phys_enc->hw_intf->idx);
atomic_set(&phys_enc->underrun_cnt, 0);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
phys_enc->enable_state = DPU_ENC_ENABLING;
}
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
DPU_DEBUG_VIDENC(phys_enc, "\n");
kfree(phys_enc);
}
static int dpu_encoder_phys_vid_wait_for_vblank(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_wait_info wait_info;
int ret;
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
return 0;
}
/* Wait for kickoff to complete */
ret = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_VSYNC],
dpu_encoder_phys_vid_vblank_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
}
return ret;
}
static int dpu_encoder_phys_vid_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
int ret;
if (!hw_ctl)
return 0;
ret = wait_event_timeout(phys_enc->pending_kickoff_wq,
(hw_ctl->ops.get_flush_register(hw_ctl) == 0),
msecs_to_jiffies(50));
if (ret <= 0) {
DPU_ERROR("vblank timeout\n");
return -ETIMEDOUT;
}
return 0;
}
static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
int rc;
struct drm_encoder *drm_enc;
drm_enc = phys_enc->parent;
ctl = phys_enc->hw_ctl;
if (!ctl->ops.wait_reset_status)
return;
/*
	 * The HW supports hardware-initiated CTL reset, so before we kick off
	 * a new frame we need to check for, and wait on, completion of any
	 * HW-initiated CTL reset.
*/
rc = ctl->ops.wait_reset_status(ctl);
if (rc) {
DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
msm_disp_snapshot_state(drm_enc->dev);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_VSYNC]);
}
}
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
int ret;
struct dpu_hw_intf_status intf_status = {0};
if (!phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
return;
}
if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
return;
}
if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DPU_ERROR("already disabled\n");
return;
}
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
if (dpu_encoder_phys_vid_is_master(phys_enc))
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
/*
	 * Wait for a vsync so we know that ENABLE=0 has latched before
	 * the (connector) source of the vsync gets disabled. Otherwise we
	 * end up in a funny state if we re-enable before the disable
	 * latches, with the result that some of the settings for the new
	 * modeset (like the new scanout buffer) don't latch properly.
*/
if (dpu_encoder_phys_vid_is_master(phys_enc)) {
ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0, ret);
}
}
if (phys_enc->hw_intf && phys_enc->hw_intf->ops.get_status)
phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &intf_status);
/*
* Wait for a vsync if timing en status is on after timing engine
* is disabled.
*/
if (intf_status.is_en && dpu_encoder_phys_vid_is_master(phys_enc)) {
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0, ret);
}
}
dpu_encoder_helper_phys_cleanup(phys_enc);
phys_enc->enable_state = DPU_ENC_DISABLED;
}
static void dpu_encoder_phys_vid_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
/*
	 * Video mode must flush CTL before enabling the timing engine.
	 * Video encoders need to turn on their interfaces now.
*/
if (phys_enc->enable_state == DPU_ENC_ENABLING) {
trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
}
static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
int ret;
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0,
enable,
atomic_read(&phys_enc->vblank_refcount));
if (enable) {
ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
if (WARN_ON(ret))
return;
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN],
dpu_encoder_phys_vid_underrun_irq,
phys_enc);
} else {
dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN]);
}
}
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
return -EINVAL;
return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}
static int dpu_encoder_phys_vid_get_frame_count(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_intf_status s = {0};
u32 fetch_start = 0;
struct drm_display_mode mode;
drm_mode_init(&mode, &phys_enc->cached_mode);
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status)
return -EINVAL;
phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s);
if (s.is_prog_fetch_en && s.is_en) {
fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay);
if ((s.line_count > fetch_start) &&
(s.line_count <= mode.vtotal))
return s.frame_count + 1;
}
return s.frame_count;
}
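/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): once the line counter is past the programmable-fetch
 * start point, the hardware is already fetching the *next* frame, so the
 * reported frame count is bumped by one, mirroring the window test above.
 */
static inline u32 __maybe_unused
dpu_sketch_adjusted_frame_count(u32 frame_count, u32 line_count,
				u32 fetch_start, u32 vtotal)
{
	if (line_count > fetch_start && line_count <= vtotal)
		return frame_count + 1;
	return frame_count;
}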
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_vid_is_master;
ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
ops->destroy = dpu_encoder_phys_vid_destroy;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
ops->irq_control = dpu_encoder_phys_vid_irq_control;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
if (!p) {
DPU_ERROR("failed to create encoder due to invalid parameter\n");
return ERR_PTR(-EINVAL);
}
phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
if (!phys_enc) {
DPU_ERROR("failed to create encoder due to memory allocation error\n");
return ERR_PTR(-ENOMEM);
}
DPU_DEBUG_VIDENC(phys_enc, "\n");
dpu_encoder_phys_init(phys_enc, p);
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_VIDEO;
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);
return phys_enc;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
static inline bool reserved_by_other(uint32_t *res_map, int idx,
uint32_t enc_id)
{
return res_map[idx] && res_map[idx] != enc_id;
}
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @topology: selected topology for the display
*/
struct dpu_rm_requirements {
struct msm_display_topology topology;
};
int dpu_rm_destroy(struct dpu_rm *rm)
{
int i;
for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
struct dpu_hw_dspp *hw;
if (rm->dspp_blks[i]) {
hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
dpu_hw_dspp_destroy(hw);
}
}
for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
struct dpu_hw_pingpong *hw;
if (rm->pingpong_blks[i]) {
hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
dpu_hw_pingpong_destroy(hw);
}
}
for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
struct dpu_hw_merge_3d *hw;
if (rm->merge_3d_blks[i]) {
hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
dpu_hw_merge_3d_destroy(hw);
}
}
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
struct dpu_hw_mixer *hw;
if (rm->mixer_blks[i]) {
hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
dpu_hw_lm_destroy(hw);
}
}
for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
struct dpu_hw_ctl *hw;
if (rm->ctl_blks[i]) {
hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
dpu_hw_ctl_destroy(hw);
}
}
for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
dpu_hw_intf_destroy(rm->hw_intf[i]);
for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
struct dpu_hw_dsc *hw;
if (rm->dsc_blks[i]) {
hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
dpu_hw_dsc_destroy(hw);
}
}
for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
dpu_hw_wb_destroy(rm->hw_wb[i]);
for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++)
dpu_hw_sspp_destroy(rm->hw_sspp[i]);
return 0;
}
int dpu_rm_init(struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio)
{
int rc, i;
if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
hw = dpu_hw_lm_init(lm, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
goto fail;
}
rm->mixer_blks[lm->id - LM_0] = &hw->base;
}
for (i = 0; i < cat->merge_3d_count; i++) {
struct dpu_hw_merge_3d *hw;
const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
hw = dpu_hw_merge_3d_init(merge_3d, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed merge_3d object creation: err %d\n",
rc);
goto fail;
}
rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
}
for (i = 0; i < cat->pingpong_count; i++) {
struct dpu_hw_pingpong *hw;
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
hw = dpu_hw_pingpong_init(pp, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n",
rc);
goto fail;
}
if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
}
for (i = 0; i < cat->intf_count; i++) {
struct dpu_hw_intf *hw;
const struct dpu_intf_cfg *intf = &cat->intf[i];
hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed intf object creation: err %d\n", rc);
goto fail;
}
rm->hw_intf[intf->id - INTF_0] = hw;
}
for (i = 0; i < cat->wb_count; i++) {
struct dpu_hw_wb *hw;
const struct dpu_wb_cfg *wb = &cat->wb[i];
hw = dpu_hw_wb_init(wb, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed wb object creation: err %d\n", rc);
goto fail;
}
rm->hw_wb[wb->id - WB_0] = hw;
}
for (i = 0; i < cat->ctl_count; i++) {
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
goto fail;
}
rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
}
for (i = 0; i < cat->dspp_count; i++) {
struct dpu_hw_dspp *hw;
const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
hw = dpu_hw_dspp_init(dspp, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed dspp object creation: err %d\n", rc);
goto fail;
}
rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
}
for (i = 0; i < cat->dsc_count; i++) {
struct dpu_hw_dsc *hw;
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
hw = dpu_hw_dsc_init_1_2(dsc, mmio);
else
hw = dpu_hw_dsc_init(dsc, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed dsc object creation: err %d\n", rc);
goto fail;
}
rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
}
for (i = 0; i < cat->sspp_count; i++) {
struct dpu_hw_sspp *hw;
const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
hw = dpu_hw_sspp_init(sspp, mmio, mdss_data);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed sspp object creation: err %d\n", rc);
goto fail;
}
rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
}
return 0;
fail:
dpu_rm_destroy(rm);
return rc ? rc : -EFAULT;
}
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
return top->num_intf > 1;
}
/**
* _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
* @rm: dpu resource manager handle
* @primary_idx: index of primary mixer in rm->mixer_blks[]
*/
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
const struct dpu_lm_cfg *prim_lm_cfg;
prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
return prim_lm_cfg->lm_pair - LM_0;
return -EINVAL;
}
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
* @global_state: resources shared across multiple kms objects
* @enc_id: encoder id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[]; the function
 *	checks whether the lm, and all other hardwired blocks connected to the
 *	lm (pp), are available and appropriate
* @pp_idx: output parameter, index of pingpong block attached to the layer
* mixer in rm->pingpong_blks[].
* @dspp_idx: output parameter, index of dspp block attached to the layer
* mixer in rm->dspp_blks[].
* @reqs: input parameter, rm requirements for HW blocks needed in the
* datapath.
* Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
struct dpu_rm_requirements *reqs)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
/* Already reserved? */
if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
idx = lm_cfg->pingpong - PINGPONG_0;
if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
return false;
}
if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
lm_cfg->pingpong);
return false;
}
*pp_idx = idx;
if (!reqs->topology.num_dspp)
return true;
idx = lm_cfg->dspp - DSPP_0;
if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
return false;
}
if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
lm_cfg->dspp);
return false;
}
*dspp_idx = idx;
return true;
}
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
struct dpu_rm_requirements *reqs)
{
int lm_idx[MAX_BLOCKS];
int pp_idx[MAX_BLOCKS];
int dspp_idx[MAX_BLOCKS] = {0};
int i, lm_count = 0;
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
return -EINVAL;
}
/* Find a primary mixer */
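	/*
	 * lm_count is reset for each candidate primary, so a failed peer
	 * lookup simply restarts the search from the next mixer.
	 */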
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
lm_count < reqs->topology.num_lm; i++) {
if (!rm->mixer_blks[i])
continue;
lm_count = 0;
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
enc_id, i, &pp_idx[lm_count],
&dspp_idx[lm_count], reqs)) {
continue;
}
++lm_count;
/* Valid primary mixer found, find matching peers */
if (lm_count < reqs->topology.num_lm) {
int j = _dpu_rm_get_lm_peer(rm, i);
/* ignore the peer if there is an error or if the peer was already processed */
if (j < 0 || j < i)
continue;
if (!rm->mixer_blks[j])
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
reqs)) {
continue;
}
lm_idx[lm_count] = j;
++lm_count;
}
}
if (lm_count != reqs->topology.num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
for (i = 0; i < lm_count; i++) {
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
global_state->dspp_to_enc_id[dspp_idx[i]] =
reqs->topology.num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
}
return 0;
}
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
const struct msm_display_topology *top)
{
int ctl_idx[MAX_BLOCKS];
int i = 0, j, num_ctls;
bool needs_split_display;
	/* each hw_intf needs its own hw_ctl to program its control path */
num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
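	/*
	 * a CTL only matches if its split-display capability exactly
	 * mirrors the topology requirement (checked below)
	 */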
for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
const struct dpu_hw_ctl *ctl;
unsigned long features;
bool has_split_display;
if (!rm->ctl_blks[j])
continue;
if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
continue;
ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
features = ctl->caps->features;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);
if (needs_split_display != has_split_display)
continue;
ctl_idx[i] = j;
DPU_DEBUG("ctl %d match\n", j + CTL_0);
if (++i == num_ctls)
break;
}
if (i != num_ctls)
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
}
return 0;
}
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
const struct msm_display_topology *top)
{
int num_dsc = top->num_dsc;
int i;
	/* check that the required DSC blocks exist and are not already allocated */
for (i = 0; i < num_dsc; i++) {
if (!rm->dsc_blks[i]) {
DPU_ERROR("DSC %d does not exist\n", i);
return -EIO;
}
if (global_state->dsc_to_enc_id[i]) {
DPU_ERROR("DSC %d is already allocated\n", i);
return -EIO;
}
}
for (i = 0; i < num_dsc; i++)
global_state->dsc_to_enc_id[i] = enc->base.id;
return 0;
}
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct dpu_rm_requirements *reqs)
{
int ret;
ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
&reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
	return _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
}
static int _dpu_rm_populate_requirements(
struct drm_encoder *enc,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
reqs->topology = req_topology;
DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
reqs->topology.num_lm, reqs->topology.num_dsc,
reqs->topology.num_intf);
return 0;
}
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
uint32_t enc_id)
{
int i;
for (i = 0; i < cnt; i++) {
if (res_mapping[i] == enc_id)
res_mapping[i] = 0;
}
}
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc)
{
_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}
int dpu_rm_reserve(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
struct msm_display_topology topology)
{
struct dpu_rm_requirements reqs;
int ret;
/* Check if this is just a page-flip */
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
if (IS_ERR(global_state)) {
DPU_ERROR("failed to global state\n");
return PTR_ERR(global_state);
}
DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
enc->base.id, crtc_state->crtc->base.id);
ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
return ret;
}
ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
return ret;
}
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
struct dpu_hw_blk **hw_blks;
uint32_t *hw_to_enc_id;
int i, num_blks, max_blks;
switch (type) {
case DPU_HW_BLK_PINGPONG:
hw_blks = rm->pingpong_blks;
hw_to_enc_id = global_state->pingpong_to_enc_id;
max_blks = ARRAY_SIZE(rm->pingpong_blks);
break;
case DPU_HW_BLK_LM:
hw_blks = rm->mixer_blks;
hw_to_enc_id = global_state->mixer_to_enc_id;
max_blks = ARRAY_SIZE(rm->mixer_blks);
break;
case DPU_HW_BLK_CTL:
hw_blks = rm->ctl_blks;
hw_to_enc_id = global_state->ctl_to_enc_id;
max_blks = ARRAY_SIZE(rm->ctl_blks);
break;
case DPU_HW_BLK_DSPP:
hw_blks = rm->dspp_blks;
hw_to_enc_id = global_state->dspp_to_enc_id;
max_blks = ARRAY_SIZE(rm->dspp_blks);
break;
case DPU_HW_BLK_DSC:
hw_blks = rm->dsc_blks;
hw_to_enc_id = global_state->dsc_to_enc_id;
max_blks = ARRAY_SIZE(rm->dsc_blks);
break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
}
num_blks = 0;
for (i = 0; i < max_blks; i++) {
if (hw_to_enc_id[i] != enc_id)
continue;
if (num_blks == blks_size) {
DPU_ERROR("More than %d resources assigned to enc %d\n",
blks_size, enc_id);
break;
}
if (!hw_blks[i]) {
DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
type, enc_id);
break;
}
blks[num_blks++] = hw_blks[i];
}
return num_blks;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
/* using a file static variable for debugfs access */
static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
/* DPU_SCALER_QSEED3 */
#define QSEED3_HW_VERSION 0x00
#define QSEED3_OP_MODE 0x04
#define QSEED3_RGB2Y_COEFF 0x08
#define QSEED3_PHASE_INIT 0x0C
#define QSEED3_PHASE_STEP_Y_H 0x10
#define QSEED3_PHASE_STEP_Y_V 0x14
#define QSEED3_PHASE_STEP_UV_H 0x18
#define QSEED3_PHASE_STEP_UV_V 0x1C
#define QSEED3_PRELOAD 0x20
#define QSEED3_DE_SHARPEN 0x24
#define QSEED3_DE_SHARPEN_CTL 0x28
#define QSEED3_DE_SHAPE_CTL 0x2C
#define QSEED3_DE_THRESHOLD 0x30
#define QSEED3_DE_ADJUST_DATA_0 0x34
#define QSEED3_DE_ADJUST_DATA_1 0x38
#define QSEED3_DE_ADJUST_DATA_2 0x3C
#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
#define QSEED3_SRC_SIZE_UV 0x44
#define QSEED3_DST_SIZE 0x48
#define QSEED3_COEF_LUT_CTRL 0x4C
#define QSEED3_COEF_LUT_SWAP_BIT 0
#define QSEED3_COEF_LUT_DIR_BIT 1
#define QSEED3_COEF_LUT_Y_CIR_BIT 2
#define QSEED3_COEF_LUT_UV_CIR_BIT 3
#define QSEED3_COEF_LUT_Y_SEP_BIT 4
#define QSEED3_COEF_LUT_UV_SEP_BIT 5
#define QSEED3_BUFFER_CTRL 0x50
#define QSEED3_CLK_CTRL0 0x54
#define QSEED3_CLK_CTRL1 0x58
#define QSEED3_CLK_STATUS 0x5C
#define QSEED3_PHASE_INIT_Y_H 0x90
#define QSEED3_PHASE_INIT_Y_V 0x94
#define QSEED3_PHASE_INIT_UV_H 0x98
#define QSEED3_PHASE_INIT_UV_V 0x9C
#define QSEED3_COEF_LUT 0x100
#define QSEED3_FILTERS 5
#define QSEED3_LUT_REGIONS 4
#define QSEED3_CIRCULAR_LUTS 9
#define QSEED3_SEPARABLE_LUTS 10
#define QSEED3_LUT_SIZE 60
#define QSEED3_ENABLE 2
#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
#define QSEED3_CIR_LUT_SIZE \
(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
#define QSEED3_SEP_LUT_SIZE \
(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
/* DPU_SCALER_QSEED3LITE */
#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
#define QSEED3LITE_COEF_LUT_CTRL 0x4C
#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
#define QSEED3LITE_FILTERS 2
#define QSEED3LITE_SEPARABLE_LUTS 10
#define QSEED3LITE_LUT_SIZE 33
#define QSEED3LITE_SEP_LUT_SIZE \
(QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
/* QOS_LUT */
#define QOS_DANGER_LUT 0x00
#define QOS_SAFE_LUT 0x04
#define QOS_CREQ_LUT 0x08
#define QOS_QOS_CTRL 0x0C
#define QOS_CREQ_LUT_0 0x14
#define QOS_CREQ_LUT_1 0x18
/* QOS_QOS_CTRL */
#define QOS_QOS_CTRL_DANGER_SAFE_EN BIT(0)
#define QOS_QOS_CTRL_DANGER_VBLANK_MASK GENMASK(5, 4)
#define QOS_QOS_CTRL_VBLANK_EN BIT(16)
#define QOS_QOS_CTRL_CREQ_VBLANK_MASK GENMASK(21, 20)
void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
u32 reg_off,
u32 val,
const char *name)
{
/* don't need to mutex protect this */
if (c->log_mask & dpu_hw_util_log_mask)
DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
name, reg_off, val);
writel_relaxed(val, c->blk_addr + reg_off);
}
int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
{
return readl_relaxed(c->blk_addr + reg_off);
}
u32 *dpu_hw_util_get_log_mask_ptr(void)
{
return &dpu_hw_util_log_mask;
}
static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
{
int i, j, filter;
int config_lut = 0x0;
unsigned long lut_flags;
u32 lut_addr, lut_offset, lut_len;
u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
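	/*
	 * off_tbl[filter][region] = { count, byte offset }: for each LUT
	 * region, count * 4 u32 coefficients are written starting at
	 * QSEED3_COEF_LUT + the scaler offset + the region's byte offset.
	 */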
static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
};
lut_flags = (unsigned long) scaler3_cfg->lut_flag;
if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
lut[0] = scaler3_cfg->dir_lut;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
lut[1] = scaler3_cfg->cir_lut +
scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
lut[2] = scaler3_cfg->cir_lut +
scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
lut[3] = scaler3_cfg->sep_lut +
scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
lut[4] = scaler3_cfg->sep_lut +
scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (config_lut) {
for (filter = 0; filter < QSEED3_FILTERS; filter++) {
if (!lut[filter])
continue;
lut_offset = 0;
for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
lut_addr = QSEED3_COEF_LUT + offset
+ off_tbl[filter][i][1];
lut_len = off_tbl[filter][i][0] << 2;
for (j = 0; j < lut_len; j++) {
DPU_REG_WRITE(c,
lut_addr,
(lut[filter])[lut_offset++]);
lut_addr += 4;
}
}
}
}
if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
}
static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
{
int j, filter;
int config_lut = 0x0;
unsigned long lut_flags;
u32 lut_addr, lut_offset;
u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
	static const uint32_t off_tbl[QSEED3LITE_FILTERS] = { 0x000, 0x200 };
DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
if (!scaler3_cfg->sep_lut)
return;
lut_flags = (unsigned long) scaler3_cfg->lut_flag;
if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
(scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
lut[0] = scaler3_cfg->sep_lut +
scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
(scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
(scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
lut[1] = scaler3_cfg->sep_lut +
scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
config_lut = 1;
}
if (config_lut) {
for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
if (!lut[filter])
continue;
lut_offset = 0;
lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
DPU_REG_WRITE(c,
lut_addr,
(lut[filter])[lut_offset++]);
lut_addr += 4;
}
}
}
if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
}
static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
{
u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
u32 adjust_a, adjust_b, adjust_c;
if (!de_cfg->enable)
return;
sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
((de_cfg->sharpen_level2 & 0x1FF) << 16);
sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
((de_cfg->prec_shift & 0x7) << 13) |
((de_cfg->clip & 0x7) << 16);
shape_ctl = (de_cfg->thr_quiet & 0xFF) |
((de_cfg->thr_dieout & 0x3FF) << 16);
de_thr = (de_cfg->thr_low & 0x3FF) |
((de_cfg->thr_high & 0x3FF) << 16);
adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
((de_cfg->adjust_a[1] & 0x3FF) << 10) |
((de_cfg->adjust_a[2] & 0x3FF) << 20);
adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
((de_cfg->adjust_b[1] & 0x3FF) << 10) |
((de_cfg->adjust_b[2] & 0x3FF) << 20);
adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
((de_cfg->adjust_c[1] & 0x3FF) << 10) |
((de_cfg->adjust_c[2] & 0x3FF) << 20);
DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
}
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
u32 scaler_offset, u32 scaler_version,
const struct dpu_format *format)
{
u32 op_mode = 0;
u32 phase_init, preload, src_y_rgb, src_uv, dst;
if (!scaler3_cfg->enable)
goto end;
op_mode |= BIT(0);
op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
if (format && DPU_FORMAT_IS_YUV(format)) {
op_mode |= BIT(12);
op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
}
op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
preload =
((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
((scaler3_cfg->preload_y[1] & 0x7F) << 24);
src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
dst = (scaler3_cfg->dst_width & 0x1FFFF) |
((scaler3_cfg->dst_height & 0x1FFFF) << 16);
if (scaler3_cfg->de.enable) {
_dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
op_mode |= BIT(8);
}
if (scaler3_cfg->lut_flag) {
if (scaler_version < 0x2004)
_dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
else
_dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
}
if (scaler_version == 0x1002) {
phase_init =
((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
} else {
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
}
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
end:
if (format && !DPU_FORMAT_IS_DX(format))
op_mode |= BIT(14);
if (format && format->alpha_enable) {
op_mode |= BIT(10);
if (scaler_version == 0x1002)
op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
else
op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
}
DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
}
u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
u32 scaler_offset)
{
return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
}
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10)
{
static const u32 matrix_shift = 7;
u32 clamp_shift = csc10 ? 16 : 8;
u32 val;
/* matrix coeff - convert S15.16 to S4.9 */
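	/* (shifting right by 7 drops 7 of the 16 fractional bits, leaving 9;
	 * the 0x1FFF mask keeps the resulting 13-bit S4.9 value) */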
val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off, val);
val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
/* Pre clamp */
val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
/* Post clamp */
val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
/* Pre-Bias */
DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
/* Post-Bias */
DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
}
/**
* _dpu_hw_get_qos_lut - get LUT mapping based on fill level
* @tbl: Pointer to LUT table
* @total_fl: fill level
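 *
 * Example: with entries {.fl = 10, .lut = A} and {.fl = 0, .lut = B},
 * total_fl <= 10 returns A and anything larger falls back to B, since a
 * trailing entry with fl == 0 acts as the default.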
* Return: LUT setting corresponding to the fill level
*/
u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
u32 total_fl)
{
int i;
if (!tbl || !tbl->nentry || !tbl->entries)
return 0;
for (i = 0; i < tbl->nentry; i++)
if (total_fl <= tbl->entries[i].fl)
return tbl->entries[i].lut;
/* if last fl is zero, use as default */
if (!tbl->entries[i-1].fl)
return tbl->entries[i-1].lut;
return 0;
}
void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
bool qos_8lvl,
const struct dpu_hw_qos_cfg *cfg)
{
DPU_REG_WRITE(c, offset + QOS_DANGER_LUT, cfg->danger_lut);
DPU_REG_WRITE(c, offset + QOS_SAFE_LUT, cfg->safe_lut);
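	/* 8-level QoS targets take a 64-bit CREQ LUT split across two registers */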
if (qos_8lvl) {
DPU_REG_WRITE(c, offset + QOS_CREQ_LUT_0, cfg->creq_lut);
DPU_REG_WRITE(c, offset + QOS_CREQ_LUT_1, cfg->creq_lut >> 32);
} else {
DPU_REG_WRITE(c, offset + QOS_CREQ_LUT, cfg->creq_lut);
}
DPU_REG_WRITE(c, offset + QOS_QOS_CTRL,
cfg->danger_safe_en ? QOS_QOS_CTRL_DANGER_SAFE_EN : 0);
}
void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
u32 misr_ctrl_offset,
bool enable, u32 frame_count)
{
u32 config = 0;
DPU_REG_WRITE(c, misr_ctrl_offset, MISR_CTRL_STATUS_CLEAR);
	/* Clear old MISR value (in case it's read before a new value is calculated) */
wmb();
if (enable) {
config = (frame_count & MISR_FRAME_COUNT_MASK) |
MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK;
DPU_REG_WRITE(c, misr_ctrl_offset, config);
} else {
DPU_REG_WRITE(c, misr_ctrl_offset, 0);
}
}
int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
u32 misr_ctrl_offset,
u32 misr_signature_offset,
u32 *misr_value)
{
u32 ctrl = 0;
if (!misr_value)
return -EINVAL;
ctrl = DPU_REG_READ(c, misr_ctrl_offset);
if (!(ctrl & MISR_CTRL_ENABLE))
return -ENODATA;
if (!(ctrl & MISR_CTRL_STATUS))
return -EINVAL;
*misr_value = DPU_REG_READ(c, misr_signature_offset);
return 0;
}
#define CDP_ENABLE BIT(0)
#define CDP_UBWC_META_ENABLE BIT(1)
#define CDP_TILE_AMORTIZE_ENABLE BIT(2)
#define CDP_PRELOAD_AHEAD_64 BIT(3)
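/*
 * CDP control word: the enable bit, per-format hints (UBWC metadata, tile
 * amortization) and a fixed preload-ahead-64 setting.
 */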
void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
const struct dpu_format *fmt, bool enable)
{
u32 cdp_cntl = CDP_PRELOAD_AHEAD_64;
if (enable)
cdp_cntl |= CDP_ENABLE;
if (DPU_FORMAT_IS_UBWC(fmt))
cdp_cntl |= CDP_UBWC_META_ENABLE;
if (DPU_FORMAT_IS_UBWC(fmt) ||
DPU_FORMAT_IS_TILE(fmt))
cdp_cntl |= CDP_TILE_AMORTIZE_ENABLE;
DPU_REG_WRITE(c, offset, cdp_cntl);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/iopoll.h>
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_pingpong.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
#define PP_TEAR_CHECK_EN 0x000
#define PP_SYNC_CONFIG_VSYNC 0x004
#define PP_SYNC_CONFIG_HEIGHT 0x008
#define PP_SYNC_WRCOUNT 0x00C
#define PP_VSYNC_INIT_VAL 0x010
#define PP_INT_COUNT_VAL 0x014
#define PP_SYNC_THRESH 0x018
#define PP_START_POS 0x01C
#define PP_RD_PTR_IRQ 0x020
#define PP_WR_PTR_IRQ 0x024
#define PP_OUT_LINE_COUNT 0x028
#define PP_LINE_COUNT 0x02C
#define PP_AUTOREFRESH_CONFIG 0x030
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
#define PP_DSC_MODE 0x0a0
#define PP_DCE_DATA_IN_SWAP 0x0ac
#define PP_DCE_DATA_OUT_SWAP 0x0c8
#define PP_DITHER_EN 0x000
#define PP_DITHER_BITDEPTH 0x004
#define PP_DITHER_MATRIX 0x008
#define DITHER_DEPTH_MAP_INDEX 9
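/*
 * Maps a component bit depth (array index) to the 2-bit field written to
 * PP_DITHER_BITDEPTH: depths of 7 and 8 bpc map to 1 and 2 respectively,
 * anything lower maps to 0.
 */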
static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
0, 0, 0, 0, 0, 0, 0, 1, 2
};
static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp,
struct dpu_hw_dither_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 i, base, data = 0;
c = &pp->hw;
base = pp->caps->sblk->dither.base;
if (!cfg) {
DPU_REG_WRITE(c, base + PP_DITHER_EN, 0);
return;
}
data = dither_depth_map[cfg->c0_bitdepth] & REG_MASK(2);
data |= (dither_depth_map[cfg->c1_bitdepth] & REG_MASK(2)) << 2;
data |= (dither_depth_map[cfg->c2_bitdepth] & REG_MASK(2)) << 4;
data |= (dither_depth_map[cfg->c3_bitdepth] & REG_MASK(2)) << 6;
data |= (cfg->temporal_en) ? (1 << 8) : 0;
DPU_REG_WRITE(c, base + PP_DITHER_BITDEPTH, data);
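	/* pack four 4-bit dither matrix coefficients per register write */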
for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
data = (cfg->matrix[i] & REG_MASK(4)) |
((cfg->matrix[i + 1] & REG_MASK(4)) << 4) |
((cfg->matrix[i + 2] & REG_MASK(4)) << 8) |
((cfg->matrix[i + 3] & REG_MASK(4)) << 12);
DPU_REG_WRITE(c, base + PP_DITHER_MATRIX + i, data);
}
DPU_REG_WRITE(c, base + PP_DITHER_EN, 1);
}
static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp,
struct dpu_hw_tear_check *te)
{
struct dpu_hw_blk_reg_map *c;
int cfg;
if (!pp || !te)
return -EINVAL;
c = &pp->hw;
	cfg = BIT(19); /* VSYNC_COUNTER_EN */
if (te->hw_vsync_mode)
cfg |= BIT(20);
cfg |= te->vsync_count;
DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
DPU_REG_WRITE(c, PP_SYNC_THRESH,
((te->sync_threshold_continue << 16) |
te->sync_threshold_start));
DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
(te->start_pos + te->sync_threshold_start + 1));
DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, 1);
return 0;
}
static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
u32 frame_count, bool enable)
{
DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
enable ? (BIT(31) | frame_count) : 0);
}
/*
* dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
* @pp: DPU pingpong structure
* @frame_count: Used to return the current frame count from hw
*
* Returns: True if autorefresh enabled, false if disabled.
*/
static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
u32 *frame_count)
{
u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
if (frame_count != NULL)
*frame_count = val & 0xffff;
return !!((val & BIT(31)) >> 31);
}
static int dpu_hw_pp_disable_te(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *c;
if (!pp)
return -EINVAL;
c = &pp->hw;
DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, 0);
return 0;
}
static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
bool enable_external_te)
{
	struct dpu_hw_blk_reg_map *c;
	u32 cfg;
	int orig;
	if (!pp)
		return -EINVAL;
	c = &pp->hw;
cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
orig = (bool)(cfg & BIT(20));
if (enable_external_te)
cfg |= BIT(20);
else
cfg &= ~BIT(20);
DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
return orig;
}
static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
struct dpu_hw_pp_vsync_info *info)
{
struct dpu_hw_blk_reg_map *c;
u32 val;
if (!pp || !info)
return -EINVAL;
c = &pp->hw;
val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
info->rd_ptr_init_val = val & 0xffff;
val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
info->rd_ptr_line_count = val & 0xffff;
val = DPU_REG_READ(c, PP_LINE_COUNT);
info->wr_ptr_line_count = val & 0xffff;
return 0;
}
static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
{
	struct dpu_hw_blk_reg_map *c;
	u32 height, init;
	u32 line = 0xFFFF;
	if (!pp)
		return 0;
	c = &pp->hw;
init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
if (height < init)
return line;
line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
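	/* normalize the raw count against the programmed init value,
	 * accounting for 16-bit counter wraparound */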
if (line < init)
line += (0xFFFF - init);
else
line -= init;
return line;
}
static void dpu_hw_pp_disable_autorefresh(struct dpu_hw_pingpong *pp,
uint32_t encoder_id, u16 vdisplay)
{
struct dpu_hw_pp_vsync_info info;
int trial = 0;
/* If autorefresh is already disabled, we have nothing to do */
if (!dpu_hw_pp_get_autorefresh_config(pp, NULL))
return;
	/*
	 * If autorefresh is enabled, disable it and make sure it is safe to
	 * proceed with the current frame commit/push. The sequence is:
	 * 1. Disable TE
	 * 2. Disable autorefresh config
	 * 3. Poll for frame transfer ongoing to be false
	 * 4. Enable TE back
	 */
dpu_hw_pp_connect_external_te(pp, false);
dpu_hw_pp_setup_autorefresh_config(pp, 0, false);
do {
udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
DPU_ERROR("enc%d pp%d disable autorefresh failed\n",
encoder_id, pp->idx - PINGPONG_0);
break;
}
trial++;
dpu_hw_pp_get_vsync_info(pp, &info);
} while (info.wr_ptr_line_count > 0 &&
info.wr_ptr_line_count < vdisplay);
dpu_hw_pp_connect_external_te(pp, true);
DPU_DEBUG("enc%d pp%d disabled autorefresh\n",
encoder_id, pp->idx - PINGPONG_0);
}
static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *c = &pp->hw;
DPU_REG_WRITE(c, PP_DSC_MODE, 1);
return 0;
}
static void dpu_hw_pp_dsc_disable(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *c = &pp->hw;
DPU_REG_WRITE(c, PP_DSC_MODE, 0);
}
static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *pp_c = &pp->hw;
int data;
data = DPU_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP);
data |= BIT(18); /* endian flip */
DPU_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data);
return 0;
}
static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
unsigned long features)
{
if (test_bit(DPU_PINGPONG_TE, &features)) {
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
c->ops.disable_tearcheck = dpu_hw_pp_disable_te;
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
}
if (test_bit(DPU_PINGPONG_DSC, &features)) {
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
}
if (test_bit(DPU_PINGPONG_DITHER, &features))
c->ops.setup_dither = dpu_hw_pp_setup_dither;
}
struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_pingpong *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
c->idx = cfg->id;
c->caps = cfg;
_setup_pingpong_ops(c, c->caps->features);
return c;
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
{
kfree(pp);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
#define LM_BORDER_COLOR_0 0x08
#define LM_BORDER_COLOR_1 0x010
/* These registers are offsets from mixer base + stage base */
#define LM_BLEND0_OP 0x00
#define LM_BLEND0_CONST_ALPHA 0x04
#define LM_FG_COLOR_FILL_COLOR_0 0x08
#define LM_FG_COLOR_FILL_COLOR_1 0x0C
#define LM_FG_COLOR_FILL_SIZE 0x10
#define LM_FG_COLOR_FILL_XY 0x14
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
#define LM_MISR_CTRL 0x310
#define LM_MISR_SIGNATURE 0x314
/**
* _stage_offset(): returns the relative offset of the blend registers
* for the stage to be setup
* @ctx: mixer ctx contains the mixer to be programmed
* @stage: stage index to setup
*/
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
{
const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
return sblk->blendstage_base[stage - DPU_STAGE_0];
return -EINVAL;
}
static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
struct dpu_hw_mixer_cfg *mixer)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 outsize;
u32 op_mode;
op_mode = DPU_REG_READ(c, LM_OP_MODE);
outsize = mixer->out_height << 16 | mixer->out_width;
DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
/* SPLIT_LEFT_RIGHT */
if (mixer->right_mixer)
op_mode |= BIT(31);
else
op_mode &= ~BIT(31);
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
if (border_en) {
DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
(color->color_0 & 0xFFF) |
((color->color_1 & 0xFFF) << 0x10));
DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
(color->color_2 & 0xFFF) |
((color->color_3 & 0xFFF) << 0x10));
}
}
static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
{
dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count);
}
static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
{
return dpu_hw_collect_misr(&ctx->hw, LM_MISR_CTRL, LM_MISR_SIGNATURE, misr_value);
}
static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int stage_off;
u32 const_alpha;
if (stage == DPU_STAGE_BASE)
return;
stage_off = _stage_offset(ctx, stage);
if (WARN_ON(stage_off < 0))
return;
const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}
static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int stage_off;
if (stage == DPU_STAGE_BASE)
return;
stage_off = _stage_offset(ctx, stage);
if (WARN_ON(stage_off < 0))
return;
DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}
static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
uint32_t mixer_op_mode)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int op_mode;
/* read the existing op_mode configuration */
op_mode = DPU_REG_READ(c, LM_OP_MODE);
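	/* preserve the two top control bits (31:30, BIT(31) being
	 * SPLIT_LEFT_RIGHT) and replace the remaining blend configuration */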
op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features))
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
ops->setup_misr = dpu_hw_lm_setup_misr;
ops->collect_misr = dpu_hw_lm_collect_misr;
}
struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_mixer *c;
if (cfg->pingpong == PINGPONG_NONE) {
DPU_DEBUG("skip mixer %d without pingpong\n", cfg->id);
return NULL;
}
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_LM;
/* Assign ops */
c->idx = cfg->id;
c->cap = cfg;
_setup_mixer_ops(&c->ops, c->cap->features);
return c;
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
{
kfree(lm);
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <drm/drm_framebuffer.h>
#include "dpu_encoder_phys.h"
#include "dpu_formats.h"
#include "dpu_hw_top.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_interrupts.h"
#include "dpu_core_irq.h"
#include "dpu_vbif.h"
#include "dpu_crtc.h"
#include "disp/msm_disp_snapshot.h"
#define to_dpu_encoder_phys_wb(x) \
container_of(x, struct dpu_encoder_phys_wb, base)
/**
* dpu_encoder_phys_wb_is_master - report wb always as master encoder
* @phys_enc: Pointer to physical encoder
*/
static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
{
/* there is only one physical enc for dpu_writeback */
return true;
}
/**
* dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_set_ot_limit(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct dpu_vbif_set_ot_params ot_params;
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = hw_wb->caps->xin_id;
ot_params.num = hw_wb->idx - WB_0;
ot_params.width = phys_enc->cached_mode.hdisplay;
ot_params.height = phys_enc->cached_mode.vdisplay;
ot_params.is_wfd = true;
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
ot_params.rd = false;
dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
}
/**
* dpu_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_set_qos_remap(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_vbif_set_qos_params qos_params;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
DPU_ERROR("invalid arguments\n");
return;
}
if (!phys_enc->hw_wb || !phys_enc->hw_wb->caps) {
DPU_ERROR("invalid writeback hardware\n");
return;
}
hw_wb = phys_enc->hw_wb;
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
qos_params.num = hw_wb->idx - WB_0;
qos_params.is_rt = false;
DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
qos_params.num,
qos_params.vbif_idx,
qos_params.xin_id, qos_params.is_rt);
dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
}
/**
* dpu_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_qos_cfg qos_cfg;
const struct dpu_mdss_cfg *catalog;
const struct dpu_qos_lut_tbl *qos_lut_tb;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid parameter(s)\n");
return;
}
catalog = phys_enc->dpu_kms->catalog;
hw_wb = phys_enc->hw_wb;
memset(&qos_cfg, 0, sizeof(struct dpu_hw_qos_cfg));
qos_cfg.danger_safe_en = true;
qos_cfg.danger_lut =
catalog->perf->danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
qos_cfg.safe_lut = catalog->perf->safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
qos_lut_tb = &catalog->perf->qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
if (hw_wb->ops.setup_qos_lut)
hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
}
/**
* dpu_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
* @fb: Pointer to output framebuffer
*/
static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct drm_framebuffer *fb)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_cfg *wb_cfg;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid encoder\n");
return;
}
hw_wb = phys_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->roi.x1 = 0;
wb_cfg->roi.x2 = phys_enc->cached_mode.hdisplay;
wb_cfg->roi.y1 = 0;
wb_cfg->roi.y2 = phys_enc->cached_mode.vdisplay;
if (hw_wb->ops.setup_roi)
hw_wb->ops.setup_roi(hw_wb, wb_cfg);
if (hw_wb->ops.setup_outformat)
hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
if (hw_wb->ops.setup_cdp) {
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format,
perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
}
if (hw_wb->ops.setup_outaddress)
hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
}
/**
* dpu_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
 * @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *ctl;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
hw_wb = phys_enc->hw_wb;
ctl = phys_enc->hw_ctl;
	if (ctl && ctl->ops.setup_intf_cfg &&
	    test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features)) {
struct dpu_hw_intf_cfg intf_cfg = {0};
struct dpu_hw_pingpong *hw_pp = phys_enc->hw_pp;
enum dpu_3d_blend_mode mode_3d;
mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
		if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
			hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d,
							   mode_3d);
/* setup which pp blk will connect to this wb */
if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
phys_enc->hw_pp->idx);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
} else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
struct dpu_hw_intf_cfg intf_cfg = {0};
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
intf_cfg.mode_3d =
dpu_encoder_helper_get_3d_blend_mode(phys_enc);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
}
}
/**
* dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
* @phys_enc: Pointer to physical encoder
* @crtc_state: Pointer to CRTC atomic state
* @conn_state: Pointer to connector atomic state
*/
static int dpu_encoder_phys_wb_atomic_check(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_framebuffer *fb;
const struct drm_display_mode *mode = &crtc_state->mode;
DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
phys_enc->hw_wb->idx, mode->name, mode->hdisplay, mode->vdisplay);
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
} else if (conn_state->connector->status !=
connector_status_connected) {
DPU_ERROR("connector not connected %d\n",
conn_state->connector->status);
return -EINVAL;
}
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
fb = conn_state->writeback_job->fb;
DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
fb->width, fb->height);
if (fb->width != mode->hdisplay) {
DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
mode->hdisplay);
return -EINVAL;
} else if (fb->height != mode->vdisplay) {
DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
mode->vdisplay);
return -EINVAL;
} else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) {
DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
fb->width, phys_enc->hw_wb->caps->maxlinewidth);
return -EINVAL;
}
return 0;
}
/**
* _dpu_encoder_phys_wb_update_flush - flush hardware update
* @phys_enc: Pointer to physical encoder
*/
static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
u32 pending_flush = 0;
if (!phys_enc)
return;
hw_wb = phys_enc->hw_wb;
hw_pp = phys_enc->hw_pp;
hw_ctl = phys_enc->hw_ctl;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
if (!hw_ctl) {
DPU_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
return;
}
if (hw_ctl->ops.update_pending_flush_wb)
hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
hw_pp->merge_3d->idx);
if (hw_ctl->ops.get_pending_flush)
pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
DPU_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
hw_ctl->idx - CTL_0, pending_flush,
hw_wb->idx - WB_0);
}
/**
* dpu_encoder_phys_wb_setup - setup writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_setup(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct drm_display_mode mode = phys_enc->cached_mode;
struct drm_framebuffer *fb = NULL;
DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
hw_wb->idx - WB_0, mode.name,
mode.hdisplay, mode.vdisplay);
dpu_encoder_phys_wb_set_ot_limit(phys_enc);
dpu_encoder_phys_wb_set_qos_remap(phys_enc);
dpu_encoder_phys_wb_set_qos(phys_enc);
dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
dpu_encoder_phys_wb_setup_cdp(phys_enc);
}
static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
unsigned long lock_flags;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);
dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
if (wb_enc->wb_conn)
drm_writeback_signal_completion(wb_enc->wb_conn, 0);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
}
/**
* dpu_encoder_phys_wb_done_irq - writeback interrupt handler
* @arg: Pointer to writeback encoder
* @irq_idx: interrupt index
*/
static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
{
_dpu_encoder_phys_wb_frame_done_helper(arg);
}
/**
* dpu_encoder_phys_wb_irq_ctrl - irq control of WB
* @phys: Pointer to physical encoder
* @enable: indicates enable or disable interrupts
*/
static void dpu_encoder_phys_wb_irq_ctrl(
struct dpu_encoder_phys *phys, bool enable)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
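	/* refcounted: register the WB_DONE callback on the first enable and
	 * unregister it on the last disable */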
if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
dpu_core_irq_register_callback(phys->dpu_kms,
phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
else if (!enable &&
atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
}
static void dpu_encoder_phys_wb_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
}
static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
wb_enc->wb_done_timeout_cnt++;
if (wb_enc->wb_done_timeout_cnt == 1)
msm_disp_snapshot_state(phys_enc->parent->dev);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
/* request a ctl reset before the next kickoff */
phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
if (wb_enc->wb_conn)
drm_writeback_signal_completion(wb_enc->wb_conn, 0);
dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);
}
/**
* dpu_encoder_phys_wb_wait_for_commit_done - wait until request is committed
* @phys_enc: Pointer to physical encoder
*/
static int dpu_encoder_phys_wb_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
	int ret;
struct dpu_encoder_wait_info wait_info;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_WB_DONE],
dpu_encoder_phys_wb_done_irq, &wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
else if (!ret)
wb_enc->wb_done_timeout_cnt = 0;
return ret;
}
/**
* dpu_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct drm_connector *drm_conn;
struct drm_connector_state *state;
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
if (!wb_enc->wb_conn || !wb_enc->wb_job) {
DPU_ERROR("invalid wb_conn or wb_job\n");
return;
}
drm_conn = &wb_enc->wb_conn->base;
state = drm_conn->state;
	drm_writeback_queue_job(wb_enc->wb_conn, state);
dpu_encoder_phys_wb_setup(phys_enc);
_dpu_encoder_phys_wb_update_flush(phys_enc);
}
/**
* dpu_encoder_phys_wb_needs_single_flush - trigger flush processing
* @phys_enc: Pointer to physical encoder
*/
static bool dpu_encoder_phys_wb_needs_single_flush(struct dpu_encoder_phys *phys_enc)
{
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
return false;
}
/**
* dpu_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
}
/**
* dpu_encoder_phys_wb_enable - enable writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
{
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
/**
* dpu_encoder_phys_wb_disable - disable writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DPU_ERROR("encoder is already disabled\n");
return;
}
/* reset h/w before final flush */
if (phys_enc->hw_ctl->ops.clear_pending_flush)
phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
/*
* New CTL reset sequence from 5.0 MDP onwards.
* If has_3d_merge_reset is not set, legacy reset
* sequence is executed.
*
* Legacy reset sequence has not been implemented yet.
	 * Any target earlier than SM8150 will need it, and when WB
	 * support is added to those targets the legacy teardown
	 * sequence will need to be added as well.
*/
if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
dpu_encoder_helper_phys_cleanup(phys_enc);
phys_enc->enable_state = DPU_ENC_DISABLED;
}
/**
* dpu_encoder_phys_wb_destroy - destroy writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return;
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
kfree(phys_enc);
}
static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job)
{
const struct msm_format *format;
struct msm_gem_address_space *aspace;
struct dpu_hw_wb_cfg *wb_cfg;
int ret;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
if (!job->fb)
return;
wb_enc->wb_job = job;
wb_enc->wb_conn = job->connector;
aspace = phys_enc->dpu_kms->base.aspace;
wb_cfg = &wb_enc->wb_cfg;
memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
ret = msm_framebuffer_prepare(job->fb, aspace, false);
if (ret) {
DPU_ERROR("prep fb failed, %d\n", ret);
return;
}
format = msm_framebuffer_format(job->fb);
wb_cfg->dest.format = dpu_get_dpu_format_ext(
format->pixel_format, job->fb->modifier);
if (!wb_cfg->dest.format) {
/* this error should be detected during atomic_check */
DPU_ERROR("failed to get format %x\n", format->pixel_format);
return;
}
ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
if (ret) {
DPU_DEBUG("failed to populate layout %d\n", ret);
return;
}
wb_cfg->dest.width = job->fb->width;
wb_cfg->dest.height = job->fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
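	/* swap the chroma plane addresses for planar formats that store Cb
	 * first, presumably to match the writeback hardware's expected
	 * plane order */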
if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) &&
(wb_cfg->dest.format->element[0] == C1_B_Cb))
swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_addr[1],
wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_addr[3]);
DPU_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
wb_cfg->dest.plane_pitch[0], wb_cfg->dest.plane_pitch[1],
wb_cfg->dest.plane_pitch[2], wb_cfg->dest.plane_pitch[3]);
}
static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct msm_gem_address_space *aspace;
if (!job->fb)
return;
aspace = phys_enc->dpu_kms->base.aspace;
msm_framebuffer_cleanup(job->fb, aspace, false);
wb_enc->wb_job = NULL;
wb_enc->wb_conn = NULL;
}
static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
	return wb_enc->wb_job != NULL;
}
/**
* dpu_encoder_phys_wb_init_ops - initialize writeback operations
* @ops: Pointer to encoder operation table
*/
static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_wb_is_master;
ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
ops->destroy = dpu_encoder_phys_wb_destroy;
ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_wb_needs_single_flush;
ops->trigger_start = dpu_encoder_helper_trigger_start;
ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
}
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
* @p: Pointer to init info structure with initialization params
*/
struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_wb *wb_enc = NULL;
DPU_DEBUG("\n");
if (!p || !p->parent) {
DPU_ERROR("invalid params\n");
return ERR_PTR(-EINVAL);
}
wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
if (!wb_enc) {
DPU_ERROR("failed to allocate wb phys_enc enc\n");
return ERR_PTR(-ENOMEM);
}
phys_enc = &wb_enc->base;
dpu_encoder_phys_init(phys_enc, p);
dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_WB_LINE;
atomic_set(&wb_enc->wbirq_refcount, 0);
wb_enc->wb_done_timeout_cnt = 0;
DPU_DEBUG("Created dpu_encoder_phys for wb %d\n", phys_enc->hw_wb->idx);
return phys_enc;
}
| linux-master | drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include "mdp5_kms.h"
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static void mdp5_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(mdp5_encoder);
}
static const struct drm_encoder_funcs mdp5_encoder_funcs = {
.destroy = mdp5_encoder_destroy,
};
static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
int intf = mdp5_encoder->intf->num;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
uint32_t format = 0x2100;
unsigned long flags;
mode = adjusted_mode;
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
ctrl_pol = 0;
/* DSI controller cannot handle active-low sync signals. */
if (mdp5_encoder->intf->type != INTF_DSI) {
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
}
/* probably need to get DATA_EN polarity from panel.. */
dtv_hsync_skew = 0; /* get this from panel? */
/* Get color format from panel, default is 8bpc */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
switch (connector->display_info.bpc) {
case 4:
format |= 0;
break;
case 5:
format |= 0x15;
break;
case 6:
format |= 0x2A;
break;
case 8:
default:
format |= 0x3F;
break;
}
break;
}
}
hsync_start_x = (mode->htotal - mode->hsync_start);
hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
vsync_period = mode->vtotal * mode->htotal;
vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
/*
* For edp only:
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
if (mdp5_encoder->intf->type == INTF_eDP) {
display_v_start += mode->htotal - mode->hsync_start;
display_v_end -= mode->hsync_start - mode->hdisplay;
}
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
MDP5_INTF_ACTIVE_HCTL_START(0) |
MDP5_INTF_ACTIVE_HCTL_END(0));
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
mdp5_crtc_set_pipeline(encoder->crtc);
}
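/*
 * Worked example for the timing math above (a sketch with illustrative
 * CEA 1080p60 numbers, not taken from this file): hdisplay/hsync_start/
 * hsync_end/htotal = 1920/2008/2052/2200 and vdisplay/vsync_start/
 * vsync_end/vtotal = 1080/1084/1089/1125 give
 *
 *   hsync_start_x   = 2200 - 2008             = 192
 *   hsync_end_x     = 2200 - (2008-1920) - 1  = 2111
 *   vsync_period    = 1125 * 2200             = 2475000
 *   vsync_len       = (1089-1084) * 2200      = 11000
 *   display_v_start = (1125-1084) * 2200      = 90200
 *   display_v_end   = 2475000 - 4*2200 - 1    = 2466199
 *
 * i.e. the vertical registers are expressed in pixel-clock ticks, not
 * lines.
 */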
static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
struct mdp5_interface *intf = mdp5_encoder->intf;
int intfn = mdp5_encoder->intf->num;
unsigned long flags;
if (WARN_ON(!mdp5_encoder->enabled))
return;
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
/*
* Wait for a vsync so we know that ENABLE=0 has latched before
* the (connector) source of the vsyncs gets disabled;
* otherwise we end up in a funny state if we re-enable
* before the disable latches, with the result that some of
* the settings changes for the new modeset (like a new
* scanout buffer) don't latch properly.
*/
mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
mdp5_encoder->enabled = false;
}
static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
struct mdp5_interface *intf = mdp5_encoder->intf;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
int intfn = intf->num;
unsigned long flags;
if (WARN_ON(mdp5_encoder->enabled))
return;
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_encoder->enabled = true;
}
static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
else
mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode);
}
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
else
mdp5_vid_encoder_disable(encoder);
}
static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = mdp5_encoder->intf;
/* this isn't right I think */
struct drm_crtc_state *cstate = encoder->crtc->state;
mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode);
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_enable(encoder);
else
mdp5_vid_encoder_enable(encoder);
}
static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
struct mdp5_interface *intf = mdp5_encoder->intf;
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
/*
* This is a bit awkward, but we want to flush the CTL and hit the
* START bit at most once for an atomic update. In the non-full-
* modeset case, this is done from crtc->atomic_flush(), but that
* is too early in the case of full modeset, in which case we
* defer to encoder->enable(). But we need to *know* whether
* encoder->enable() will be called to do this:
*/
if (drm_atomic_crtc_needs_modeset(crtc_state))
mdp5_cstate->defer_start = true;
return 0;
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
.atomic_check = mdp5_encoder_atomic_check,
};
int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
}
u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
struct mdp5_kms *mdp5_kms;
struct device *dev;
int intf_num;
u32 data = 0;
if (!encoder || !slave_encoder)
return -EINVAL;
mdp5_kms = get_kms(encoder);
intf_num = mdp5_encoder->intf->num;
/* Switch the slave encoder's TimingGen sync mode so that it uses
* the master's enable signal.
*/
if (intf_num == 1)
data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
else if (intf_num == 2)
data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
else
return -EINVAL;
dev = &mdp5_kms->pdev->dev;
/* Make sure the clocks are on when connectors call this function. */
pm_runtime_get_sync(dev);
/* Dumb Panel, Sync mode */
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
pm_runtime_put_sync(dev);
return 0;
}
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = mdp5_encoder->intf;
/* TODO: Expand this to set writeback modes too */
if (cmd_mode) {
WARN_ON(intf->type != INTF_DSI);
intf->mode = MDP5_INTF_DSI_MODE_COMMAND;
} else {
if (intf->type == INTF_DSI)
intf->mode = MDP5_INTF_DSI_MODE_VIDEO;
else
intf->mode = MDP5_INTF_MODE_NONE;
}
}
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf,
struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
int enc_type = (intf->type == INTF_DSI) ?
DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
int ret;
mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
if (!mdp5_encoder) {
ret = -ENOMEM;
goto fail;
}
encoder = &mdp5_encoder->base;
mdp5_encoder->ctl = ctl;
mdp5_encoder->intf = intf;
spin_lock_init(&mdp5_encoder->intf_lock);
drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
return encoder;
fail:
if (encoder)
mdp5_encoder_destroy(encoder);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*/
#include "mdp5_kms.h"
#include "mdp5_ctl.h"
/*
* CTL - MDP Control Pool Manager
*
* Controls are shared between all display interfaces.
*
* They are intended to be used for data path configuration.
* The top level register programming describes the complete data path for
* a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
*
* Hardware capabilities determine the number of concurrent data paths
*
* In certain use cases (high-resolution dual pipe), one single CTL can be
* shared across multiple CRTCs.
*/
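/*
 * Typical lifecycle, as a sketch (ordering inferred from the callers of
 * this file elsewhere in the driver; not a normative sequence):
 *
 *   ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *   mdp5_ctl_set_pipeline(ctl, pipeline);
 *   mdp5_ctl_blend(ctl, pipeline, stage, r_stage, stage_cnt, flags);
 *   mdp5_ctl_commit(ctl, pipeline, flush_mask, true);
 *   mdp5_ctl_set_encoder_state(ctl, pipeline, true);
 */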
#define CTL_STAT_BUSY 0x1
#define CTL_STAT_BOOKED 0x2
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
/* CTL status bitmask */
u32 status;
bool encoder_enabled;
/* pending flush_mask bits */
u32 flush_mask;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
u32 reg_offset;
/* when do CTL registers need to be flushed? (mask of trigger bits) */
u32 pending_ctl_trigger;
bool cursor_on;
/* True if the current CTL has FLUSH bits pending for single FLUSH. */
bool flush_pending;
struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};
struct mdp5_ctl_manager {
struct drm_device *dev;
/* number of CTL / Layer Mixers in this hw config: */
u32 nlm;
u32 nctl;
/* to filter out non-present bits in the current hardware config */
u32 flush_hw_mask;
/* status for single FLUSH */
bool single_flush_supported;
u32 single_flush_pending_mask;
/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
mdp5_write(mdp5_kms, reg, data);
}
static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
return mdp5_read(mdp5_kms, reg);
}
static void set_display_intf(struct mdp5_kms *mdp5_kms,
struct mdp5_interface *intf)
{
unsigned long flags;
u32 intf_sel;
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf->num) {
case 0:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
break;
case 1:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
break;
case 2:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
break;
case 3:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
break;
default:
BUG();
break;
}
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
unsigned long flags;
struct mdp5_interface *intf = pipeline->intf;
u32 ctl_op = 0;
if (!mdp5_cfg_intf_is_virtual(intf->type))
ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
switch (intf->type) {
case INTF_DSI:
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
ctl_op |= MDP5_CTL_OP_CMD_MODE;
break;
case INTF_WB:
if (intf->mode == MDP5_INTF_WB_MODE_LINE)
ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
break;
default:
break;
}
if (pipeline->r_mixer)
ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
MDP5_CTL_OP_PACK_3D(1);
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
struct mdp5_interface *intf = pipeline->intf;
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
if (!mdp5_cfg_intf_is_virtual(intf->type))
set_display_intf(mdp5_kms, intf);
set_ctl_op(ctl, pipeline);
return 0;
}
static bool start_signal_needed(struct mdp5_ctl *ctl,
struct mdp5_pipeline *pipeline)
{
struct mdp5_interface *intf = pipeline->intf;
if (!ctl->encoder_enabled)
return false;
switch (intf->type) {
case INTF_WB:
return true;
case INTF_DSI:
return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
default:
return false;
}
}
/*
* send_start_signal() - Overlay Processor Start Signal
*
* For a given control operation (display pipeline), a START signal needs to
* be issued in order to kick off operation and activate all layers.
* e.g.: DSI command mode, Writeback
*/
static void send_start_signal(struct mdp5_ctl *ctl)
{
unsigned long flags;
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
/**
* mdp5_ctl_set_encoder_state() - set the encoder state
*
* @ctl: the CTL instance
* @pipeline: the encoder's INTF + MIXER configuration
* @enabled: true, when encoder is ready for data streaming; false, otherwise.
*
* Note:
* This encoder state is needed to trigger START signal (data path kickoff).
*/
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
struct mdp5_pipeline *pipeline,
bool enabled)
{
struct mdp5_interface *intf = pipeline->intf;
if (WARN_ON(!ctl))
return -EINVAL;
ctl->encoder_enabled = enabled;
DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
}
return 0;
}
/*
* Note:
* CTL registers need to be flushed after calling this function
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
int cursor_id, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
if (WARN_ON(!mixer)) {
DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
ctl->id);
return -EINVAL;
}
if (pipeline->r_mixer) {
DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
spin_lock_irqsave(&ctl->hw_lock, flags);
blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
if (enable)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
ctl->cursor_on = enable;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
return 0;
}
static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
enum mdp_mixer_stage_id stage)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
case SSPP_CURSOR0:
case SSPP_CURSOR1:
default: return 0;
}
}
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
enum mdp_mixer_stage_id stage)
{
if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
return 0;
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
default: return 0;
}
}
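/*
 * Worked example (illustrative staging, not from this file): putting
 * SSPP_VIG0 at STAGE0 and SSPP_CURSOR0 at STAGE6 yields
 *
 *   blend_cfg     |= MDP5_CTL_LAYER_REG_VIG0(STAGE0);
 *   blend_ext_cfg |= MDP5_CTL_LAYER_EXT_REG_CURSOR0(STAGE6);
 *
 * i.e. non-cursor pipes below STAGE6 are described by the LAYER register
 * alone, while cursor pipes (and stages >= STAGE6) need LAYER_EXT.
 */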
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
unsigned long flags;
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
int i;
spin_lock_irqsave(&ctl->hw_lock, flags);
for (i = 0; i < ctl_mgr->nlm; i++) {
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
}
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
#define PIPE_LEFT 0
#define PIPE_RIGHT 1
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
enum mdp5_pipe stage[][MAX_PIPE_STAGE],
enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
u32 stage_cnt, u32 ctl_blend_op_flags)
{
struct mdp5_hw_mixer *mixer = pipeline->mixer;
struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
unsigned long flags;
u32 blend_cfg = 0, blend_ext_cfg = 0;
u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
int i, start_stage;
mdp5_ctl_reset_blend_regs(ctl);
if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
start_stage = STAGE0;
blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
if (r_mixer)
r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
} else {
start_stage = STAGE_BASE;
}
for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
blend_cfg |=
mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
blend_ext_cfg |=
mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
if (r_mixer) {
r_blend_cfg |=
mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
r_blend_ext_cfg |=
mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
}
}
spin_lock_irqsave(&ctl->hw_lock, flags);
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
blend_ext_cfg);
if (r_mixer) {
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
r_blend_cfg);
ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
r_blend_ext_cfg);
}
spin_unlock_irqrestore(&ctl->hw_lock, flags);
ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
if (r_mixer)
ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
blend_cfg, blend_ext_cfg);
if (r_mixer)
DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
return 0;
}
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
if (intf->type == INTF_WB)
return MDP5_CTL_FLUSH_WB;
switch (intf->num) {
case 0: return MDP5_CTL_FLUSH_TIMING_0;
case 1: return MDP5_CTL_FLUSH_TIMING_1;
case 2: return MDP5_CTL_FLUSH_TIMING_2;
case 3: return MDP5_CTL_FLUSH_TIMING_3;
default: return 0;
}
}
u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
switch (cursor_id) {
case 0: return MDP5_CTL_FLUSH_CURSOR_0;
case 1: return MDP5_CTL_FLUSH_CURSOR_1;
default: return 0;
}
}
u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
default: return 0;
}
}
u32 mdp_ctl_flush_mask_lm(int lm)
{
switch (lm) {
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
case 3: return MDP5_CTL_FLUSH_LM3;
case 4: return MDP5_CTL_FLUSH_LM4;
case 5: return MDP5_CTL_FLUSH_LM5;
default: return 0;
}
}
static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
/* for some targets, cursor bit is the same as LM bit */
if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
return sw_mask;
}
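/*
 * Example (hypothetical hw config, for illustration): on a target whose
 * flush_hw_mask lacks MDP5_CTL_FLUSH_CURSOR_0, a request to flush cursor
 * 0 is translated into the flush bit of the pipeline's mixer instead,
 * because there the cursor is flushed through its LM.
 */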
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
u32 *flush_id)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
if (ctl->pair) {
DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
ctl->flush_pending = true;
ctl_mgr->single_flush_pending_mask |= (*flush_mask);
*flush_mask = 0;
if (ctl->pair->flush_pending) {
*flush_id = min_t(u32, ctl->id, ctl->pair->id);
*flush_mask = ctl_mgr->single_flush_pending_mask;
ctl->flush_pending = false;
ctl->pair->flush_pending = false;
ctl_mgr->single_flush_pending_mask = 0;
DBG("Single FLUSH mask %x,ID %d", *flush_mask,
*flush_id);
}
}
}
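/*
 * Walk-through of the paired case (CTL ids are illustrative): with CTL0
 * paired to CTL1, the first commit only accumulates its bits into
 * single_flush_pending_mask and clears *flush_mask. When the paired
 * CTL's commit arrives, both masks are merged and written once through
 * REG_MDP5_CTL_FLUSH(min(id0, id1)), keeping the two DSI pipes in sync.
 */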
/**
* mdp5_ctl_commit() - Register Flush
*
* @ctl: the CTL instance
* @pipeline: the encoder's INTF + MIXER configuration
* @flush_mask: bitmask of display controller hw blocks to flush
* @start: if true, immediately update flush registers and set START
* bit, otherwise accumulate flush_mask bits until we are
* ready to START
*
* The flush register is used to indicate several registers are all
* programmed, and are safe to update to the back copy of the double
* buffered registers.
*
* Some registers FLUSH bits are shared when the hardware does not have
* dedicated bits for them; handling these is the job of fix_sw_flush().
*
* CTL registers need to be flushed in some circumstances; if that is the
* case, some trigger bits will be present in both flush mask and
* ctl->pending_ctl_trigger.
*
* Return: the flushed H/W bit mask.
*/
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
struct mdp5_pipeline *pipeline,
u32 flush_mask, bool start)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 flush_id = ctl->id;
u32 curr_ctl_flush_mask;
VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
if (ctl->pending_ctl_trigger & flush_mask) {
flush_mask |= MDP5_CTL_FLUSH_CTL;
ctl->pending_ctl_trigger = 0;
}
flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
flush_mask &= ctl_mgr->flush_hw_mask;
curr_ctl_flush_mask = flush_mask;
fix_for_single_flush(ctl, &flush_mask, &flush_id);
if (!start) {
ctl->flush_mask |= flush_mask;
return curr_ctl_flush_mask;
} else {
flush_mask |= ctl->flush_mask;
ctl->flush_mask = 0;
}
if (flush_mask) {
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
}
return curr_ctl_flush_mask;
}
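/*
 * Usage sketch (illustrative): a caller programming several blocks can
 * accumulate FLUSH bits and kick them all with the final commit:
 *
 *   mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_lm(lm), false);
 *   mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
 *
 * The first call only ORs its bits into ctl->flush_mask; the second
 * writes the combined mask to the FLUSH register and, if the pipeline
 * needs it, sends the START signal.
 */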
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
/*
* mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
*/
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
/* do nothing silently if the hw doesn't support single FLUSH */
if (!ctl_mgr->single_flush_supported)
return 0;
if (!enable) {
ctlx->pair = NULL;
ctly->pair = NULL;
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
return -EINVAL;
} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
return -EINVAL;
}
ctlx->pair = ctly;
ctly->pair = ctlx;
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
return 0;
}
/*
* mdp5_ctlm_request() - CTL allocation
*
* Try to return a booked CTL if @intf_num is 1 or 2, and an unbooked one
* for other INTFs. If no CTL is available in the preferred category,
* allocate from the other one.
*
* @return the allocated CTL, or NULL if none is available.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
int intf_num)
{
struct mdp5_ctl *ctl = NULL;
const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
unsigned long flags;
int c;
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
/* search the preferred */
for (c = 0; c < ctl_mgr->nctl; c++)
if ((ctl_mgr->ctls[c].status & checkm) == match)
goto found;
dev_warn(ctl_mgr->dev->dev,
"fall back to the other CTL category for INTF %d!\n", intf_num);
match ^= CTL_STAT_BOOKED;
for (c = 0; c < ctl_mgr->nctl; c++)
if ((ctl_mgr->ctls[c].status & checkm) == match)
goto found;
DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
found:
ctl = &ctl_mgr->ctls[c];
ctl->status |= CTL_STAT_BUSY;
ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
unlock:
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
return ctl;
}
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
unsigned long flags;
int c;
for (c = 0; c < ctl_mgr->nctl; c++) {
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
}
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
kfree(ctl_mgr);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
struct mdp5_ctl_manager *ctl_mgr;
const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
unsigned dsi_cnt = 0;
const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
}
/* initialize the CTL manager: */
ctl_mgr->dev = dev;
ctl_mgr->nlm = hw_cfg->lm.count;
ctl_mgr->nctl = ctl_cfg->count;
ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
spin_lock_init(&ctl_mgr->pool_lock);
/* initialize each CTL of the pool: */
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
for (c = 0; c < ctl_mgr->nctl; c++) {
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
ctl->reg_offset = ctl_cfg->base[c];
ctl->status = 0;
spin_lock_init(&ctl->hw_lock);
}
/*
* In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI
* interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
* only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
* Single FLUSH is supported from hw rev v3.0.
*/
for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
if (hw_cfg->intf.connect[c] == INTF_DSI)
dsi_cnt++;
if ((rev >= 3) && (dsi_cnt > 1)) {
ctl_mgr->single_flush_supported = true;
/* Reserve CTL0/1 for INTF1/2 */
ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
}
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
return ctl_mgr;
fail:
if (ctl_mgr)
mdp5_ctlm_destroy(ctl_mgr);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_fourcc.h>
#include <drm/drm_util.h>
#include "mdp5_kms.h"
#include "mdp5_smp.h"
struct mdp5_smp {
struct drm_device *dev;
uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */
int blk_cnt;
int blk_size;
/* register cache */
u32 alloc_w[22];
u32 alloc_r[22];
u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
struct msm_drm_private *priv = smp->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED 0
if (WARN_ON(plane >= pipe2nclients(pipe)))
return CID_UNUSED;
/*
* Note on SMP clients:
* For ViG pipes, the fetch clients for the Y/Cr/Cb components are
* always consecutive, and in that order.
*
* e.g.:
* if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
* Y plane's client ID is N
* Cr plane's client ID is N + 1
* Cb plane's client ID is N + 2
*/
return mdp5_cfg->smp.clients[pipe] + plane;
}
/* allocate blocks for the specified request: */
static int smp_request_block(struct mdp5_smp *smp,
struct mdp5_smp_state *state,
u32 cid, int nblks)
{
void *cs = state->client_state[cid];
int i, avail, cnt = smp->blk_cnt;
uint8_t reserved;
/* we shouldn't be requesting blocks for an in-use client: */
WARN_ON(!bitmap_empty(cs, cnt));
reserved = smp->reserved[cid];
if (reserved) {
nblks = max(0, nblks - reserved);
DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
}
avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
return -ENOSPC;
}
for (i = 0; i < nblks; i++) {
int blk = find_first_zero_bit(state->state, cnt);
set_bit(blk, cs);
set_bit(blk, state->state);
}
return 0;
}
static void set_fifo_thresholds(struct mdp5_smp *smp,
enum mdp5_pipe pipe, int nblks)
{
u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
u32 val;
/* 1/4 of SMP pool that is being fetched */
val = (nblks * smp_entries_per_blk) / 4;
smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
}
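/*
 * Worked example (assumes an illustrative 8192-byte MMB): each block
 * holds 8192 / (128 / 8) = 512 SMP entries, so nblks = 2 gives
 * val = (2 * 512) / 4 = 256 and watermarks wm0/wm1/wm2 = 256/512/768,
 * i.e. 1/4, 2/4 and 3/4 of the pipe's share of the pool.
 */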
/*
* NOTE: looks like if horizontal decimation is used (if we supported that)
* then the width used to calculate SMP block requirements is the post-
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
const struct mdp_format *format,
u32 width, bool hdecim)
{
const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(smp);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines;
uint32_t blkcfg = 0;
nplanes = info->num_planes;
hsub = info->hsub;
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
/* Newer MDPs have split/packing logic, which fetches sub-sampled
* U and V components (splits them from Y if necessary) and packs
* them together, writes to SMP using a single client.
*/
if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
nplanes = 2;
/* if decimation is enabled, the HW decimates less on the
* sub-sampled chroma components
*/
if (hdecim && (hsub > 1))
hsub = 1;
}
for (i = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
cpp = info->cpp[i];
fetch_stride = width * cpp / (i ? hsub : 1);
n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
/* for hw rev v1.00 */
if (rev == 0)
n = roundup_pow_of_two(n);
blkcfg |= (n << (8 * i));
}
return blkcfg;
}
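/*
 * Worked example (assumes rev > 0, an illustrative 4096-byte MMB and a
 * 1920-wide NV12 buffer with cpp = {1, 2} and hsub = 2): chroma is
 * fetched packed, so nplanes = 2. Plane 0 needs 1920 * 1 * 2 lines =
 * 3840 bytes -> 1 block; plane 1 needs (1920 * 2 / 2) * 2 = 3840 bytes
 * -> 1 block. The result is blkcfg = 0x0101, one MMB per client.
 */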
int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
enum mdp5_pipe pipe, uint32_t blkcfg)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct drm_device *dev = mdp5_kms->dev;
int i, ret;
for (i = 0; i < pipe2nclients(pipe); i++) {
u32 cid = pipe2client(pipe, i);
int n = blkcfg & 0xff;
if (!n)
continue;
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, state, cid, n);
if (ret) {
DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
blkcfg >>= 8;
}
state->assigned |= (1 << pipe);
return 0;
}
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
enum mdp5_pipe pipe)
{
int i;
int cnt = smp->blk_cnt;
for (i = 0; i < pipe2nclients(pipe); i++) {
u32 cid = pipe2client(pipe, i);
void *cs = state->client_state[cid];
/* update global state: */
bitmap_andnot(state->state, state->state, cs, cnt);
/* clear client's state */
bitmap_zero(cs, cnt);
}
state->released |= (1 << pipe);
}
/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
* happen after scanout completes.
*/
static unsigned update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned)
{
int cnt = smp->blk_cnt;
unsigned nblks = 0;
u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
int idx = blk / 3;
int fld = blk % 3;
val = smp->alloc_w[idx];
switch (fld) {
case 0:
val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
smp->alloc_w[idx] = val;
smp->alloc_r[idx] = val;
nblks++;
}
return nblks;
}
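/*
 * Example (illustrative block number): MMB 7 lives in ALLOC register
 * 7 / 3 = 2 at field 7 % 3 = 1, so assigning it to client cid rewrites
 * the CLIENT1 field of alloc_w[2] and mirrors the value into alloc_r[2].
 */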
static void write_smp_alloc_regs(struct mdp5_smp *smp)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int i, num_regs;
num_regs = smp->blk_cnt / 3 + 1;
for (i = 0; i < num_regs; i++) {
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
smp->alloc_w[i]);
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
smp->alloc_r[i]);
}
}
static void write_smp_fifo_regs(struct mdp5_smp *smp)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int i;
for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
enum mdp5_pipe pipe = hwpipe->pipe;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
smp->pipe_reqprio_fifo_wm0[pipe]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
smp->pipe_reqprio_fifo_wm1[pipe]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
smp->pipe_reqprio_fifo_wm2[pipe]);
}
}
void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
enum mdp5_pipe pipe;
for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
unsigned i, nblks = 0;
for (i = 0; i < pipe2nclients(pipe); i++) {
u32 cid = pipe2client(pipe, i);
void *cs = state->client_state[cid];
nblks += update_smp_state(smp, cid, cs);
DBG("assign %s:%u, %u blks",
pipe2name(pipe), i, nblks);
}
set_fifo_thresholds(smp, pipe, nblks);
}
write_smp_alloc_regs(smp);
write_smp_fifo_regs(smp);
state->assigned = 0;
}
void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
enum mdp5_pipe pipe;
for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
DBG("release %s", pipe2name(pipe));
set_fifo_thresholds(smp, pipe, 0);
}
write_smp_fifo_regs(smp);
state->released = 0;
}
void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct mdp5_hw_pipe_state *hwpstate;
struct mdp5_smp_state *state;
struct mdp5_global_state *global_state;
int total = 0, i, j;
drm_printf(p, "name\tinuse\tplane\n");
drm_printf(p, "----\t-----\t-----\n");
if (drm_can_sleep())
drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
global_state = mdp5_get_existing_global_state(mdp5_kms);
/* grab these *after* we hold the state_lock */
hwpstate = &global_state->hwpipe;
state = &global_state->smp;
for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
enum mdp5_pipe pipe = hwpipe->pipe;
for (j = 0; j < pipe2nclients(pipe); j++) {
u32 cid = pipe2client(pipe, j);
void *cs = state->client_state[cid];
int inuse = bitmap_weight(cs, smp->blk_cnt);
drm_printf(p, "%s:%d\t%d\t%s\n",
pipe2name(pipe), j, inuse,
plane ? plane->name : NULL);
total += inuse;
}
}
drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
bitmap_weight(state->state, smp->blk_cnt));
if (drm_can_sleep())
drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
kfree(smp);
}
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
struct mdp5_smp_state *state;
struct mdp5_global_state *global_state;
struct mdp5_smp *smp = NULL;
int ret;
smp = kzalloc(sizeof(*smp), GFP_KERNEL);
if (unlikely(!smp)) {
ret = -ENOMEM;
goto fail;
}
smp->dev = mdp5_kms->dev;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
global_state = mdp5_get_existing_global_state(mdp5_kms);
state = &global_state->smp;
/* statically tied MMBs cannot be re-allocated: */
bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
return smp;
fail:
if (smp)
mdp5_smp_destroy(smp);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 The Linux Foundation. All rights reserved.
*/
#include "mdp5_kms.h"
/*
* As of now, there are only 2 combinations possible for source split:
*
* Left | Right
* -----|------
* LM0 | LM1
* LM2 | LM5
*
*/
static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
{
int i;
int pair_lm;
pair_lm = lm_right_pair[lm];
if (pair_lm < 0)
return -EINVAL;
for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
if (mixer->lm == pair_lm)
return mixer->idx;
}
return -1;
}
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_global_state *global_state = mdp5_get_global_state(s);
struct mdp5_hw_mixer_state *new_state;
int i;
if (IS_ERR(global_state))
return PTR_ERR(global_state);
new_state = &global_state->hwmixer;
for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
/*
* skip if already in-use by a different CRTC. If there is a
* mixer already assigned to this CRTC, it means this call is
* a request to get an additional right mixer. Assume that the
* existing mixer is the 'left' one, and try to see if we can
* get its corresponding 'right' pair.
*/
if (new_state->hwmixer_to_crtc[cur->idx] &&
new_state->hwmixer_to_crtc[cur->idx] != crtc)
continue;
/* skip if doesn't support some required caps: */
if (caps & ~cur->caps)
continue;
if (r_mixer) {
int pair_idx;
pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
if (pair_idx < 0)
return -EINVAL;
if (new_state->hwmixer_to_crtc[pair_idx])
continue;
*r_mixer = mdp5_kms->hwmixers[pair_idx];
}
/*
* prefer a pair-able LM over an unpairable one. We can
* switch the CRTC from Normal mode to Source Split mode
* without requiring a full modeset if we have already
* assigned this CRTC a pair-able LM.
*
* TODO: There will be assignment sequences which would
* result in the CRTC requiring a full modeset, even
* if we have the LM resources to prevent it. For a platform
* with a few displays, we don't run out of pair-able LMs
* so easily. For now, ignore the possibility of requiring
* a full modeset.
*/
if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
*mixer = cur;
}
if (!(*mixer))
return -ENOMEM;
if (r_mixer && !(*r_mixer))
return -ENOMEM;
DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
if (r_mixer) {
DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
crtc->name);
new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
}
return 0;
}
int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
struct mdp5_global_state *global_state = mdp5_get_global_state(s);
struct mdp5_hw_mixer_state *new_state;
if (!mixer)
return 0;
if (IS_ERR(global_state))
return PTR_ERR(global_state);
new_state = &global_state->hwmixer;
if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
return -EINVAL;
DBG("%s: release from crtc %s", mixer->name,
new_state->hwmixer_to_crtc[mixer->idx]->name);
new_state->hwmixer_to_crtc[mixer->idx] = NULL;
return 0;
}
void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
{
kfree(mixer);
}
static const char * const mixer_names[] = {
"LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
};
struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
{
struct mdp5_hw_mixer *mixer;
mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return ERR_PTR(-ENOMEM);
mixer->name = mixer_names[lm->id];
mixer->lm = lm->id;
mixer->caps = lm->caps;
mixer->pp = lm->pp;
mixer->dspp = lm->dspp;
mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
return mixer;
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include "mdp5_kms.h"
#ifdef CONFIG_DRM_MSM_DSI
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
#define VSYNC_CLK_RATE 19200000
static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct device *dev = encoder->dev->dev;
u32 total_lines, vclks_line, cfg;
long vsync_clk_speed;
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
return -EINVAL;
}
total_lines = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines) {
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
}
vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
if (vsync_clk_speed <= 0) {
DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
vsync_clk_speed);
return -EINVAL;
}
vclks_line = vsync_clk_speed / total_lines;
cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
/*
* Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
* the vsync_clk, equating to roughly half the desired panel refresh rate.
* This is only necessary as a stability fallback if interrupts from the
* panel arrive too late or not at all, but it is currently used by default
* because these panel interrupts are not wired up yet.
*/
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
mdp5_write(mdp5_kms,
REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
mdp5_write(mdp5_kms,
REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay);
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
MDP5_PP_SYNC_THRESH_START(4) |
MDP5_PP_SYNC_THRESH_CONTINUE(4));
mdp5_write(mdp5_kms, REG_MDP5_PP_AUTOREFRESH_CONFIG(pp_id), 0x0);
return 0;
}
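/*
 * Worked example (illustrative panel timings): for vtotal = 1980 at
 * 60 Hz, total_lines = 1980 * 60 = 118800. With the vsync counter at
 * 19.2 MHz, vclks_line = 19200000 / 118800 ~= 161 ticks per line, and
 * SYNC_CONFIG_HEIGHT is programmed to 2 * 1980 = 3960 lines (the
 * half-refresh-rate fallback described above).
 */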
static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
int ret;
ret = clk_set_rate(mdp5_kms->vsync_clk,
clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
if (ret) {
DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_set_rate failed, %d\n", ret);
return ret;
}
ret = clk_prepare_enable(mdp5_kms->vsync_clk);
if (ret) {
DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_prepare_enable failed, %d\n", ret);
return ret;
}
mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1);
return 0;
}
static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
clk_disable_unprepare(mdp5_kms->vsync_clk);
}
void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
mode = adjusted_mode;
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
pingpong_tearcheck_setup(encoder, mode);
mdp5_crtc_set_pipeline(encoder->crtc);
}
void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = mdp5_cmd_enc->intf;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(!mdp5_cmd_enc->enabled))
return;
pingpong_tearcheck_disable(encoder);
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_cmd_enc->enabled = false;
}
void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = mdp5_cmd_enc->intf;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(mdp5_cmd_enc->enabled))
return;
if (pingpong_tearcheck_enable(encoder))
return;
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_cmd_enc->enabled = true;
}
int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms;
struct device *dev;
int intf_num;
u32 data = 0;
if (!encoder || !slave_encoder)
return -EINVAL;
mdp5_kms = get_kms(encoder);
intf_num = mdp5_cmd_enc->intf->num;
/* Switch slave encoder's trigger MUX, to use the master's
* start signal for the slave encoder
*/
if (intf_num == 1)
data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
else if (intf_num == 2)
data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
else
return -EINVAL;
/* Smart Panel, Sync mode */
data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
dev = &mdp5_kms->pdev->dev;
/* Make sure the clocks are on when connectors call this function. */
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
pm_runtime_put_sync(dev);
return 0;
}
#endif /* CONFIG_DRM_MSM_DSI */
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_print.h>
#include "mdp5_kms.h"
struct mdp5_plane {
struct drm_plane base;
uint32_t nformats;
uint32_t formats[32];
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest);
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static bool plane_enabled(struct drm_plane_state *state)
{
return state->visible;
}
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
}
/* helper to install properties which are common to planes and crtcs */
static void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
unsigned int zpos;
drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
zpos = STAGE_BASE;
else
zpos = STAGE0 + drm_plane_index(plane);
drm_plane_create_zpos_property(plane, zpos, 1, 255);
}
static void
mdp5_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
struct mdp5_kms *mdp5_kms = get_kms(state->plane);
drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
pstate->hwpipe->name : "(null)");
if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
drm_printf(p, "\tright-hwpipe=%s\n",
pstate->r_hwpipe ? pstate->r_hwpipe->name :
"(null)");
drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
}
static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(to_mdp5_plane_state(plane->state));
plane->state = NULL;
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
if (!mdp5_state)
return;
__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}
static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
if (WARN_ON(!plane->state))
return NULL;
mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
sizeof(*mdp5_state), GFP_KERNEL);
if (!mdp5_state)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
return &mdp5_state->base;
}
static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
__drm_atomic_helper_plane_destroy_state(state);
kfree(pstate);
}
static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
.atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;
if (!new_state->fb)
return 0;
drm_gem_plane_helper_prepare_fb(plane, new_state);
return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_framebuffer *fb = old_state->fb;
bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;
if (!fb)
return;
DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
}
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct drm_plane_state *state)
{
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_plane *plane = state->plane;
struct drm_plane_state *old_state = plane->state;
struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
bool new_hwpipe = false;
bool need_right_hwpipe = false;
uint32_t max_width, max_height;
bool out_of_bounds = false;
uint32_t caps = 0;
int min_scale, max_scale;
int ret;
DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
max_width = config->hw->lm.max_width << 16;
max_height = config->hw->lm.max_height << 16;
/* Make sure source dimensions are within bounds. */
if (state->src_h > max_height)
out_of_bounds = true;
if (state->src_w > max_width) {
/* If source split is supported, we can go up to 2x
* the max LM width, but we'd need to stage another
* hwpipe to the right LM. So, the drm_plane would
* consist of 2 hwpipes.
*/
if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
(state->src_w <= 2 * max_width))
need_right_hwpipe = true;
else
out_of_bounds = true;
}
if (out_of_bounds) {
struct drm_rect src = drm_plane_state_src(state);
DBG("Invalid source size "DRM_RECT_FP_FMT,
DRM_RECT_FP_ARG(&src));
return -ERANGE;
}
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
min_scale, max_scale,
true, true);
if (ret)
return ret;
if (plane_enabled(state)) {
unsigned int rotation;
const struct mdp_format *format;
struct mdp5_kms *mdp5_kms = get_kms(plane);
uint32_t blkcfg = 0;
format = to_mdp_format(msm_framebuffer_format(state->fb));
if (MDP_FORMAT_IS_YUV(format))
caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
if (((state->src_w >> 16) != state->crtc_w) ||
((state->src_h >> 16) != state->crtc_h))
caps |= MDP_PIPE_CAP_SCALE;
rotation = drm_rotation_simplify(state->rotation,
DRM_MODE_ROTATE_0 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
if (rotation & DRM_MODE_REFLECT_X)
caps |= MDP_PIPE_CAP_HFLIP;
if (rotation & DRM_MODE_REFLECT_Y)
caps |= MDP_PIPE_CAP_VFLIP;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
caps |= MDP_PIPE_CAP_CURSOR;
/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
new_hwpipe = true;
/*
* (re)allocate hw pipes if we're either requesting 2 hw pipes
* or we're switching from 2 hw pipes to 1 hw pipe because the
* new src_w can be supported by 1 hw pipe by itself.
*/
if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
(!need_right_hwpipe && mdp5_state->r_hwpipe))
new_hwpipe = true;
if (mdp5_kms->smp) {
const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(state->fb));
blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
state->src_w >> 16, false);
if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
new_hwpipe = true;
}
/* (re)assign hwpipe if needed, otherwise keep old one: */
if (new_hwpipe) {
/* TODO maybe we want to re-assign hwpipe sometimes
* in cases when we no longer need some caps to make
* it available for other planes?
*/
struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
struct mdp5_hw_pipe *old_right_hwpipe =
mdp5_state->r_hwpipe;
struct mdp5_hw_pipe *new_hwpipe = NULL;
struct mdp5_hw_pipe *new_right_hwpipe = NULL;
ret = mdp5_pipe_assign(state->state, plane, caps,
blkcfg, &new_hwpipe,
need_right_hwpipe ?
&new_right_hwpipe : NULL);
if (ret) {
DBG("%s: failed to assign hwpipe(s)!",
plane->name);
return ret;
}
mdp5_state->hwpipe = new_hwpipe;
if (need_right_hwpipe)
mdp5_state->r_hwpipe = new_right_hwpipe;
else
/*
* set it to NULL so that the driver knows we
* don't have a right hwpipe when committing a
* new state
*/
mdp5_state->r_hwpipe = NULL;
ret = mdp5_pipe_release(state->state, old_hwpipe);
if (ret)
return ret;
ret = mdp5_pipe_release(state->state, old_right_hwpipe);
if (ret)
return ret;
}
} else {
ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
if (ret)
return ret;
ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
if (ret)
return ret;
mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
return 0;
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
if (!crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
}
static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
DBG("%s: update", plane->name);
if (plane_enabled(new_state)) {
int ret;
ret = mdp5_plane_mode_set(plane,
new_state->crtc, new_state->fb,
&new_state->src, &new_state->dst);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
}
}
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
int ret;
crtc_state = drm_atomic_get_existing_crtc_state(state,
new_plane_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
if (!crtc_state->active)
return -EINVAL;
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_state->hwpipe)
return -EINVAL;
/* only allow changing of position (crtc x/y or src x/y) in fast path */
if (plane->state->crtc != new_plane_state->crtc ||
plane->state->src_w != new_plane_state->src_w ||
plane->state->src_h != new_plane_state->src_h ||
plane->state->crtc_w != new_plane_state->crtc_w ||
plane->state->crtc_h != new_plane_state->crtc_h ||
!plane->state->fb ||
plane->state->fb != new_plane_state->fb)
return -EINVAL;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale, max_scale,
true, true);
if (ret)
return ret;
/*
* if the visibility of the plane changes (i.e., if the cursor is
* clipped out completely), we can't take the async path because
* we need to stage/unstage the plane from the Layer Mixer(s). We
* also assign/unassign the hwpipe(s) tied to the plane. We avoid
* taking the fast path for both these reasons.
*/
if (new_plane_state->visible != plane->state->visible)
return -EINVAL;
return 0;
}
static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *old_fb = plane->state->fb;
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->crtc_x = new_state->crtc_x;
plane->state->crtc_y = new_state->crtc_y;
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
mdp5_crtc_get_pipeline(new_state->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
&new_state->src, &new_state->dst);
WARN_ON(ret < 0);
ctl = mdp5_crtc_get_ctl(new_state->crtc);
mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
}
*to_mdp5_plane_state(plane->state) =
*to_mdp5_plane_state(new_state);
new_state->fb = old_fb;
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.prepare_fb = mdp5_plane_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update,
.atomic_async_check = mdp5_plane_atomic_async_check,
.atomic_async_update = mdp5_plane_atomic_async_update,
};
static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
enum mdp5_pipe pipe,
struct drm_framebuffer *fb)
{
struct msm_kms *kms = &mdp5_kms->base.base;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
msm_framebuffer_iova(fb, kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
msm_framebuffer_iova(fb, kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
}
/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
{
uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
~MDP5_PIPE_OP_MODE_CSC_1_EN;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
}
/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
struct csc_cfg *csc)
{
uint32_t i, mode = 0; /* RGB, no CSC */
uint32_t *matrix;
if (unlikely(!csc))
return;
if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);
matrix = csc->matrix;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));
for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
uint32_t *pre_clamp = csc->pre_clamp;
uint32_t *post_clamp = csc->post_clamp;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
}
}
#define PHASE_STEP_SHIFT 21
#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */
static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
{
uint32_t unit;
if (src == 0 || dst == 0)
return -EINVAL;
/*
* PHASE_STEP_X/Y is coded on 26 bits (25:0),
* where 2^21 represents the unity "1" in fixed-point hardware design.
* This leaves 5 bits for the integer part (downscale case):
* -> maximum downscale ratio = 0b1_1111 = 31
*/
if (src > (dst * DOWN_SCALE_RATIO_MAX))
return -EOVERFLOW;
unit = 1 << PHASE_STEP_SHIFT;
*out_phase = mult_frac(unit, src, dst);
return 0;
}
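/*
* Worked example (illustrative, not part of the driver): downscaling a
* 1920-pixel source to 960 destination pixels gives
* mult_frac(1 << 21, 1920, 960) = 2 << 21 = 0x00400000, i.e. the
* source position advances by 2.0 pixels (in 5.21 fixed point) per
* destination pixel; a 1:1 blit yields exactly 1 << 21 = 0x00200000.
*/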
static int calc_scalex_steps(struct drm_plane *plane,
uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasex_steps[COMP_MAX])
{
const struct drm_format_info *info = drm_format_info(pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct device *dev = mdp5_kms->dev->dev;
uint32_t phasex_step;
int ret;
ret = calc_phase_step(src, dest, &phasex_step);
if (ret) {
DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
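/*
* The chroma (COMP_1_2) step is divided by the horizontal subsampling
* factor: e.g. for NV12 (hsub == 2) the chroma plane carries half as
* many samples as luma, so it must step through the source at half
* the luma rate to cover the same output width.
*/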
phasex_steps[COMP_0] = phasex_step;
phasex_steps[COMP_3] = phasex_step;
phasex_steps[COMP_1_2] = phasex_step / info->hsub;
return 0;
}
static int calc_scaley_steps(struct drm_plane *plane,
uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasey_steps[COMP_MAX])
{
const struct drm_format_info *info = drm_format_info(pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct device *dev = mdp5_kms->dev->dev;
uint32_t phasey_step;
int ret;
ret = calc_phase_step(src, dest, &phasey_step);
if (ret) {
DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
phasey_steps[COMP_0] = phasey_step;
phasey_steps[COMP_3] = phasey_step;
phasey_steps[COMP_1_2] = phasey_step / info->vsub;
return 0;
}
static uint32_t get_scale_config(const struct mdp_format *format,
uint32_t src, uint32_t dst, bool horz)
{
const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
bool scaling = format->is_yuv ? true : (src != dst);
uint32_t sub;
uint32_t ya_filter, uv_filter;
bool yuv = format->is_yuv;
if (!scaling)
return 0;
if (yuv) {
sub = horz ? info->hsub : info->vsub;
uv_filter = ((src / sub) <= dst) ?
SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
}
ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
if (horz)
return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
else
return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
}
static void calc_pixel_ext(const struct mdp_format *format,
uint32_t src, uint32_t dst, uint32_t phase_step[2],
int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
bool horz)
{
bool scaling = format->is_yuv ? true : (src != dst);
int i;
/*
* Note:
* We assume here that:
* 1. PCMN filter is used for downscale
* 2. bilinear filter is used for upscale
* 3. we are in a single pipe configuration
*/
for (i = 0; i < COMP_MAX; i++) {
pix_ext_edge1[i] = 0;
pix_ext_edge2[i] = scaling ? 1 : 0;
}
}
static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
const struct mdp_format *format,
uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
uint32_t lr, tb, req;
int i;
for (i = 0; i < COMP_MAX; i++) {
uint32_t roi_w = src_w;
uint32_t roi_h = src_h;
if (format->is_yuv && i == COMP_1_2) {
roi_w /= info->hsub;
roi_h /= info->vsub;
}
lr = (pe_left[i] >= 0) ?
MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);
lr |= (pe_right[i] >= 0) ?
MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);
tb = (pe_top[i] >= 0) ?
MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);
tb |= (pe_bottom[i] >= 0) ?
MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);
req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
pe_left[i] + pe_right[i]);
req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
pe_top[i] + pe_bottom[i]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);
DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));
DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
}
}
struct pixel_ext {
int left[COMP_MAX];
int right[COMP_MAX];
int top[COMP_MAX];
int bottom[COMP_MAX];
};
struct phase_step {
u32 x[COMP_MAX];
u32 y[COMP_MAX];
};
static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
struct mdp5_hw_pipe *hwpipe,
struct drm_framebuffer *fb,
struct phase_step *step,
struct pixel_ext *pe,
u32 scale_config, u32 hdecm, u32 vdecm,
bool hflip, bool vflip,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
u32 src_img_w, u32 src_img_h,
u32 src_x, u32 src_y,
u32 src_w, u32 src_h)
{
enum mdp5_pipe pipe = hwpipe->pipe;
bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(fb));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
MDP5_PIPE_SRC_XY_X(src_x) |
MDP5_PIPE_SRC_XY_Y(src_y));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
MDP5_PIPE_OUT_XY_X(crtc_x) |
MDP5_PIPE_OUT_XY_Y(crtc_y));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
mdp5_write_pixel_ext(mdp5_kms, pipe, format,
src_w, pe->left, pe->right,
src_h, pe->top, pe->bottom);
if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
step->x[COMP_0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
step->y[COMP_0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
step->x[COMP_1_2]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
step->y[COMP_1_2]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
MDP5_PIPE_DECIMATION_VERT(vdecm) |
MDP5_PIPE_DECIMATION_HORZ(hdecm));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
scale_config);
}
if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
if (MDP_FORMAT_IS_YUV(format))
csc_enable(mdp5_kms, pipe,
mdp_get_default_csc_cfg(CSC_YUV2RGB));
else
csc_disable(mdp5_kms, pipe);
}
set_scanout_locked(mdp5_kms, pipe, fb);
}
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest)
{
struct drm_plane_state *pstate = plane->state;
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = hwpipe->pipe;
struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
struct phase_step step = { { 0 } };
struct pixel_ext pe = { { 0 } };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
bool vflip, hflip;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
uint32_t src_img_w, src_img_h;
int ret;
nplanes = fb->format->num_planes;
/* bad formats should already be rejected: */
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;
format = to_mdp_format(msm_framebuffer_format(fb));
pix_format = format->base.pixel_format;
src_x = src->x1;
src_y = src->y1;
src_w = drm_rect_width(src);
src_h = drm_rect_height(src);
crtc_x = dest->x1;
crtc_y = dest->y1;
crtc_w = drm_rect_width(dest);
crtc_h = drm_rect_height(dest);
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;
src_img_w = min(fb->width, src_w);
src_img_h = min(fb->height, src_h);
DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
if (right_hwpipe) {
/*
* if the plane consists of 2 hw pipes, assume that the width
* is split equally across them. The only parameters that vary
* between the 2 pipes are src_x and crtc_x.
*/
crtc_w /= 2;
src_w /= 2;
src_img_w /= 2;
}
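/*
* Illustrative example: a 4096-pixel-wide plane on hardware with a
* 2048-pixel LM limit is programmed as two 2048-pixel halves; the
* second hwpipe (below) is given src_x + src_w and crtc_x + crtc_w
* as its origin.
*/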
ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
if (ret)
return ret;
ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
if (ret)
return ret;
if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
calc_pixel_ext(format, src_w, crtc_w, step.x,
pe.left, pe.right, true);
calc_pixel_ext(format, src_h, crtc_h, step.y,
pe.top, pe.bottom, false);
}
/* TODO calc hdecm, vdecm */
/* SCALE is used to both scale and up-sample chroma components */
config |= get_scale_config(format, src_w, crtc_w, true);
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);
rotation = drm_rotation_simplify(pstate->rotation,
DRM_MODE_ROTATE_0 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
hflip = !!(rotation & DRM_MODE_REFLECT_X);
vflip = !!(rotation & DRM_MODE_REFLECT_Y);
mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
config, hdecm, vdecm, hflip, vflip,
crtc_x, crtc_y, crtc_w, crtc_h,
src_img_w, src_img_h,
src_x, src_y, src_w, src_h);
if (right_hwpipe)
mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
config, hdecm, vdecm, hflip, vflip,
crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
src_img_w, src_img_h,
src_x + src_w, src_y, src_w, src_h);
return ret;
}
/*
* Use this func and the one below only after the atomic state has been
* successfully swapped
*/
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
if (WARN_ON(!pstate->hwpipe))
return SSPP_NONE;
return pstate->hwpipe->pipe;
}
enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
if (!pstate->r_hwpipe)
return SSPP_NONE;
return pstate->r_hwpipe->pipe;
}
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
u32 mask;
if (WARN_ON(!pstate->hwpipe))
return 0;
mask = pstate->hwpipe->flush_mask;
if (pstate->r_hwpipe)
mask |= pstate->r_hwpipe->flush_mask;
return mask;
}
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum drm_plane_type type)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
int ret;
mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
if (!mdp5_plane) {
ret = -ENOMEM;
goto fail;
}
plane = &mdp5_plane->base;
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
NULL, type, NULL);
if (ret)
goto fail;
drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
mdp5_plane_install_properties(plane, &plane->base);
drm_plane_enable_fb_damage_clips(plane);
return plane;
fail:
if (plane)
mdp5_plane_destroy(plane);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/sort.h>
#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "mdp5_kms.h"
#include "msm_gem.h"
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
struct mdp5_crtc {
struct drm_crtc base;
int id;
bool enabled;
spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
/* Bits that have been flushed at the last commit,
* used to decide if a vsync has happened since the last commit.
*/
u32 flushed_mask;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
/* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work;
struct mdp_irq vblank;
struct mdp_irq err;
struct mdp_irq pp_done;
struct completion pp_completion;
bool lm_cursor_enabled;
struct {
/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
spinlock_t lock;
/* current cursor being scanned out: */
struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height;
int x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
atomic_or(pending, &mdp5_crtc->pending);
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
static void request_pp_done_pending(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
reinit_completion(&mdp5_crtc->pp_completion);
}
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
bool start = !mdp5_cstate->defer_start;
mdp5_cstate->defer_start = false;
DBG("%s: flush=%08x", crtc->name, flush_mask);
return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}
/*
* flush updates, to make sure hw is updated to new scanout fb,
* so that we can safely queue unref to current fb (i.e. next
* vblank we know hw is done w/ previous scanout_fb).
*/
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_hw_mixer *mixer, *r_mixer;
struct drm_plane *plane;
uint32_t flush_mask = 0;
/* this should not happen: */
if (WARN_ON(!mdp5_cstate->ctl))
return 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
if (!plane->state->visible)
continue;
flush_mask |= mdp5_plane_get_flush(plane);
}
mixer = mdp5_cstate->pipeline.mixer;
flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
r_mixer = mdp5_cstate->pipeline.r_mixer;
if (r_mixer)
flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
return crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp5_crtc->event;
if (event) {
mdp5_crtc->event = NULL;
DBG("%s: send event: %p", crtc->name, event);
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
if (ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
/* XXX: What to do here? */
/* mdp5_crtc->ctl = NULL; */
}
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
struct mdp5_crtc *mdp5_crtc =
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
kfree(mdp5_crtc);
}
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
switch (stage) {
case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
default:
return 0;
}
}
/*
* left/right pipe offsets for the stage array used in blend_setup()
*/
#define PIPE_LEFT 0
#define PIPE_RIGHT 1
/*
* blend_setup() - blend all the planes of a CRTC
*
* If no base layer is available, border will be enabled as the base layer.
* Otherwise all layers will be blended based on their stage calculated
* in mdp5_crtc_atomic_check.
*/
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
const struct mdp_format *format;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
uint32_t lm = mixer->lm;
struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
u32 val;
#define blender(stage) ((stage) - STAGE0)
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
/* ctl could be released already when we are shutting down: */
/* XXX: Can this happen now? */
if (!ctl)
goto out;
/* Collect all plane information */
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp5_pipe right_pipe;
if (!plane->state->visible)
continue;
pstate = to_mdp5_plane_state(plane->state);
pstates[pstate->stage] = pstate;
stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
/*
* if we have a right mixer, stage the same pipe as we
* have on the left mixer
*/
if (r_mixer)
r_stage[pstate->stage][PIPE_LEFT] =
mdp5_plane_pipe(plane);
/*
* if we have a right pipe (i.e., the plane consists of 2
* hwpipes), then stage the right pipe on the right side of both
* the layer mixers.
*/
right_pipe = mdp5_plane_right_pipe(plane);
if (right_pipe) {
stage[pstate->stage][PIPE_RIGHT] = right_pipe;
r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
}
plane_cnt++;
}
if (!pstates[STAGE_BASE]) {
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled");
} else if (plane_cnt) {
format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
if (format->alpha_enable)
bg_alpha_enabled = true;
}
/* Set up the blend registers for each staged plane */
for (i = STAGE0; i <= STAGE_MAX; i++) {
if (!pstates[i])
continue;
format = to_mdp_format(
msm_framebuffer_format(pstates[i]->base.fb));
plane = pstates[i]->base.plane;
blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
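/*
* base.alpha is the 16-bit DRM plane alpha property (0xffff ==
* opaque); the LM blender takes 8-bit constants, hence the >> 8
* below.
*/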
fg_alpha = pstates[i]->base.alpha >> 8;
bg_alpha = 0xFF - fg_alpha;
if (!format->alpha_enable && bg_alpha_enabled)
mixer_op_mode = 0;
else
mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
if (format->alpha_enable &&
pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |=
MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
} else {
blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
}
} else if (format->alpha_enable &&
pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |=
MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
} else {
blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
}
}
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
blender(i)), blend_op);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
blender(i)), fg_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
blender(i)), bg_alpha);
if (r_mixer) {
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
blender(i)), blend_op);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
blender(i)), fg_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
blender(i)), bg_alpha);
}
}
val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
val | mixer_op_mode);
if (r_mixer) {
val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
val | mixer_op_mode);
}
mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
ctl_blend_flags);
out:
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
uint32_t lm = mixer->lm;
u32 mixer_width, val;
unsigned long flags;
struct drm_display_mode *mode;
if (WARN_ON(!crtc->state))
return;
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));
mixer_width = mode->hdisplay;
if (r_mixer)
mixer_width /= 2;
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
/* Assign mixer to LEFT side in source split mode */
val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
if (r_mixer) {
u32 r_lm = r_mixer->lm;
mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
/* Assign mixer to RIGHT side in source split mode */
val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
}
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc)
return encoder;
return NULL;
}
static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
unsigned int pipe = crtc->index;
struct drm_encoder *encoder;
int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", pipe);
return false;
}
vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
/*
* the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
* the end of VFP. Translate the porch values relative to the line
* counter positions.
*/
vactive_start = vsw + vbp + 1;
vactive_end = vactive_start + mode->crtc_vdisplay;
/* last scan line before VSYNC */
vfp_end = mode->crtc_vtotal;
if (stime)
*stime = ktime_get();
line = mdp5_encoder_get_linecount(encoder);
if (line < vactive_start)
line -= vactive_start;
else if (line > vactive_end)
line = line - vfp_end - vactive_start;
else
line -= vactive_start;
*vpos = line;
*hpos = 0;
if (etime)
*etime = ktime_get();
return true;
}
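/*
* Illustrative example: for a 1080p mode with crtc_vsync_start = 1084,
* crtc_vsync_end = 1089 and crtc_vtotal = 1125, vsw = 5 and vbp = 36,
* so vactive_start = 42: a raw line count of 42 (the first active
* line) reports *vpos = 0, while lines in the blanking region report
* negative positions, as the DRM vblank timestamping helpers expect.
*/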
static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
encoder = get_encoder_from_crtc(crtc);
if (!encoder)
return 0;
return mdp5_encoder_get_framecount(encoder);
}
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct device *dev = &mdp5_kms->pdev->dev;
unsigned long flags;
DBG("%s", crtc->name);
if (WARN_ON(!mdp5_crtc->enabled))
return;
/* Disable/save vblank irq handling before power is disabled */
drm_crtc_vblank_off(crtc);
if (mdp5_cstate->cmd_mode)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
pm_runtime_put_sync(dev);
if (crtc->state->event && !crtc->state->active) {
WARN_ON(mdp5_crtc->event);
spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
}
mdp5_crtc->enabled = false;
}
static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
u32 count;
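/*
* A max vblank count of 0 tells the DRM vblank core that there is no
* free-running hardware frame counter (the command-mode DSI case), so
* it falls back to software counting; video-mode interfaces use the
* full 32-bit hardware counter.
*/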
count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
drm_crtc_set_max_vblank_count(crtc, count);
drm_crtc_vblank_on(crtc);
}
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct device *dev = &mdp5_kms->pdev->dev;
DBG("%s", crtc->name);
if (WARN_ON(mdp5_crtc->enabled))
return;
pm_runtime_get_sync(dev);
if (mdp5_crtc->lm_cursor_enabled) {
/*
* Restore LM cursor state, as it might have been lost
* with suspend:
*/
if (mdp5_crtc->cursor.iova) {
unsigned long flags;
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
mdp5_ctl_set_cursor(mdp5_cstate->ctl,
&mdp5_cstate->pipeline, 0, true);
} else {
mdp5_ctl_set_cursor(mdp5_cstate->ctl,
&mdp5_cstate->pipeline, 0, false);
}
}
/* Restore vblank irq handling after power is enabled */
mdp5_crtc_vblank_on(crtc);
mdp5_crtc_mode_set_nofb(crtc);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
if (mdp5_cstate->cmd_mode)
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp5_crtc->enabled = true;
}
static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state,
bool need_right_mixer)
{
struct mdp5_crtc_state *mdp5_cstate =
to_mdp5_crtc_state(new_crtc_state);
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_interface *intf;
bool new_mixer = false;
new_mixer = !pipeline->mixer;
if ((need_right_mixer && !pipeline->r_mixer) ||
(!need_right_mixer && pipeline->r_mixer))
new_mixer = true;
if (new_mixer) {
struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
u32 caps;
int ret;
caps = MDP_LM_CAP_DISPLAY;
if (need_right_mixer)
caps |= MDP_LM_CAP_PAIR;
ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
&pipeline->mixer, need_right_mixer ?
&pipeline->r_mixer : NULL);
if (ret)
return ret;
ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
if (ret)
return ret;
if (old_r_mixer) {
ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
if (ret)
return ret;
if (!need_right_mixer)
pipeline->r_mixer = NULL;
}
}
/*
* these should have been already set up in the encoder's atomic
* check (called by drm_atomic_helper_check_modeset)
*/
intf = pipeline->intf;
mdp5_cstate->err_irqmask = intf2err(intf->num);
mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
if ((intf->type == INTF_DSI) &&
(intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
mdp5_cstate->cmd_mode = true;
} else {
mdp5_cstate->pp_done_irqmask = 0;
mdp5_cstate->cmd_mode = false;
}
return 0;
}
struct plane_state {
struct drm_plane *plane;
struct mdp5_plane_state *state;
};
static int pstate_cmp(const void *a, const void *b)
{
struct plane_state *pa = (struct plane_state *)a;
struct plane_state *pb = (struct plane_state *)b;
return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos;
}
/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
struct drm_plane_state *pstate)
{
return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state,
struct drm_plane_state *bpstate)
{
struct mdp5_crtc_state *mdp5_cstate =
to_mdp5_crtc_state(new_crtc_state);
/*
* if we're in source split mode, it's mandatory to have
* border out on the base stage
*/
if (mdp5_cstate->pipeline.r_mixer)
return STAGE0;
/* if the bottom-most layer is not fullscreen, the base stage must
* be used for the solid border color, so start staging planes at
* STAGE0:
*/
if (!is_fullscreen(new_crtc_state, bpstate))
return STAGE0;
return STAGE_BASE;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
bool cursor_plane = false;
bool need_right_mixer = false;
int cnt = 0, i;
int ret;
enum mdp_mixer_stage_id start;
DBG("%s: check", crtc->name);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
struct mdp5_plane_state *mdp5_pstate =
to_mdp5_plane_state(pstate);
if (!pstate->visible)
continue;
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
mdp5_pstate->needs_dirtyfb =
intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
/*
* if any plane on this crtc uses 2 hwpipes, then we need
* the crtc to have a right hwmixer.
*/
if (pstates[cnt].state->r_hwpipe)
need_right_mixer = true;
cnt++;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
cursor_plane = true;
}
/* bail out early if there aren't any planes */
if (!cnt)
return 0;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
/*
* we need a right hwmixer if the mode's width is greater than a single
* LM's max width
*/
if (mode->hdisplay > hw_cfg->lm.max_width)
need_right_mixer = true;
ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
if (ret) {
DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
return ret;
}
/* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
/* trigger a warning if cursor isn't the highest zorder */
WARN_ON(cursor_plane &&
(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
cnt, start);
return -EINVAL;
}
for (i = 0; i < cnt; i++) {
if (cursor_plane && (i == (cnt - 1)))
pstates[i].state->stage = hw_cfg->lm.nb_stages;
else
pstates[i].state->stage = start + i;
DBG("%s: assign pipe %s on stage=%d", crtc->name,
pstates[i].plane->name,
pstates[i].state->stage);
}
return 0;
}
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
DBG("%s: begin", crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
unsigned long flags;
DBG("%s: event: %p", crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
mdp5_crtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
/*
* If no CTL has been allocated in mdp5_crtc_atomic_check(),
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
/* XXX: Can this happen now ? */
if (unlikely(!mdp5_cstate->ctl))
return;
blend_setup(crtc);
/* The PP_DONE irq is only used by command mode for now.
* It is better to request it as pending before the FLUSH and START
* triggers, to make sure no pp_done irq is missed.
* This is safe because no pp_done will happen before the SW trigger
* in command mode.
*/
if (mdp5_cstate->cmd_mode)
request_pp_done_pending(crtc);
mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
/* XXX are we leaking out state here? */
mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
request_pending(crtc, PENDING_FLIP);
}
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
uint32_t xres = crtc->mode.hdisplay;
uint32_t yres = crtc->mode.vdisplay;
/*
* The cursor Region Of Interest (ROI) is the portion of the cursor
* buffer that is read out for rendering. The ROI is determined by the
* visibility of the cursor point; in the default cursor image the
* cursor point is at the top left of the cursor image.
*
* Without rotation:
* If the cursor point reaches the right (xres - x < cursor.width) or
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
* width and ROI height need to be evaluated to crop the cursor image
* accordingly.
* (xres-x) will be the new cursor width when x > (xres - cursor.width)
* (yres-y) will be the new cursor height when y > (yres - cursor.height)
*
* With rotation:
* We get negative x and/or y coordinates.
* (cursor.width - abs(x)) will be the new cursor width when x < 0
* (cursor.height - abs(y)) will be the new cursor height when y < 0
*/
if (mdp5_crtc->cursor.x >= 0)
*roi_w = min(mdp5_crtc->cursor.width, xres -
mdp5_crtc->cursor.x);
else
*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
if (mdp5_crtc->cursor.y >= 0)
*roi_h = min(mdp5_crtc->cursor.height, yres -
mdp5_crtc->cursor.y);
else
*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
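/*
* Illustrative example: with a 64x64 cursor on a 1920-pixel-wide mode,
* x = 1900 yields roi_w = min(64, 1920 - 1900) = 20 (right-edge crop),
* while x = -10 yields roi_w = 64 - 10 = 54, with the first 10 source
* columns skipped via src_x in mdp5_crtc_restore_cursor().
*/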
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
uint32_t x, y, src_x, src_y, width, height;
uint32_t roi_w, roi_h;
int lm;
assert_spin_locked(&mdp5_crtc->cursor.lock);
lm = mdp5_cstate->pipeline.mixer->lm;
x = mdp5_crtc->cursor.x;
y = mdp5_crtc->cursor.y;
width = mdp5_crtc->cursor.width;
height = mdp5_crtc->cursor.height;
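/* The LM cursor is programmed as ARGB8888 (see CURSOR_FORMAT below),
* so info->cpp[0] == 4 and the scanout stride is width * 4 bytes.
*/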
stride = width * info->cpp[0];
get_roi(crtc, &roi_w, &roi_h);
/* If the cursor buffer overlaps the upper or left screen
* border due to rotation, the pixel offset of the ROI
* inside the cursor buffer is the positive overlap
* distance.
*/
if (mdp5_crtc->cursor.x < 0) {
src_x = abs(mdp5_crtc->cursor.x);
x = 0;
} else {
src_x = 0;
}
if (mdp5_crtc->cursor.y < 0) {
src_y = abs(mdp5_crtc->cursor.y);
y = 0;
} else {
src_y = 0;
}
DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
crtc->name, x, y, roi_w, roi_h, src_x, src_y);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
MDP5_LM_CURSOR_XY_SRC_X(src_x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle,
uint32_t width, uint32_t height)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
struct mdp5_ctl *ctl;
int ret;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
bool cursor_enable = true;
unsigned long flags;
if (!mdp5_crtc->lm_cursor_enabled) {
dev_warn(dev->dev,
"cursor_set is deprecated with cursor planes\n");
return -EINVAL;
}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
ctl = mdp5_cstate->ctl;
if (!ctl)
return -EINVAL;
/* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
mdp5_crtc->cursor.iova = 0;
pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
cursor_bo = drm_gem_object_lookup(file, handle);
if (!cursor_bo)
return -ENOENT;
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
if (ret) {
drm_gem_object_put(cursor_bo);
return -EINVAL;
}
pm_runtime_get_sync(&pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
mdp5_crtc->cursor.scanout_bo = cursor_bo;
mdp5_crtc->cursor.width = width;
mdp5_crtc->cursor.height = height;
mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
goto end;
}
crtc_flush(crtc, flush_mask);
end:
pm_runtime_put_sync(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
request_pending(crtc, PENDING_CURSOR);
}
return ret;
}
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
struct drm_device *dev = crtc->dev;
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
if (!mdp5_crtc->lm_cursor_enabled) {
dev_warn(dev->dev,
"cursor_move is deprecated with cursor planes\n");
return -EINVAL;
}
/* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
/* In case the CRTC is disabled, just drop the cursor update */
if (unlikely(!crtc->state->enable))
return 0;
/* accept negative x/y coordinates up to maximum cursor overlap */
mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
get_roi(crtc, &roi_w, &roi_h);
pm_runtime_get_sync(&mdp5_kms->pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
crtc_flush(crtc, flush_mask);
pm_runtime_put_sync(&mdp5_kms->pdev->dev);
return 0;
}
static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
const struct drm_crtc_state *state)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
if (WARN_ON(!pipeline))
return;
if (mdp5_cstate->ctl)
drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
pipeline->mixer->name : "(null)");
if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
pipeline->r_mixer->name : "(null)");
drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}
static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate;
if (WARN_ON(!crtc->state))
return NULL;
mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
sizeof(*mdp5_cstate), GFP_KERNEL);
if (!mdp5_cstate)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
return &mdp5_cstate->base;
}
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
__drm_atomic_helper_crtc_destroy_state(state);
kfree(mdp5_cstate);
}
static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate =
kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
if (crtc->state)
mdp5_crtc_destroy_state(crtc, crtc->state);
if (mdp5_cstate)
__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
.atomic_destroy_state = mdp5_crtc_destroy_state,
.atomic_print_state = mdp5_crtc_atomic_print_state,
.get_vblank_counter = mdp5_crtc_get_vblank_counter,
.enable_vblank = msm_crtc_enable_vblank,
.disable_vblank = msm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
.atomic_destroy_state = mdp5_crtc_destroy_state,
.cursor_set = mdp5_crtc_cursor_set,
.cursor_move = mdp5_crtc_cursor_move,
.atomic_print_state = mdp5_crtc_atomic_print_state,
.get_vblank_counter = mdp5_crtc_get_vblank_counter,
.enable_vblank = msm_crtc_enable_vblank,
.disable_vblank = msm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.atomic_check = mdp5_crtc_atomic_check,
.atomic_begin = mdp5_crtc_atomic_begin,
.atomic_flush = mdp5_crtc_atomic_flush,
.atomic_enable = mdp5_crtc_atomic_enable,
.atomic_disable = mdp5_crtc_atomic_disable,
.get_scanout_position = mdp5_crtc_get_scanout_position,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
struct drm_crtc *crtc = &mdp5_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
pending = atomic_xchg(&mdp5_crtc->pending, 0);
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
}
if (pending & PENDING_CURSOR)
drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
pp_done);
complete_all(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
int ret;
ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
msecs_to_jiffies(50));
if (ret == 0)
dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
int ret;
/* Should not call this function if crtc is disabled. */
if (!ctl)
return;
ret = drm_crtc_vblank_get(crtc);
if (ret)
return;
ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
((mdp5_ctl_get_commit_status(ctl) &
mdp5_crtc->flushed_mask) == 0),
msecs_to_jiffies(50));
if (ret <= 0)
dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);
mdp5_crtc->flushed_mask = 0;
drm_crtc_vblank_put(crtc);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
/* should this be done elsewhere ? */
mdp_irq_update(&mdp5_kms->base);
mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
return mdp5_cstate->ctl;
}
struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate;
if (WARN_ON(!crtc))
return ERR_PTR(-EINVAL);
mdp5_cstate = to_mdp5_crtc_state(crtc->state);
return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}
struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate;
if (WARN_ON(!crtc))
return ERR_PTR(-EINVAL);
mdp5_cstate = to_mdp5_crtc_state(crtc->state);
return &mdp5_cstate->pipeline;
}
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
if (mdp5_cstate->cmd_mode)
mdp5_crtc_wait_for_pp_done(crtc);
else
mdp5_crtc_wait_for_flush_done(crtc);
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane,
struct drm_plane *cursor_plane, int id)
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
if (!mdp5_crtc)
return ERR_PTR(-ENOMEM);
crtc = &mdp5_crtc->base;
mdp5_crtc->id = id;
spin_lock_init(&mdp5_crtc->lm_lock);
spin_lock_init(&mdp5_crtc->cursor.lock);
init_completion(&mdp5_crtc->pp_completion);
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
mdp5_crtc->lm_cursor_enabled = !cursor_plane;
drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
cursor_plane ?
&mdp5_crtc_no_lm_cursor_funcs :
&mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
return crtc;
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/irq.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "mdp5_kms.h"
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask)
{
mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
irqmask ^ (irqmask & old_irqmask));
mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler);
static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
extern bool dumpstate;
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
if (dumpstate && __ratelimit(&rs)) {
struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
drm_state_dump(mdp5_kms->dev, &p);
if (mdp5_kms->smp)
mdp5_smp_dump(mdp5_kms->smp, &p);
}
}
void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
pm_runtime_put_sync(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct device *dev = &mdp5_kms->pdev->dev;
struct mdp_irq *error_handler = &mdp5_kms->error_handler;
error_handler->irq = mdp5_irq_error_handler;
error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
MDP5_IRQ_INTF1_UNDER_RUN |
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
pm_runtime_put_sync(dev);
return 0;
}
void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
pm_runtime_put_sync(dev);
}
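/*
 * Top-level MDP5 interrupt handler: ack the latched status bits,
 * dispatch to the registered mdp_irq handlers, and forward vblank
 * interrupts for each CRTC to the drm core.
 */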
irqreturn_t mdp5_irq(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct drm_device *dev = mdp5_kms->dev;
struct drm_crtc *crtc;
uint32_t status, enable;
enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
VERB("status=%08x", status);
mdp_dispatch_irqs(mdp_kms, status);
drm_for_each_crtc(crtc, dev)
if (status & mdp5_crtc_vblank(crtc))
drm_crtc_handle_vblank(crtc);
return IRQ_HANDLED;
}
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
pm_runtime_put_sync(dev);
return 0;
}
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
pm_runtime_put_sync(dev);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "mdp5_kms.h"
int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_global_state *new_global_state, *old_global_state;
struct mdp5_hw_pipe_state *old_state, *new_state;
int i, j;
new_global_state = mdp5_get_global_state(s);
if (IS_ERR(new_global_state))
return PTR_ERR(new_global_state);
/* grab old_state after mdp5_get_global_state(), since now we hold lock: */
old_global_state = mdp5_get_existing_global_state(mdp5_kms);
old_state = &old_global_state->hwpipe;
new_state = &new_global_state->hwpipe;
for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
/* Skip if already in use.  Check both new and old state, since
 * in some cases we cannot immediately re-use a pipe that is
 * released in the current update:
* (1) mdp5 can have SMP (non-double-buffered)
* (2) hw pipe previously assigned to different CRTC
* (vblanks might not be aligned)
*/
if (new_state->hwpipe_to_plane[cur->idx] ||
old_state->hwpipe_to_plane[cur->idx])
continue;
/* skip if doesn't support some required caps: */
if (caps & ~cur->caps)
continue;
/*
* don't assign a cursor pipe to a plane that isn't going to
* be used as a cursor
*/
if (cur->caps & MDP_PIPE_CAP_CURSOR &&
plane->type != DRM_PLANE_TYPE_CURSOR)
continue;
/* possible candidate, take the one with the
* fewest unneeded caps bits set:
*/
if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
hweight_long((*hwpipe)->caps & ~caps))) {
bool r_found = false;
if (r_hwpipe) {
for (j = i + 1; j < mdp5_kms->num_hwpipes;
j++) {
struct mdp5_hw_pipe *r_cur =
mdp5_kms->hwpipes[j];
/* reject different types of hwpipes */
if (r_cur->caps != cur->caps)
continue;
/* respect priority, eg. VIG0 > VIG1 */
if (cur->pipe > r_cur->pipe)
continue;
*r_hwpipe = r_cur;
r_found = true;
break;
}
}
if (!r_hwpipe || r_found)
*hwpipe = cur;
}
}
if (!(*hwpipe))
return -ENOMEM;
if (r_hwpipe && !(*r_hwpipe))
return -ENOMEM;
if (mdp5_kms->smp) {
int ret;
/* We don't support SMP and 2 hwpipes/plane together */
WARN_ON(r_hwpipe);
DBG("%s: alloc SMP blocks", (*hwpipe)->name);
ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp,
(*hwpipe)->pipe, blkcfg);
if (ret)
return -ENOMEM;
(*hwpipe)->blkcfg = blkcfg;
}
DBG("%s: assign to plane %s for caps %x",
(*hwpipe)->name, plane->name, caps);
new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane;
if (r_hwpipe) {
DBG("%s: assign to right of plane %s for caps %x",
(*r_hwpipe)->name, plane->name, caps);
new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane;
}
return 0;
}
int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_global_state *state;
struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
return 0;
state = mdp5_get_global_state(s);
if (IS_ERR(state))
return PTR_ERR(state);
new_state = &state->hwpipe;
if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
return -EINVAL;
DBG("%s: release from plane %s", hwpipe->name,
new_state->hwpipe_to_plane[hwpipe->idx]->name);
if (mdp5_kms->smp) {
DBG("%s: free SMP blocks", hwpipe->name);
mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe);
}
new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
return 0;
}
void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
{
kfree(hwpipe);
}
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps)
{
struct mdp5_hw_pipe *hwpipe;
hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
if (!hwpipe)
return ERR_PTR(-ENOMEM);
hwpipe->name = pipe2name(pipe);
hwpipe->pipe = pipe;
hwpipe->reg_offset = reg_offset;
hwpipe->caps = caps;
hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
return hwpipe;
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/of_irq.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
unsigned long flags;
pm_runtime_get_sync(dev);
/* Magic unknown register writes:
*
* W VBIF:0x004 00000001 (mdss_mdp.c:839)
* W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
* W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
* W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
* W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
* W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
* W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
* W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
* W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
*
* The downstream fbdev driver gets these register offsets/values
* from DT.  It is not clear what these registers are, or whether
* the values differ between boards/SoCs; presumably they are the
* "golden" register settings.
*
* Not setting these does not seem to cause any problem. But
* we may be getting lucky with the bootloader initializing
* them for us. OTOH, if we can always count on the bootloader
* setting the golden registers, then perhaps we don't need to
* care.
*/
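/* Start from a clean slate: deselect all display interfaces. */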
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
pm_runtime_put_sync(dev);
return 0;
}
/* Global/shared object state funcs */
/*
* This is a helper that returns the private state currently in operation.
* Note that this would return the "old_state" if called in the atomic check
* path, and the "new_state" after the atomic swap has been done.
*/
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
return to_mdp5_global_state(mdp5_kms->glob_state.state);
}
/*
* This acquires the modeset lock set aside for global state, creates
* a new duplicated private object state.
*/
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct drm_private_state *priv_state;
int ret;
ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
if (ret)
return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_mdp5_global_state(priv_state);
}
static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
struct mdp5_global_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void mdp5_global_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
kfree(mdp5_state);
}
static const struct drm_private_state_funcs mdp5_global_state_funcs = {
.atomic_duplicate_state = mdp5_global_duplicate_state,
.atomic_destroy_state = mdp5_global_destroy_state,
};
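/*
 * Create the private object carrying the driver-global atomic state
 * (hwpipe-to-plane and SMP assignments), protected by its own modeset
 * lock.
 */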
static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
struct mdp5_global_state *state;
drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
state->mdp5_kms = mdp5_kms;
drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
&state->base,
&mdp5_global_state_funcs);
return 0;
}
static void mdp5_enable_commit(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
pm_runtime_get_sync(&mdp5_kms->pdev->dev);
}
static void mdp5_disable_commit(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
pm_runtime_put_sync(&mdp5_kms->pdev->dev);
}
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct mdp5_global_state *global_state;
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}
static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
/* TODO */
}
static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_crtc *crtc;
for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
mdp5_crtc_wait_for_commit_done(crtc);
}
static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct mdp5_global_state *global_state;
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
}
static int mdp5_set_split_display(struct msm_kms *kms,
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,
bool is_cmd_mode)
{
if (is_cmd_mode)
return mdp5_cmd_encoder_set_split_display(encoder,
slave_encoder);
else
return mdp5_vid_encoder_set_split_display(encoder,
slave_encoder);
}
static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_gem_address_space *aspace = kms->aspace;
int i;
for (i = 0; i < mdp5_kms->num_hwmixers; i++)
mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
for (i = 0; i < mdp5_kms->num_hwpipes; i++)
mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
mdp_kms_destroy(&mdp5_kms->base);
mdp5_destroy(mdp5_kms);
}
#ifdef CONFIG_DEBUG_FS
static int smp_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct drm_printer p = drm_seq_file_printer(m);
if (!mdp5_kms->smp) {
drm_printf(&p, "no SMP pool\n");
return 0;
}
mdp5_smp_dump(mdp5_kms->smp, &p);
return 0;
}
static struct drm_info_list mdp5_debugfs_list[] = {
{"smp", smp_show },
};
static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
drm_debugfs_create_files(mdp5_debugfs_list,
ARRAY_SIZE(mdp5_debugfs_list),
minor->debugfs_root, minor);
return 0;
}
#endif
static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp5_hw_init,
.irq_preinstall = mdp5_irq_preinstall,
.irq_postinstall = mdp5_irq_postinstall,
.irq_uninstall = mdp5_irq_uninstall,
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
.flush_commit = mdp5_flush_commit,
.enable_commit = mdp5_enable_commit,
.disable_commit = mdp5_disable_commit,
.prepare_commit = mdp5_prepare_commit,
.wait_flush = mdp5_wait_flush,
.complete_commit = mdp5_complete_commit,
.get_format = mdp_get_format,
.set_split_display = mdp5_set_split_display,
.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = mdp5_kms_debugfs_init,
#endif
},
.set_irqmask = mdp5_set_irqmask,
};
static int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
DBG("");
mdp5_kms->enable_count--;
WARN_ON(mdp5_kms->enable_count < 0);
clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
clk_disable_unprepare(mdp5_kms->tbu_clk);
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
clk_disable_unprepare(mdp5_kms->lut_clk);
return 0;
}
static int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
DBG("");
mdp5_kms->enable_count++;
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
clk_prepare_enable(mdp5_kms->lut_clk);
clk_prepare_enable(mdp5_kms->tbu_clk);
clk_prepare_enable(mdp5_kms->tbu_rt_clk);
return 0;
}
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
struct mdp5_interface *intf,
struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
struct drm_encoder *encoder;
encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
return encoder;
}
return encoder;
}
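/*
 * Translate an interface number into a DSI instance id by counting
 * the DSI-type entries that precede it in the hw config's connect
 * table.
 */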
static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
int id = 0, i;
for (i = 0; i < intf_cnt; i++) {
if (intfs[i] == INTF_DSI) {
if (intf_num == i)
return id;
id++;
}
}
return -EINVAL;
}
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
struct mdp5_interface *intf)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
struct mdp5_ctl *ctl;
struct drm_encoder *encoder;
int ret = 0;
switch (intf->type) {
case INTF_eDP:
DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
break;
case INTF_HDMI:
if (!priv->hdmi)
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
}
ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
break;
case INTF_DSI:
{
const struct mdp5_cfg_hw *hw_cfg =
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
}
if (!priv->dsi[dsi_id])
break;
ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
}
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (!ret)
mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
break;
}
default:
DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
return ret;
}
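/*
 * Create the drm modeset objects from the parsed hw config: encoders
 * and connectors for each interface, one plane per hw pipe, and one
 * CRTC per encoder (bounded by the number of layer mixers).
 */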
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
struct drm_plane *cursor[MAX_BASES] = { NULL };
struct drm_encoder *encoder;
unsigned int num_encoders;
/*
* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
for (i = 0; i < mdp5_kms->num_intfs; i++) {
ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
if (ret)
goto fail;
}
num_encoders = 0;
drm_for_each_encoder(encoder, dev)
num_encoders++;
/*
* We should ideally have fewer encoders (set up by parsing the MDP5
* interfaces) than layer mixers present in HW, but let's be safe
* here anyway.
*/
num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
* N encoders set up by the driver. The first N planes become primary
* planes for the CRTCs, with the remainder as overlay planes:
*/
for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
struct drm_plane *plane;
enum drm_plane_type type;
if (i < num_crtcs)
type = DRM_PLANE_TYPE_PRIMARY;
else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
type = DRM_PLANE_TYPE_CURSOR;
else
type = DRM_PLANE_TYPE_OVERLAY;
plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
if (type == DRM_PLANE_TYPE_PRIMARY)
primary[pi++] = plane;
if (type == DRM_PLANE_TYPE_CURSOR)
cursor[ci++] = plane;
}
for (i = 0; i < num_crtcs; i++) {
struct drm_crtc *crtc;
crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->num_crtcs++;
}
/*
* Now that we know the number of crtcs we've created, set the possible
* crtcs for the encoders
*/
drm_for_each_encoder(encoder, dev)
encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
return 0;
fail:
return ret;
}
static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
u32 *major, u32 *minor)
{
struct device *dev = &mdp5_kms->pdev->dev;
u32 version;
pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
pm_runtime_put_sync(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
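/*
 * Fetch a named clock; failure is an error only for mandatory clocks,
 * optional ones are skipped with a debug message.
 */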
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name, bool mandatory)
{
struct device *dev = &pdev->dev;
struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
if (IS_ERR(clk))
DBG("skipping %s", name);
else
*clkp = clk;
return 0;
}
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);
static int mdp5_kms_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev;
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms;
struct msm_gem_address_space *aspace;
int irq, i, ret;
ret = mdp5_init(to_platform_device(dev->dev), dev);
if (ret)
return ret;
/* priv->kms was populated by mdp5_init() above */
kms = priv->kms;
if (!kms)
return -ENOMEM;
mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
pdev = mdp5_kms->pdev;
ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
goto fail;
}
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!irq) {
ret = -EINVAL;
DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
goto fail;
}
kms->irq = irq;
config = mdp5_cfg_get_config(mdp5_kms->cfg);
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
pm_runtime_get_sync(&pdev->dev);
for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
!config->hw->intf.base[i])
continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
}
mdelay(16);
aspace = msm_kms_init_aspace(mdp5_kms->dev);
if (IS_ERR(aspace)) {
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
pm_runtime_put_sync(&pdev->dev);
ret = modeset_init(mdp5_kms);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = 0xffff;
dev->mode_config.max_height = 0xffff;
dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
return 0;
fail:
if (kms)
mdp5_kms_destroy(kms);
return ret;
}
static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
{
int i;
if (mdp5_kms->ctlm)
mdp5_ctlm_destroy(mdp5_kms->ctlm);
if (mdp5_kms->smp)
mdp5_smp_destroy(mdp5_kms->smp);
if (mdp5_kms->cfg)
mdp5_cfg_destroy(mdp5_kms->cfg);
for (i = 0; i < mdp5_kms->num_intfs; i++)
kfree(mdp5_kms->intfs[i]);
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&mdp5_kms->pdev->dev);
drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
const enum mdp5_pipe *pipes, const uint32_t *offsets,
uint32_t caps)
{
struct drm_device *dev = mdp5_kms->dev;
int i, ret;
for (i = 0; i < cnt; i++) {
struct mdp5_hw_pipe *hwpipe;
hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
pipe2name(pipes[i]), ret);
return ret;
}
hwpipe->idx = mdp5_kms->num_hwpipes;
mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
}
return 0;
}
static int hwpipe_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe rgb_planes[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
static const enum mdp5_pipe vig_planes[] = {
SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
};
static const enum mdp5_pipe dma_planes[] = {
SSPP_DMA0, SSPP_DMA1,
};
static const enum mdp5_pipe cursor_planes[] = {
SSPP_CURSOR0, SSPP_CURSOR1,
};
const struct mdp5_cfg_hw *hw_cfg;
int ret;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
/* Construct RGB pipes: */
ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
if (ret)
return ret;
/* Construct video (VIG) pipes: */
ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
if (ret)
return ret;
/* Construct DMA pipes: */
ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
if (ret)
return ret;
/* Construct cursor pipes: */
ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
cursor_planes, hw_cfg->pipe_cursor.base,
hw_cfg->pipe_cursor.caps);
if (ret)
return ret;
return 0;
}
static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
for (i = 0; i < hw_cfg->lm.count; i++) {
struct mdp5_hw_mixer *mixer;
mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
i, ret);
return ret;
}
mixer->idx = mdp5_kms->num_hwmixers;
mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
}
return 0;
}
static int interface_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
const struct mdp5_cfg_hw *hw_cfg;
const enum mdp5_intf_type *intf_types;
int i;
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
intf_types = hw_cfg->intf.connect;
for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
struct mdp5_interface *intf;
if (intf_types[i] == INTF_DISABLED)
continue;
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf) {
DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
}
intf->num = i;
intf->type = intf_types[i];
intf->mode = MDP5_INTF_MODE_NONE;
intf->idx = mdp5_kms->num_intfs;
mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
}
return 0;
}
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
u32 major, minor;
int ret;
mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms) {
ret = -ENOMEM;
goto fail;
}
spin_lock_init(&mdp5_kms->resource_lock);
mdp5_kms->dev = dev;
mdp5_kms->pdev = pdev;
ret = mdp5_global_obj_init(mdp5_kms);
if (ret)
goto fail;
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
goto fail;
}
/* mandatory clocks: */
ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
goto fail;
/* optional clocks: */
get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
clk_set_rate(mdp5_kms->core_clk, 200000000);
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
pm_runtime_enable(&pdev->dev);
mdp5_kms->rpm_enabled = true;
read_mdp_hw_revision(mdp5_kms, &major, &minor);
mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(mdp5_kms->cfg)) {
ret = PTR_ERR(mdp5_kms->cfg);
mdp5_kms->cfg = NULL;
goto fail;
}
config = mdp5_cfg_get_config(mdp5_kms->cfg);
mdp5_kms->caps = config->hw->mdp.caps;
/* TODO: compute core clock rate at runtime */
clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
/*
* Some chipsets have a Shared Memory Pool (SMP), while others
* have dedicated latency buffering per source pipe instead;
* this section initializes the SMP:
*/
if (mdp5_kms->caps & MDP_CAP_SMP) {
mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
if (IS_ERR(mdp5_kms->smp)) {
ret = PTR_ERR(mdp5_kms->smp);
mdp5_kms->smp = NULL;
goto fail;
}
}
mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
if (IS_ERR(mdp5_kms->ctlm)) {
ret = PTR_ERR(mdp5_kms->ctlm);
mdp5_kms->ctlm = NULL;
goto fail;
}
ret = hwpipe_init(mdp5_kms);
if (ret)
goto fail;
ret = hwmixer_init(mdp5_kms);
if (ret)
goto fail;
ret = interface_init(mdp5_kms);
if (ret)
goto fail;
return 0;
fail:
if (mdp5_kms)
mdp5_destroy(mdp5_kms);
return ret;
}
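/*
 * Vote for scanout bus bandwidth up front: a fixed, conservative
 * 6400 MB/s on each interconnect path that is available, rather than
 * a per-mode calculation.
 */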
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);
if (!path0) {
/* Missing interconnect support is not necessarily fatal: the
 * platform may simply not have an interconnect driver yet.  Warn
 * anyway, in case the bootloader didn't set the bus clocks high
 * enough for scanout.
 */
dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
return 0;
}
icc_set_bw(path0, 0, MBps_to_icc(6400));
if (!IS_ERR_OR_NULL(path1))
icc_set_bw(path1, 0, MBps_to_icc(6400));
if (!IS_ERR_OR_NULL(path_rot))
icc_set_bw(path_rot, 0, MBps_to_icc(6400));
return 0;
}
static int mdp5_dev_probe(struct platform_device *pdev)
{
int ret;
DBG("");
ret = mdp5_setup_interconnect(pdev);
if (ret)
return ret;
return msm_drv_probe(&pdev->dev, mdp5_kms_init);
}
static int mdp5_dev_remove(struct platform_device *pdev)
{
DBG("");
component_master_del(&pdev->dev, &msm_drm_ops);
return 0;
}
static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
DBG("");
return mdp5_disable(mdp5_kms);
}
static __maybe_unused int mdp5_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
DBG("");
return mdp5_enable(mdp5_kms);
}
static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
};
static const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
{ .compatible = "qcom,mdss_mdp", },
{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);
static struct platform_driver mdp5_driver = {
.probe = mdp5_dev_probe,
.remove = mdp5_dev_remove,
.shutdown = msm_drv_shutdown,
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,
.pm = &mdp5_pm_ops,
},
};
void __init msm_mdp_register(void)
{
DBG("");
platform_driver_register(&mdp5_driver);
}
void __exit msm_mdp_unregister(void)
{
DBG("");
platform_driver_unregister(&mdp5_driver);
}
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*/
#include "mdp5_kms.h"
#include "mdp5_cfg.h"
struct mdp5_cfg_handler {
int revision;
struct mdp5_cfg config;
};
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
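/*
 * Per-SoC hardware descriptions: block counts, register base offsets
 * and capability flags for each sub-block, selected at runtime from
 * the hw revision register (see mdp5_cfg_init() at the bottom).
 */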
static const struct mdp5_cfg_hw msm8x74v1_config = {
.name = "msm8x74v1",
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
0,
},
.smp = {
.mmb_count = 22,
.mmb_size = 4096,
.clients = {
[SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
[SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
},
},
.ctl = {
.count = 5,
.base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
.base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
0,
},
.pipe_rgb = {
.count = 3,
.base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
0,
},
.pipe_dma = {
.count = 2,
.base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
0,
},
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 3,
.base = { 0x04500, 0x04900, 0x04d00 },
},
.pp = {
.count = 3,
.base = { 0x21a00, 0x21b00, 0x21c00 },
},
.intf = {
.base = { 0x21000, 0x21200, 0x21400, 0x21600 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.perf = {
.ab_inefficiency = 200,
.ib_inefficiency = 120,
.clk_inefficiency = 125
},
.max_clk = 200000000,
};
static const struct mdp5_cfg_hw msm8x26_config = {
.name = "msm8x26",
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
0,
},
.smp = {
.mmb_count = 7,
.mmb_size = 4096,
.clients = {
[SSPP_VIG0] = 1,
[SSPP_DMA0] = 4,
[SSPP_RGB0] = 7,
},
},
.ctl = {
.count = 2,
.base = { 0x00500, 0x00600 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x01100 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
0,
},
.pipe_rgb = {
.count = 1,
.base = { 0x01d00 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
0,
},
.pipe_dma = {
.count = 1,
.base = { 0x02900 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
0,
},
.lm = {
.count = 2,
.base = { 0x03100, 0x03d00 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 2,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x04500 },
},
.pp = {
.count = 1,
.base = { 0x21a00 },
},
.intf = {
.base = { 0x00000, 0x21200 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
},
},
.perf = {
.ab_inefficiency = 100,
.ib_inefficiency = 200,
.clk_inefficiency = 125
},
.max_clk = 200000000,
};
static const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
0,
},
.smp = {
.mmb_count = 22,
.mmb_size = 4096,
.clients = {
[SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
[SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
},
},
.ctl = {
.count = 5,
.base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
.base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 3,
.base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
.base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
},
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 3,
.base = { 0x04500, 0x04900, 0x04d00 },
},
.ad = {
.count = 2,
.base = { 0x13000, 0x13200 },
},
.pp = {
.count = 3,
.base = { 0x12c00, 0x12d00, 0x12e00 },
},
.intf = {
.base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.perf = {
.ab_inefficiency = 200,
.ib_inefficiency = 120,
.clk_inefficiency = 125
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
.mmb_count = 44,
.mmb_size = 8192,
.clients = {
[SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
[SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
[SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
[SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
},
.reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
.reserved = {
/* Two SMP blocks are statically tied to RGB pipes: */
[16] = 2, [17] = 2, [18] = 2, [22] = 2,
},
},
.ctl = {
.count = 5,
.base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x003fffff,
},
.pipe_vig = {
.count = 4,
.base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
.base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
.base = { 0x03100, 0x03500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
.base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 5, .pp = 3, .dspp = 3,
.caps = MDP_LM_CAP_DISPLAY, },
},
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
.base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
},
.ad = {
.count = 3,
.base = { 0x13400, 0x13600, 0x13800 },
},
.pp = {
.count = 4,
.base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
},
.intf = {
.base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.perf = {
.ab_inefficiency = 200,
.ib_inefficiency = 120,
.clk_inefficiency = 105
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_hw msm8x16_config = {
.name = "msm8x16",
.mdp = {
.count = 1,
.base = { 0x0 },
.caps = MDP_CAP_SMP |
0,
},
.smp = {
.mmb_count = 8,
.mmb_size = 8192,
.clients = {
[SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
[SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
},
},
.ctl = {
.count = 5,
.base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0x4003ffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.intf = {
.base = { 0x00000, 0x6a800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
},
},
.perf = {
.ab_inefficiency = 100,
.ib_inefficiency = 200,
.clk_inefficiency = 105
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_hw msm8x36_config = {
.name = "msm8x36",
.mdp = {
.count = 1,
.base = { 0x0 },
.caps = MDP_CAP_SMP |
0,
},
.smp = {
.mmb_count = 8,
.mmb_size = 10240,
.clients = {
[SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
[SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
},
},
.ctl = {
.count = 3,
.base = { 0x01000, 0x01200, 0x01400 },
.flush_hw_mask = 0x4003ffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 2,
.base = { 0x44000, 0x47000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
},
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.pp = {
.count = 1,
.base = { 0x70000 },
},
.ad = {
.count = 1,
.base = { 0x78000 },
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.intf = {
.base = { 0x00000, 0x6a800, 0x6b000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
},
},
.perf = {
.ab_inefficiency = 100,
.ib_inefficiency = 200,
.clk_inefficiency = 105
},
.max_clk = 366670000,
};
static const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
.mmb_count = 44,
.mmb_size = 8192,
.clients = {
[SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
[SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
[SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
[SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
},
.reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */
.reserved = {
[1] = 1, [4] = 1, [7] = 1, [19] = 1,
[16] = 5, [17] = 5, [18] = 5, [22] = 5,
},
},
.ctl = {
.count = 5,
.base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf0ffffff,
},
.pipe_vig = {
.count = 4,
.base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
.base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
.base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = 2,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 5, .pp = 3, .dspp = 3,
.caps = MDP_LM_CAP_DISPLAY, },
},
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
.base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
},
.ad = {
.count = 3,
.base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
.base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.intf = {
.base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.perf = {
.ab_inefficiency = 100,
.ib_inefficiency = 100,
.clk_inefficiency = 105
},
.max_clk = 400000000,
};
static const struct mdp5_cfg_hw msm8x96_config = {
.name = "msm8x96",
.mdp = {
.count = 1,
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
.count = 5,
.base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf4ffffff,
},
.pipe_vig = {
.count = 4,
.base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 4,
.base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 2,
.base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 2,
.base = { 0x34000, 0x36000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 5, .pp = 3, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY, },
},
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 2,
.base = { 0x54000, 0x56000 },
},
.ad = {
.count = 3,
.base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
.base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.dsc = {
.count = 2,
.base = { 0x80000, 0x80400 },
},
.intf = {
.base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.perf = {
.ab_inefficiency = 100,
.ib_inefficiency = 200,
.clk_inefficiency = 105
},
.max_clk = 412500000,
};
static const struct mdp5_cfg_hw msm8x76_config = {
.name = "msm8x76",
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
MDP_CAP_DSC |
MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
.count = 3,
.base = { 0x01000, 0x01200, 0x01400 },
.flush_hw_mask = 0xffffffff,
},
.smp = {
.mmb_count = 10,
.mmb_size = 10240,
.clients = {
[SSPP_VIG0] = 1, [SSPP_VIG1] = 9,
[SSPP_DMA0] = 4,
[SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
},
},
.pipe_vig = {
.count = 2,
.base = { 0x04000, 0x06000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x440DC },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 2,
.base = { 0x44000, 0x45000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.pp = {
.count = 3,
.base = { 0x70000, 0x70800, 0x72000 },
},
.dsc = {
.count = 2,
.base = { 0x80000, 0x80400 },
},
.intf = {
.base = { 0x6a000, 0x6a800, 0x6b000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
},
},
.max_clk = 360000000,
};
static const struct mdp5_cfg_hw msm8x53_config = {
.name = "msm8x53",
.mdp = {
.count = 1,
.caps = MDP_CAP_CDM |
MDP_CAP_SRC_SPLIT,
},
.ctl = {
.count = 3,
.base = { 0x01000, 0x01200, 0x01400 },
.flush_hw_mask = 0xffffffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x34000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 3,
.base = { 0x44000, 0x45000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR },
{ .id = 1, .pp = 1, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY },
},
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.pp = {
.count = 2,
.base = { 0x70000, 0x70800 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.intf = {
.base = { 0x6a000, 0x6a800, 0x6b000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
},
},
.perf = {
.ab_inefficiency = 100,
.ib_inefficiency = 200,
.clk_inefficiency = 105
},
.max_clk = 400000000,
};
static const struct mdp5_cfg_hw msm8917_config = {
.name = "msm8917",
.mdp = {
.count = 1,
.caps = MDP_CAP_CDM,
},
.ctl = {
.count = 3,
.base = { 0x01000, 0x01200, 0x01400 },
.flush_hw_mask = 0xffffffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x34000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 2,
.base = { 0x44000, 0x45000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.pp = {
.count = 1,
.base = { 0x70000 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.intf = {
.base = { 0x6a000, 0x6a800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
},
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_hw msm8998_config = {
.name = "msm8998",
.mdp = {
.count = 1,
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
.count = 5,
.base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf7ffffff,
},
.pipe_vig = {
.count = 4,
.base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 4,
.base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 2, /* driver supports max of 2 currently */
.base = { 0x24000, 0x26000, 0x28000, 0x2a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 2,
.base = { 0x34000, 0x36000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 4, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
{ .id = 5, .pp = 3, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY, },
},
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 2,
.base = { 0x54000, 0x56000 },
},
.ad = {
.count = 3,
.base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
.base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.dsc = {
.count = 2,
.base = { 0x80000, 0x80400 },
},
.intf = {
.base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.max_clk = 412500000,
};
static const struct mdp5_cfg_hw sdm630_config = {
.name = "sdm630",
.mdp = {
.count = 1,
.caps = MDP_CAP_CDM |
MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
.count = 5,
.base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf4ffffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 4,
.base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 2, /* driver supports max of 2 currently */
.base = { 0x24000, 0x26000, 0x28000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x34000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 2,
.base = { 0x44000, 0x46000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
},
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.ad = {
.count = 2,
.base = { 0x78000, 0x78800 },
},
.pp = {
.count = 3,
.base = { 0x70000, 0x71000, 0x72000 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.intf = {
.base = { 0x6a000, 0x6a800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
},
},
.max_clk = 412500000,
};
static const struct mdp5_cfg_hw sdm660_config = {
.name = "sdm660",
.mdp = {
.count = 1,
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
.count = 5,
.base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf4ffffff,
},
.pipe_vig = {
.count = 2,
.base = { 0x04000, 0x6000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 4,
.base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 2, /* driver supports max of 2 currently */
.base = { 0x24000, 0x26000, 0x28000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x34000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 4,
.base = { 0x44000, 0x45000, 0x46000, 0x49000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 1, .pp = 1, .dspp = 1,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 2, .pp = 2, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR, },
{ .id = 3, .pp = 3, .dspp = -1,
.caps = MDP_LM_CAP_WB, },
},
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 2,
.base = { 0x54000, 0x56000 },
},
.ad = {
.count = 2,
.base = { 0x78000, 0x78800 },
},
.pp = {
.count = 5,
.base = { 0x70000, 0x70800, 0x71000, 0x71800, 0x72000 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.dsc = {
.count = 2,
.base = { 0x80000, 0x80400 },
},
.intf = {
.base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.max_clk = 412500000,
};
static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 1, .config = { .hw = &msm8x26_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 8, .config = { .hw = &msm8x36_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
{ .revision = 16, .config = { .hw = &msm8x53_config } },
};
static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
{ .revision = 0, .config = { .hw = &msm8998_config } },
{ .revision = 2, .config = { .hw = &sdm660_config } },
{ .revision = 3, .config = { .hw = &sdm630_config } },
};
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
{
return cfg_handler->config.hw;
}
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
{
return &cfg_handler->config;
}
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
{
return cfg_handler->revision;
}
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
{
kfree(cfg_handler);
}
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
struct mdp5_cfg_handler *cfg_handler;
const struct mdp5_cfg_handler *cfg_handlers;
int i, ret = 0, num_handlers;
cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
ret = -ENOMEM;
goto fail;
}
switch (major) {
case 1:
cfg_handlers = cfg_handlers_v1;
num_handlers = ARRAY_SIZE(cfg_handlers_v1);
break;
case 3:
cfg_handlers = cfg_handlers_v3;
num_handlers = ARRAY_SIZE(cfg_handlers_v3);
break;
default:
DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
/* only after the mdp5_cfg global pointer is initialized can we access the hw */
for (i = 0; i < num_handlers; i++) {
if (cfg_handlers[i].revision != minor)
continue;
mdp5_cfg = cfg_handlers[i].config.hw;
break;
}
if (unlikely(!mdp5_cfg)) {
DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
cfg_handler->revision = minor;
cfg_handler->config.hw = mdp5_cfg;
DBG("MDP5: %s hw config selected", mdp5_cfg->name);
return cfg_handler;
fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
return ERR_PTR(ret);
}
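/*
 * Illustrative usage sketch, not part of the driver: how a probe-time
 * caller might select a hw config. mdp5_example_select_config() is a
 * hypothetical name; the mdp5_kms pointer and the major/minor values read
 * from the HW ID register are assumed to be available.
 */
static int __maybe_unused mdp5_example_select_config(struct mdp5_kms *mdp5_kms,
		uint32_t major, uint32_t minor)
{
	struct mdp5_cfg_handler *handler;

	handler = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	/* the selected config now drives pipe/mixer/interface setup */
	DBG("using %s (rev %d)", mdp5_cfg_get_hw_config(handler)->name,
	    mdp5_cfg_get_hw_rev(handler));

	mdp5_cfg_destroy(handler);
	return 0;
}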
| linux-master | drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__
#include <linux/debugfs.h>
#include <drm/drm_connector.h>
#include <drm/drm_file.h>
#include "dp_parser.h"
#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_ctrl.h"
#include "dp_debug.h"
#include "dp_display.h"
#define DEBUG_NAME "msm_dp"
struct dp_debug_private {
struct dentry *root;
struct dp_link *link;
struct dp_panel *panel;
struct drm_connector *connector;
struct device *dev;
struct drm_device *drm_dev;
struct dp_debug dp_debug;
};
static int dp_debug_show(struct seq_file *seq, void *p)
{
struct dp_debug_private *debug = seq->private;
u64 lclk = 0;
u32 link_params_rate;
const struct drm_display_mode *drm_mode;
if (!debug)
return -ENODEV;
drm_mode = &debug->panel->dp_mode.drm_mode;
seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
debug->panel->link_info.rate);
seq_printf(seq, "\t\tnum_lanes = %u\n",
debug->panel->link_info.num_lanes);
seq_printf(seq, "\t\tcapabilities = %lu\n",
debug->panel->link_info.capabilities);
seq_printf(seq, "\tdp_panel_info:\n\t\tactive = %dx%d\n",
drm_mode->hdisplay,
drm_mode->vdisplay);
seq_printf(seq, "\t\tback_porch = %dx%d\n",
drm_mode->htotal - drm_mode->hsync_end,
drm_mode->vtotal - drm_mode->vsync_end);
seq_printf(seq, "\t\tfront_porch = %dx%d\n",
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->vsync_start - drm_mode->vdisplay);
seq_printf(seq, "\t\tsync_width = %dx%d\n",
drm_mode->hsync_end - drm_mode->hsync_start,
drm_mode->vsync_end - drm_mode->vsync_start);
seq_printf(seq, "\t\tactive_low = %dx%d\n",
debug->panel->dp_mode.h_active_low,
debug->panel->dp_mode.v_active_low);
seq_printf(seq, "\t\th_skew = %d\n",
drm_mode->hskew);
seq_printf(seq, "\t\trefresh rate = %d\n",
drm_mode_vrefresh(drm_mode));
seq_printf(seq, "\t\tpixel clock khz = %d\n",
drm_mode->clock);
seq_printf(seq, "\t\tbpp = %d\n",
debug->panel->dp_mode.bpp);
/* Link Information */
seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n",
debug->link->sink_request);
seq_printf(seq, "\t\tnum_lanes = %d\n",
debug->link->link_params.num_lanes);
link_params_rate = debug->link->link_params.rate;
seq_printf(seq, "\t\tbw_code = %d\n",
drm_dp_link_rate_to_bw_code(link_params_rate));
lclk = debug->link->link_params.rate * 1000;
seq_printf(seq, "\t\tlclk = %lld\n", lclk);
seq_printf(seq, "\t\tv_level = %d\n",
debug->link->phy_params.v_level);
seq_printf(seq, "\t\tp_level = %d\n",
debug->link->phy_params.p_level);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dp_debug);
static int dp_test_data_show(struct seq_file *m, void *data)
{
const struct dp_debug_private *debug = m->private;
const struct drm_connector *connector = debug->connector;
u32 bpc;
if (connector->status == connector_status_connected) {
bpc = debug->link->test_video.test_bit_depth;
seq_printf(m, "hdisplay: %d\n",
debug->link->test_video.test_h_width);
seq_printf(m, "vdisplay: %d\n",
debug->link->test_video.test_v_height);
seq_printf(m, "bpc: %u\n",
dp_link_bit_depth_to_bpc(bpc));
} else {
seq_puts(m, "0");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dp_test_data);
static int dp_test_type_show(struct seq_file *m, void *data)
{
const struct dp_debug_private *debug = m->private;
const struct drm_connector *connector = debug->connector;
if (connector->status == connector_status_connected)
seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
else
seq_puts(m, "0");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dp_test_type);
static ssize_t dp_test_active_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
char *input_buffer;
int status = 0;
const struct dp_debug_private *debug;
const struct drm_connector *connector;
int val = 0;
debug = ((struct seq_file *)file->private_data)->private;
connector = debug->connector;
if (len == 0)
return 0;
input_buffer = memdup_user_nul(ubuf, len);
if (IS_ERR(input_buffer))
return PTR_ERR(input_buffer);
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
if (connector->status == connector_status_connected) {
status = kstrtoint(input_buffer, 10, &val);
if (status < 0) {
kfree(input_buffer);
return status;
}
DRM_DEBUG_DRIVER("Got %d for test active\n", val);
/*
* To prevent erroneous activation of the compliance
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
debug->panel->video_test = true;
else
debug->panel->video_test = false;
}
kfree(input_buffer);
*offp += len;
return len;
}
static int dp_test_active_show(struct seq_file *m, void *data)
{
struct dp_debug_private *debug = m->private;
struct drm_connector *connector = debug->connector;
if (connector->status == connector_status_connected) {
if (debug->panel->video_test)
seq_puts(m, "1");
else
seq_puts(m, "0");
} else {
seq_puts(m, "0");
}
return 0;
}
static int dp_test_active_open(struct inode *inode,
struct file *file)
{
return single_open(file, dp_test_active_show,
inode->i_private);
}
static const struct file_operations test_active_fops = {
.owner = THIS_MODULE,
.open = dp_test_active_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = dp_test_active_write
};
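/*
 * A hedged sketch of the pattern above, not driver code: DEFINE_SHOW_ATTRIBUTE()
 * only generates the read-side file_operations, so a writable node such as
 * msm_dp_test_active has to spell out the single_open() boilerplate by hand
 * and add a .write hook. The dp_example_* names below are hypothetical.
 */
static int __maybe_unused dp_example_show(struct seq_file *m, void *data)
{
	seq_puts(m, "example\n");
	return 0;
}

static int __maybe_unused dp_example_open(struct inode *inode, struct file *file)
{
	/* stash inode->i_private so the show/write callbacks can reach it */
	return single_open(file, dp_example_show, inode->i_private);
}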
static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
{
char path[64];
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);
snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name);
debug->root = debugfs_create_dir(path, minor->debugfs_root);
debugfs_create_file("dp_debug", 0444, debug->root,
debug, &dp_debug_fops);
debugfs_create_file("msm_dp_test_active", 0444,
debug->root,
debug, &test_active_fops);
debugfs_create_file("msm_dp_test_data", 0444,
debug->root,
debug, &dp_test_data_fops);
debugfs_create_file("msm_dp_test_type", 0444,
debug->root,
debug, &dp_test_type_fops);
}
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
struct drm_connector *connector, struct drm_minor *minor)
{
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
int rc;
if (!dev || !panel || !link) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
}
debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
if (!debug) {
rc = -ENOMEM;
goto error;
}
debug->dp_debug.debug_en = false;
debug->link = link;
debug->panel = panel;
debug->dev = dev;
debug->drm_dev = minor->dev;
debug->connector = connector;
dp_debug = &debug->dp_debug;
dp_debug->vdisplay = 0;
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;
dp_debug_init(dp_debug, minor);
return dp_debug;
error:
return ERR_PTR(rc);
}
static int dp_debug_deinit(struct dp_debug *dp_debug)
{
struct dp_debug_private *debug;
if (!dp_debug)
return -EINVAL;
debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
debugfs_remove_recursive(debug->root);
return 0;
}
void dp_debug_put(struct dp_debug *dp_debug)
{
struct dp_debug_private *debug;
if (!dp_debug)
return;
debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
dp_debug_deinit(dp_debug);
devm_kfree(debug->dev, debug);
}
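/*
 * Illustrative lifecycle sketch (hypothetical caller, not from this file):
 * the display driver pairs dp_debug_get() at bind time with dp_debug_put()
 * at teardown; all pointers are assumed valid here.
 */
static void __maybe_unused dp_debug_example_lifecycle(struct device *dev,
		struct dp_panel *panel, struct dp_link *link,
		struct drm_connector *connector, struct drm_minor *minor)
{
	struct dp_debug *dbg;

	dbg = dp_debug_get(dev, panel, link, connector, minor);
	if (IS_ERR(dbg))
		return;

	/* ... the debugfs nodes are live between these two calls ... */

	dp_debug_put(dbg);
}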
| linux-master | drivers/gpu/drm/msm/dp/dp_debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/rational.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
#include "dp_catalog.h"
#include "dp_reg.h"
#define POLLING_SLEEP_US 1000
#define POLLING_TIMEOUT_US 10000
#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
#define DP_INTERRUPT_STATUS1 \
(DP_INTR_AUX_XFER_DONE | \
DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
#define DP_INTERRUPT_STATUS1_ACK \
(DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
#define DP_INTERRUPT_STATUS1_MASK \
(DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
#define DP_INTERRUPT_STATUS2 \
(DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
#define DP_INTERRUPT_STATUS2_ACK \
(DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
#define DP_INTERRUPT_STATUS2_MASK \
(DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
#define DP_INTERRUPT_STATUS4 \
(PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \
PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT)
#define DP_INTERRUPT_MASK4 \
(PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
struct dp_catalog_private {
struct device *dev;
struct drm_device *drm_dev;
struct dp_io *io;
u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct dp_catalog dp_catalog;
u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
};
void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
struct dss_io_data *dss = &catalog->io->dp_controller;
msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link");
msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
}
static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io->dp_controller.aux.base + offset);
}
static inline void dp_write_aux(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
* To make sure aux reg writes happen before any other operation,
* this function uses writel() instead of writel_relaxed()
*/
writel(data, catalog->io->dp_controller.aux.base + offset);
}
static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io->dp_controller.ahb.base + offset);
}
static inline void dp_write_ahb(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
* To make sure phy reg writes happen before any other operation,
* this function uses writel() instead of writel_relaxed()
*/
writel(data, catalog->io->dp_controller.ahb.base + offset);
}
static inline void dp_write_p0(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
* To make sure interface reg writes happen before any other operation,
* this function uses writel() instead of writel_relaxed()
*/
writel(data, catalog->io->dp_controller.p0.base + offset);
}
static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
u32 offset)
{
/*
* Reads carry no ordering requirement of their own, so plain
* readl_relaxed() is sufficient here; only the writes need writel()
*/
return readl_relaxed(catalog->io->dp_controller.p0.base + offset);
}
static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io->dp_controller.link.base + offset);
}
static inline void dp_write_link(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
* To make sure link reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
writel(data, catalog->io->dp_controller.link.base + offset);
}
/* aux related catalog functions */
u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
return dp_read_aux(catalog, REG_DP_AUX_DATA);
}
int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data);
return 0;
}
int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data);
return 0;
}
int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read)
{
u32 data;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
if (read) {
data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
data &= ~DP_AUX_TRANS_CTRL_GO;
dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
} else {
dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
}
return 0;
}
int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
return 0;
}
/**
* dp_catalog_aux_reset() - reset AUX controller
*
* @dp_catalog: DP catalog structure
*
* Return: void
*
* This function resets the AUX controller.
*
* NOTE: resetting the AUX controller will also clear any pending HPD related interrupts
*
*/
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
{
u32 aux_ctrl;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
aux_ctrl |= DP_AUX_CTRL_RESET;
dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
usleep_range(1000, 1100); /* h/w recommended delay */
aux_ctrl &= ~DP_AUX_CTRL_RESET;
dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
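/*
 * Minimal sketch of the set/delay/clear pulse used above, factored into a
 * hypothetical helper (not called by the driver). The register offset and
 * reset bit are parameters; the 1 ms delay mirrors the h/w recommended
 * delay in dp_catalog_aux_reset().
 */
static void __maybe_unused dp_example_pulse_reset(struct dp_catalog_private *catalog,
		u32 offset, u32 reset_bit)
{
	u32 val = dp_read_aux(catalog, offset);

	dp_write_aux(catalog, offset, val | reset_bit);
	usleep_range(1000, 1100);
	dp_write_aux(catalog, offset, val & ~reset_bit);
}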
void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
{
u32 aux_ctrl;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
if (enable) {
dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
aux_ctrl |= DP_AUX_CTRL_ENABLE;
} else {
aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
}
dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
struct dp_io *dp_io = catalog->io;
struct phy *phy = dp_io->phy;
phy_calibrate(phy);
}
int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
{
u32 state;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
/* poll for hpd connected status every 2ms and timeout after 500ms */
return readl_poll_timeout(catalog->io->dp_controller.aux.base +
REG_DP_DP_HPD_INT_STATUS,
state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
2000, 500000);
}
static void dump_regs(void __iomem *base, int len)
{
int i;
u32 x0, x4, x8, xc;
u32 addr_off = 0;
len = DIV_ROUND_UP(len, 16);
for (i = 0; i < len; i++) {
x0 = readl_relaxed(base + addr_off);
x4 = readl_relaxed(base + addr_off + 0x04);
x8 = readl_relaxed(base + addr_off + 0x08);
xc = readl_relaxed(base + addr_off + 0x0c);
pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc);
addr_off += 16;
}
}
void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
struct dss_io_data *io = &catalog->io->dp_controller;
pr_info("AHB regs\n");
dump_regs(io->ahb.base, io->ahb.len);
pr_info("AUXCLK regs\n");
dump_regs(io->aux.base, io->aux.len);
pr_info("LCLK regs\n");
dump_regs(io->link.base, io->link.len);
pr_info("P0CLK regs\n");
dump_regs(io->p0.base, io->p0.len);
}
u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 intr, intr_ack;
intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
intr &= ~DP_INTERRUPT_STATUS1_MASK;
intr_ack = (intr & DP_INTERRUPT_STATUS1)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
DP_INTERRUPT_STATUS1_MASK);
return intr;
}
/* controller related catalog functions */
void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
u32 dp_tu, u32 valid_boundary,
u32 valid_boundary2)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
dp_write_link(catalog, REG_DP_TU, dp_tu);
dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
}
void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
dp_write_link(catalog, REG_DP_STATE_CTRL, state);
}
void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
}
void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
u32 ln_mapping;
ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
ln_mapping);
}
void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog,
bool enable)
{
u32 val;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
val = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
if (enable)
val |= DP_MAINLINK_CTRL_ENABLE;
else
val &= ~DP_MAINLINK_CTRL_ENABLE;
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
}
void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
bool enable)
{
u32 mainlink_ctrl;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
if (enable) {
/*
* To make sure link reg writes happens before other operation,
* dp_write_link() function uses writel()
*/
mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
DP_MAINLINK_CTRL_ENABLE);
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
DP_MAINLINK_FB_BOUNDARY_SEL);
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
} else {
mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
}
}
void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
u32 colorimetry_cfg,
u32 test_bits_depth)
{
u32 misc_val;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0);
/* clear bpp bits */
misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
/* Configure clock to synchronous mode */
misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
}
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
u32 rate, u32 stream_rate_khz,
bool fixed_nvid)
{
u32 pixel_m, pixel_n;
u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
u32 const link_rate_hbr2 = 540000;
u32 const link_rate_hbr3 = 810000;
unsigned long den, num;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
if (rate == link_rate_hbr3)
pixel_div = 6;
else if (rate == 162000 || rate == 270000)
pixel_div = 2;
else if (rate == link_rate_hbr2)
pixel_div = 4;
else
DRM_ERROR("Invalid pixel mux divider\n");
dispcc_input_rate = (rate * 10) / pixel_div;
rational_best_approximation(dispcc_input_rate, stream_rate_khz,
(unsigned long)(1 << 16) - 1,
(unsigned long)(1 << 16) - 1, &den, &num);
den = ~(den - num);
den = den & 0xFFFF;
pixel_m = num;
pixel_n = den;
mvid = (pixel_m & 0xFFFF) * 5;
nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
if (nvid < nvid_fixed) {
u32 temp;
temp = (nvid_fixed / nvid) * nvid;
mvid = (nvid_fixed / nvid) * mvid;
nvid = temp;
}
if (link_rate_hbr2 == rate)
nvid *= 2;
if (link_rate_hbr3 == rate)
nvid *= 3;
drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
}
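/*
 * Worked example for the pixel mux divider above (illustrative helper, not
 * called by the driver): HBR3 (810000 kHz) uses a divider of 6, HBR2
 * (540000) uses 4, and RBR/HBR (162000/270000) use 2, so for HBR2 the
 * dispcc input rate comes out as (540000 * 10) / 4 = 1350000.
 */
static u32 __maybe_unused dp_example_dispcc_input_rate(u32 link_rate_khz)
{
	u32 pixel_div;

	if (link_rate_khz == 810000)
		pixel_div = 6;
	else if (link_rate_khz == 540000)
		pixel_div = 4;
	else
		pixel_div = 2;	/* assumes RBR (162000) or HBR (270000) */

	return (link_rate_khz * 10) / pixel_div;
}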
int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
u32 state_bit)
{
int bit, ret;
u32 data;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
bit = BIT(state_bit - 1);
drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
/* Poll for mainlink ready status */
ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base +
REG_DP_MAINLINK_READY,
data, data & bit,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
if (ret < 0) {
DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
return ret;
}
return 0;
}
/**
* dp_catalog_hw_revision() - retrieve DP hw revision
*
* @dp_catalog: DP catalog structure
*
* Return: DP controller hw revision
*
*/
u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog)
{
const struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
return dp_read_ahb(catalog, REG_DP_HW_VERSION);
}
/**
* dp_catalog_ctrl_reset() - reset DP controller
*
* @dp_catalog: DP catalog structure
*
* Return: void
*
* This function resets the DP controller.
*
* NOTE: resetting the DP controller will also clear any pending HPD related interrupts
*
*/
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
{
u32 sw_reset;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET);
sw_reset |= DP_SW_RESET;
dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
usleep_range(1000, 1100); /* h/w recommended delay */
sw_reset &= ~DP_SW_RESET;
dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
}
bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
{
u32 data;
int ret;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
/* Poll for mainlink ready status */
ret = readl_poll_timeout(catalog->io->dp_controller.link.base +
REG_DP_MAINLINK_READY,
data, data & DP_MAINLINK_READY_FOR_VIDEO,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
if (ret < 0) {
DRM_ERROR("mainlink not ready\n");
return false;
}
return true;
}
void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog,
bool enable)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
if (enable) {
dp_write_ahb(catalog, REG_DP_INTR_STATUS,
DP_INTERRUPT_STATUS1_MASK);
dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
DP_INTERRUPT_STATUS2_MASK);
} else {
dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
}
}
void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
u32 intr_mask, bool en)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
config = (en ? config | intr_mask : config & ~intr_mask);
drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
intr_mask, config);
dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
config & DP_DP_HPD_INT_MASK);
}
void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
/* Configure REFTIMER and enable it */
reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
/* Enable HPD */
dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
}
void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
}
static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog)
{
/* trigger sdp */
dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
}
void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 config;
/* enable PSR1 function */
config = dp_read_link(catalog, REG_PSR_CONFIG);
config |= PSR1_SUPPORTED;
dp_write_link(catalog, REG_PSR_CONFIG, config);
dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
dp_catalog_enable_sdp(catalog);
}
void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 cmd;
cmd = dp_read_link(catalog, REG_PSR_CMD);
cmd &= ~(PSR_ENTER | PSR_EXIT);
if (enter)
cmd |= PSR_ENTER;
else
cmd |= PSR_EXIT;
dp_catalog_enable_sdp(catalog);
dp_write_link(catalog, REG_PSR_CMD, cmd);
}
u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 status;
status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
return status;
}
u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
int isr, mask;
isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
(isr & DP_DP_HPD_INT_MASK));
mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
/*
* We only want to return interrupts that are unmasked to the caller.
* However, the interrupt status field also contains other
* informational bits about the HPD state status, so we only mask
* out the part of the register that tells us about which interrupts
* are pending.
*/
return isr & (mask | ~DP_DP_HPD_INT_MASK);
}
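/*
 * Hedged restatement of the return expression above as a standalone helper
 * (hypothetical, for illustration only): unmasked interrupt bits are kept,
 * while the HPD state status bits living outside DP_DP_HPD_INT_MASK always
 * pass through to the caller.
 */
static u32 __maybe_unused dp_example_filter_hpd_isr(u32 isr, u32 mask)
{
	return isr & (mask | ~DP_DP_HPD_INT_MASK);
}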
u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 intr, intr_ack;
intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
intr_ack = (intr & DP_INTERRUPT_STATUS4)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
return intr;
}
int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 intr, intr_ack;
intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
intr &= ~DP_INTERRUPT_STATUS2_MASK;
intr_ack = (intr & DP_INTERRUPT_STATUS2)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
intr_ack | DP_INTERRUPT_STATUS2_MASK);
return intr;
}
void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
dp_write_ahb(catalog, REG_DP_PHY_CTRL,
DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
usleep_range(1000, 1100); /* h/w recommended delay */
dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
}
int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog,
u8 v_level, u8 p_level)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
struct dp_io *dp_io = catalog->io;
struct phy *phy = dp_io->phy;
struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
/* TODO: Update for all lanes instead of just first one */
opts_dp->voltage[0] = v_level;
opts_dp->pre[0] = p_level;
opts_dp->set_voltages = 1;
phy_configure(phy, &dp_io->phy_opts);
opts_dp->set_voltages = 0;
return 0;
}
void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
u32 pattern)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 value = 0x0;
/* Make sure to clear the current pattern before starting a new one */
dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
switch (pattern) {
case DP_PHY_TEST_PATTERN_D10_2:
dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
break;
case DP_PHY_TEST_PATTERN_ERROR_COUNT:
value &= ~(1 << 16);
dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
value |= SCRAMBLER_RESET_COUNT_VALUE;
dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
break;
case DP_PHY_TEST_PATTERN_PRBS7:
dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_PRBS7);
break;
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
/* 00111110000011111000001111100000 */
dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
0x3E0F83E0);
/* 00001111100000111110000011111000 */
dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
0x0F83E0F8);
/* 1111100000111110 */
dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
0x0000F83E);
break;
case DP_PHY_TEST_PATTERN_CP2520:
value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
value = DP_HBR2_ERM_PATTERN;
dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
value |= SCRAMBLER_RESET_COUNT_VALUE;
dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
value |= DP_MAINLINK_CTRL_ENABLE;
dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
break;
case DP_PHY_TEST_PATTERN_SEL_MASK:
dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
DP_MAINLINK_CTRL_ENABLE);
dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
break;
default:
drm_dbg_dp(catalog->drm_dev,
"No valid test pattern requested: %#x\n", pattern);
break;
}
}
u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
return dp_read_link(catalog, REG_DP_MAINLINK_READY);
}
/* panel related catalog functions */
int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 reg;
dp_write_link(catalog, REG_DP_TOTAL_HOR_VER,
dp_catalog->total);
dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC,
dp_catalog->sync_start);
dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
dp_catalog->width_blanking);
dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
if (dp_catalog->wide_bus_en)
reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
else
reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg);
dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
return 0;
}
void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
struct drm_display_mode *drm_mode)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
u32 v_sync_width;
u32 hsync_ctl;
u32 display_hctl;
/* TPG config parameters */
hsync_period = drm_mode->htotal;
vsync_period = drm_mode->vtotal;
display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
hsync_period);
display_v_end = ((vsync_period - (drm_mode->vsync_start -
drm_mode->vdisplay))
* hsync_period) - 1;
display_v_start += drm_mode->htotal - drm_mode->hsync_start;
display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
hsync_end_x = hsync_period - (drm_mode->hsync_start -
drm_mode->hdisplay) - 1;
v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
hsync_ctl = (hsync_period << 16) |
(drm_mode->hsync_end - drm_mode->hsync_start);
display_hctl = (hsync_end_x << 16) | hsync_start_x;
dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
hsync_period);
dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
hsync_period);
dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
DP_TPG_CHECKERED_RECT_PATTERN);
dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
DP_TPG_VIDEO_CONFIG_BPP_8BIT |
DP_TPG_VIDEO_CONFIG_RGB);
dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
DP_BIST_ENABLE_DPBIST_EN);
dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
DP_TIMING_ENGINE_EN_EN);
drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
}
void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
}
struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
{
struct dp_catalog_private *catalog;
if (!io) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
if (!catalog)
return ERR_PTR(-ENOMEM);
catalog->dev = dev;
catalog->io = io;
return &catalog->dp_catalog;
}
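/*
 * Assumed caller-side sketch (not from this file): the catalog is obtained
 * once at probe time. Because the allocation is devm-managed there is no
 * matching put; the catalog lives for the lifetime of the device.
 */
static __maybe_unused struct dp_catalog *dp_example_probe_catalog(struct device *dev,
		struct dp_io *io)
{
	struct dp_catalog *catalog = dp_catalog_get(dev, io);

	if (IS_ERR(catalog))
		DRM_ERROR("failed to get catalog: %ld\n", PTR_ERR(catalog));

	return catalog;
}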
void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
enum dp_catalog_audio_sdp_type sdp;
enum dp_catalog_audio_header_type header;
if (!dp_catalog)
return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
sdp_map = catalog->audio_map;
sdp = dp_catalog->sdp_type;
header = dp_catalog->sdp_header;
dp_catalog->audio_data = dp_read_link(catalog,
sdp_map[sdp][header]);
}
void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
enum dp_catalog_audio_sdp_type sdp;
enum dp_catalog_audio_header_type header;
u32 data;
if (!dp_catalog)
return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
sdp_map = catalog->audio_map;
sdp = dp_catalog->sdp_type;
header = dp_catalog->sdp_header;
data = dp_catalog->audio_data;
dp_write_link(catalog, sdp_map[sdp][header], data);
}
void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog;
u32 acr_ctrl, select;
if (!dp_catalog)
return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
select = dp_catalog->audio_data;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
select, acr_ctrl);
dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog;
bool enable;
u32 audio_ctrl;
if (!dp_catalog)
return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
enable = !!dp_catalog->audio_data;
audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
if (enable)
audio_ctrl |= BIT(0);
else
audio_ctrl &= ~BIT(0);
drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
/* make sure the audio engine enable/disable write is posted */
wmb();
}
void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog;
u32 sdp_cfg = 0;
u32 sdp_cfg2 = 0;
if (!dp_catalog)
return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
/* AUDIO_TIMESTAMP_SDP_EN */
sdp_cfg |= BIT(1);
/* AUDIO_STREAM_SDP_EN */
sdp_cfg |= BIT(2);
/* AUDIO_COPY_MANAGEMENT_SDP_EN */
sdp_cfg |= BIT(5);
/* AUDIO_ISRC_SDP_EN */
sdp_cfg |= BIT(6);
/* AUDIO_INFOFRAME_SDP_EN */
sdp_cfg |= BIT(20);
drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
/* IFRM_REGSRC -> Do not use reg values */
sdp_cfg2 &= ~BIT(0);
/* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
sdp_cfg2 &= ~BIT(1);
drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog;
static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
{
MMSS_DP_AUDIO_STREAM_0,
MMSS_DP_AUDIO_STREAM_1,
MMSS_DP_AUDIO_STREAM_1,
},
{
MMSS_DP_AUDIO_TIMESTAMP_0,
MMSS_DP_AUDIO_TIMESTAMP_1,
MMSS_DP_AUDIO_TIMESTAMP_1,
},
{
MMSS_DP_AUDIO_INFOFRAME_0,
MMSS_DP_AUDIO_INFOFRAME_1,
MMSS_DP_AUDIO_INFOFRAME_1,
},
{
MMSS_DP_AUDIO_COPYMANAGEMENT_0,
MMSS_DP_AUDIO_COPYMANAGEMENT_1,
MMSS_DP_AUDIO_COPYMANAGEMENT_1,
},
{
MMSS_DP_AUDIO_ISRC_0,
MMSS_DP_AUDIO_ISRC_1,
MMSS_DP_AUDIO_ISRC_1,
},
};
if (!dp_catalog)
return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
catalog->audio_map = sdp_map;
}
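/*
 * Indexing sketch for the sdp_map table above (hypothetical helper, for
 * illustration): the first index is the SDP type, the second is the header
 * byte slot, and the stored value is the register that the get/set_header
 * calls ultimately touch.
 */
static u32 __maybe_unused dp_example_audio_header_reg(struct dp_catalog_private *catalog,
		enum dp_catalog_audio_sdp_type sdp,
		enum dp_catalog_audio_header_type header)
{
	return catalog->audio_map[sdp][header];
}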
void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog;
u32 mainlink_levels, safe_to_exit_level;
if (!dp_catalog)
return;
catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
safe_to_exit_level = dp_catalog->audio_data;
mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
mainlink_levels &= 0xFE0;
mainlink_levels |= safe_to_exit_level;
drm_dbg_dp(catalog->drm_dev,
"mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
mainlink_levels, safe_to_exit_level);
dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
}
| linux-master | drivers/gpu/drm/msm/dp/dp_catalog.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_opp.h>
#include "dp_power.h"
#include "msm_drv.h"
struct dp_power_private {
struct dp_parser *parser;
struct device *dev;
struct drm_device *drm_dev;
struct clk *link_clk_src;
struct clk *pixel_provider;
struct clk *link_provider;
struct dp_power dp_power;
};
static int dp_power_clk_init(struct dp_power_private *power)
{
int rc = 0;
struct dss_module_power *core, *ctrl, *stream;
struct device *dev = power->dev;
core = &power->parser->mp[DP_CORE_PM];
ctrl = &power->parser->mp[DP_CTRL_PM];
stream = &power->parser->mp[DP_STREAM_PM];
rc = devm_clk_bulk_get(dev, core->num_clk, core->clocks);
if (rc)
return rc;
rc = devm_clk_bulk_get(dev, ctrl->num_clk, ctrl->clocks);
if (rc)
	return rc;	/* preserve the real errno, e.g. -EPROBE_DEFER */
rc = devm_clk_bulk_get(dev, stream->num_clk, stream->clocks);
if (rc)
	return rc;
return 0;
}
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
{
struct dp_power_private *power;
power = container_of(dp_power, struct dp_power_private, dp_power);
drm_dbg_dp(power->drm_dev,
"core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
if (pm_type == DP_CORE_PM)
return dp_power->core_clks_on;
if (pm_type == DP_CTRL_PM)
return dp_power->link_clks_on;
if (pm_type == DP_STREAM_PM)
return dp_power->stream_clks_on;
return 0;
}
int dp_power_clk_enable(struct dp_power *dp_power,
enum dp_pm_type pm_type, bool enable)
{
int rc = 0;
struct dp_power_private *power;
struct dss_module_power *mp;
power = container_of(dp_power, struct dp_power_private, dp_power);
if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
pm_type != DP_STREAM_PM) {
DRM_ERROR("unsupported power module: %s\n",
dp_parser_pm_name(pm_type));
return -EINVAL;
}
if (enable) {
if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
drm_dbg_dp(power->drm_dev,
"core clks already enabled\n");
return 0;
}
if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
drm_dbg_dp(power->drm_dev,
"links clks already enabled\n");
return 0;
}
if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
drm_dbg_dp(power->drm_dev,
"pixel clks already enabled\n");
return 0;
}
if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
drm_dbg_dp(power->drm_dev,
"Enable core clks before link clks\n");
mp = &power->parser->mp[DP_CORE_PM];
rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
if (rc)
return rc;
dp_power->core_clks_on = true;
}
}
mp = &power->parser->mp[pm_type];
if (enable) {
rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
if (rc)
return rc;
} else {
clk_bulk_disable_unprepare(mp->num_clk, mp->clocks);
}
if (pm_type == DP_CORE_PM)
dp_power->core_clks_on = enable;
else if (pm_type == DP_STREAM_PM)
dp_power->stream_clks_on = enable;
else
dp_power->link_clks_on = enable;
drm_dbg_dp(power->drm_dev, "%s clocks for %s\n",
enable ? "enable" : "disable",
dp_parser_pm_name(pm_type));
drm_dbg_dp(power->drm_dev,
"strem_clks:%s link_clks:%s core_clks:%s\n",
dp_power->stream_clks_on ? "on" : "off",
dp_power->link_clks_on ? "on" : "off",
dp_power->core_clks_on ? "on" : "off");
return 0;
}
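/*
 * Ordering sketch (hypothetical caller, not from this file): link and stream
 * clocks depend on the core clocks, which dp_power_clk_enable() also enforces
 * internally, so a bring-up path simply enables the domains in this order.
 */
static int __maybe_unused dp_example_power_up(struct dp_power *dp_power)
{
	int rc;

	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
	if (rc)
		return rc;

	rc = dp_power_clk_enable(dp_power, DP_CTRL_PM, true);
	if (rc)
		return rc;

	return dp_power_clk_enable(dp_power, DP_STREAM_PM, true);
}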
int dp_power_client_init(struct dp_power *dp_power)
{
struct dp_power_private *power;
power = container_of(dp_power, struct dp_power_private, dp_power);
pm_runtime_enable(power->dev);
return dp_power_clk_init(power);
}
void dp_power_client_deinit(struct dp_power *dp_power)
{
struct dp_power_private *power;
power = container_of(dp_power, struct dp_power_private, dp_power);
pm_runtime_disable(power->dev);
}
int dp_power_init(struct dp_power *dp_power)
{
int rc = 0;
struct dp_power_private *power;
power = container_of(dp_power, struct dp_power_private, dp_power);
pm_runtime_get_sync(power->dev);
rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
if (rc)
pm_runtime_put_sync(power->dev);
return rc;
}
int dp_power_deinit(struct dp_power *dp_power)
{
struct dp_power_private *power;
power = container_of(dp_power, struct dp_power_private, dp_power);
dp_power_clk_enable(dp_power, DP_CORE_PM, false);
pm_runtime_put_sync(power->dev);
return 0;
}
struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
{
struct dp_power_private *power;
struct dp_power *dp_power;
power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
if (!power)
return ERR_PTR(-ENOMEM);
power->parser = parser;
power->dev = dev;
dp_power = &power->dp_power;
return dp_power;
}
| linux-master | drivers/gpu/drm/msm/dp/dp_power.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/platform_device.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_panel.h"
#include "dp_display.h"
#define HEADER_BYTE_2_BIT 0
#define PARITY_BYTE_2_BIT 8
#define HEADER_BYTE_1_BIT 16
#define PARITY_BYTE_1_BIT 24
#define HEADER_BYTE_3_BIT 16
#define PARITY_BYTE_3_BIT 24
struct dp_audio_private {
struct platform_device *audio_pdev;
struct platform_device *pdev;
struct drm_device *drm_dev;
struct dp_catalog *catalog;
struct dp_panel *panel;
bool engine_on;
u32 channels;
struct dp_audio dp_audio;
};
static u8 dp_audio_get_g0_value(u8 data)
{
u8 c[4];
u8 g[4];
u8 ret_data = 0;
u8 i;
for (i = 0; i < 4; i++)
c[i] = (data >> i) & 0x01;
g[0] = c[3];
g[1] = c[0] ^ c[3];
g[2] = c[1];
g[3] = c[2];
for (i = 0; i < 4; i++)
ret_data = ((g[i] & 0x01) << i) | ret_data;
return ret_data;
}
static u8 dp_audio_get_g1_value(u8 data)
{
u8 c[4];
u8 g[4];
u8 ret_data = 0;
u8 i;
for (i = 0; i < 4; i++)
c[i] = (data >> i) & 0x01;
g[0] = c[0] ^ c[3];
g[1] = c[0] ^ c[1] ^ c[3];
g[2] = c[1] ^ c[2];
g[3] = c[2] ^ c[3];
for (i = 0; i < 4; i++)
ret_data = ((g[i] & 0x01) << i) | ret_data;
return ret_data;
}
static u8 dp_audio_calculate_parity(u32 data)
{
u8 x0 = 0;
u8 x1 = 0;
u8 ci = 0;
u8 iData = 0;
u8 i = 0;
u8 parity_byte;
u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
for (i = 0; i < num_byte; i++) {
iData = (data >> i*4) & 0xF;
ci = iData ^ x1;
x1 = x0 ^ dp_audio_get_g1_value(ci);
x0 = dp_audio_get_g0_value(ci);
}
parity_byte = x1 | (x0 << 4);
return parity_byte;
}
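/*
 * Illustrative use of the parity helper above (hypothetical, the output
 * value is deliberately not asserted): for the stream SDP header byte 0x02
 * written in dp_audio_stream_sdp(), no bit above 0xFF is set, so num_byte
 * is 2 and only the two nibbles of the low byte (0x2, then 0x0) are run
 * through the g0/g1 generators.
 */
static u8 __maybe_unused dp_audio_example_parity(void)
{
	return dp_audio_calculate_parity(0x02);
}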
static u32 dp_audio_get_header(struct dp_catalog *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
catalog->sdp_type = sdp;
catalog->sdp_header = header;
dp_catalog_audio_get_header(catalog);
return catalog->audio_data;
}
static void dp_audio_set_header(struct dp_catalog *catalog,
u32 data,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
catalog->sdp_type = sdp;
catalog->sdp_header = header;
catalog->audio_data = data;
dp_catalog_audio_set_header(catalog);
}
static void dp_audio_stream_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
new_value = 0x02;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
new_value = value;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
new_value = audio->channels - 1;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
new_value = 0x1;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
new_value = 0x17;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
new_value = 0x84;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
new_value = 0x1b;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
new_value = 0x05;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
new_value = 0x0;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
new_value = 0x06;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
}
static void dp_audio_setup_sdp(struct dp_audio_private *audio)
{
dp_catalog_audio_config_sdp(audio->catalog);
dp_audio_stream_sdp(audio);
dp_audio_timestamp_sdp(audio);
dp_audio_infoframe_sdp(audio);
dp_audio_copy_management_sdp(audio);
dp_audio_isrc_sdp(audio);
}
static void dp_audio_setup_acr(struct dp_audio_private *audio)
{
u32 select = 0;
struct dp_catalog *catalog = audio->catalog;
switch (audio->dp_audio.bw_code) {
case DP_LINK_BW_1_62:
select = 0;
break;
case DP_LINK_BW_2_7:
select = 1;
break;
case DP_LINK_BW_5_4:
select = 2;
break;
case DP_LINK_BW_8_1:
select = 3;
break;
default:
drm_dbg_dp(audio->drm_dev, "Unknown link rate\n");
select = 0;
break;
}
catalog->audio_data = select;
dp_catalog_audio_config_acr(catalog);
}
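/*
 * The select value written by dp_audio_setup_acr() above is assumed to
 * index one of four per-link-rate audio clock regeneration (ACR) parameter
 * sets in the catalog layer: 0 for 1.62, 1 for 2.7, 2 for 5.4 and
 * 3 for 8.1 Gbps per lane.
 */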
static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 safe_to_exit_level = 0;
switch (audio->dp_audio.lane_count) {
case 1:
safe_to_exit_level = 14;
break;
case 2:
safe_to_exit_level = 8;
break;
case 4:
safe_to_exit_level = 5;
break;
default:
drm_dbg_dp(audio->drm_dev,
"setting the default safe_to_exit_level = %u\n",
safe_to_exit_level);
safe_to_exit_level = 14;
break;
}
catalog->audio_data = safe_to_exit_level;
dp_catalog_audio_sfe_level(catalog);
}
static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
{
struct dp_catalog *catalog = audio->catalog;
catalog->audio_data = enable;
dp_catalog_audio_enable(catalog);
audio->engine_on = enable;
}
static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
{
struct dp_audio *dp_audio;
struct msm_dp *dp_display;
if (!pdev) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
dp_display = platform_get_drvdata(pdev);
if (!dp_display) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
dp_audio = dp_display->dp_audio;
if (!dp_audio) {
DRM_ERROR("invalid dp_audio data\n");
return ERR_PTR(-EINVAL);
}
return container_of(dp_audio, struct dp_audio_private, dp_audio);
}
static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
if (!pdev) {
pr_err("invalid input\n");
return -ENODEV;
}
dp_display = platform_get_drvdata(pdev);
if (!dp_display) {
pr_err("invalid input\n");
return -ENODEV;
}
return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
}
static int dp_audio_get_eld(struct device *dev,
void *data, uint8_t *buf, size_t len)
{
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
if (!pdev) {
DRM_ERROR("invalid input\n");
return -ENODEV;
}
dp_display = platform_get_drvdata(pdev);
if (!dp_display) {
DRM_ERROR("invalid input\n");
return -ENODEV;
}
memcpy(buf, dp_display->connector->eld,
min(sizeof(dp_display->connector->eld), len));
return 0;
}
int dp_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
int rc = 0;
struct dp_audio_private *audio;
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
dp_display = platform_get_drvdata(pdev);
/*
 * There are cases where the sound card may be opened before DP is
 * connected, or even while it is disconnected. This can cause an
 * unclocked register access, since the audio subsystem relies on the
 * DP driver to maintain the correct clock state. To protect against
 * such cases, check the connection status and bail out if not
 * connected.
 */
if (!dp_display->power_on) {
rc = -EINVAL;
goto end;
}
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
audio->channels = params->channels;
dp_audio_setup_sdp(audio);
dp_audio_setup_acr(audio);
dp_audio_safe_to_exit_level(audio);
dp_audio_enable(audio, true);
dp_display_signal_audio_start(dp_display);
dp_display->audio_enabled = true;
end:
return rc;
}
static void dp_audio_shutdown(struct device *dev, void *data)
{
struct dp_audio_private *audio;
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
dp_display = platform_get_drvdata(pdev);
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
}
/*
 * If audio was not enabled there is no need to execute the shutdown,
 * so bail out early. This also ensures that we don't cause an
 * unclocked access when the audio subsystem calls this without DP
 * being connected. is_connected cannot be used here as it is set to
 * false earlier than this call.
 */
if (!dp_display->audio_enabled)
return;
dp_audio_enable(audio, false);
/* signal the dp display to safely shutdown clocks */
dp_display_signal_audio_complete(dp_display);
}
static const struct hdmi_codec_ops dp_audio_codec_ops = {
.hw_params = dp_audio_hw_params,
.audio_shutdown = dp_audio_shutdown,
.get_eld = dp_audio_get_eld,
.hook_plugged_cb = dp_audio_hook_plugged_cb,
};
static struct hdmi_codec_pdata codec_data = {
.ops = &dp_audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
{
struct dp_audio_private *audio_priv;
audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
if (audio_priv->audio_pdev) {
platform_device_unregister(audio_priv->audio_pdev);
audio_priv->audio_pdev = NULL;
}
}
int dp_register_audio_driver(struct device *dev,
struct dp_audio *dp_audio)
{
struct dp_audio_private *audio_priv;
audio_priv = container_of(dp_audio,
struct dp_audio_private, dp_audio);
audio_priv->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
}
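/*
 * Editor's note on the mechanism (a summary of the hdmi-codec contract,
 * not something enforced in this file): registering a HDMI_CODEC_DRV_NAME
 * child device is what binds the generic ASoC hdmi-codec driver to this
 * DP instance; that driver is then expected to call back into
 * dp_audio_codec_ops (hw_params, audio_shutdown, get_eld,
 * hook_plugged_cb) with this platform device as the parent.
 */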
struct dp_audio *dp_audio_get(struct platform_device *pdev,
struct dp_panel *panel,
struct dp_catalog *catalog)
{
int rc = 0;
struct dp_audio_private *audio;
struct dp_audio *dp_audio;
if (!pdev || !panel || !catalog) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
}
audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
if (!audio) {
rc = -ENOMEM;
goto error;
}
audio->pdev = pdev;
audio->panel = panel;
audio->catalog = catalog;
dp_audio = &audio->dp_audio;
dp_catalog_audio_init(catalog);
return dp_audio;
error:
return ERR_PTR(rc);
}
void dp_audio_put(struct dp_audio *dp_audio)
{
struct dp_audio_private *audio;
if (!dp_audio)
return;
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
devm_kfree(&audio->pdev->dev, audio);
}
| linux-master | drivers/gpu/drm/msm/dp/dp_audio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
#include "dp_aux.h"
enum msm_dp_aux_err {
DP_AUX_ERR_NONE,
DP_AUX_ERR_ADDR,
DP_AUX_ERR_TOUT,
DP_AUX_ERR_NACK,
DP_AUX_ERR_DEFER,
DP_AUX_ERR_NACK_DEFER,
DP_AUX_ERR_PHY,
};
struct dp_aux_private {
struct device *dev;
struct dp_catalog *catalog;
struct mutex mutex;
struct completion comp;
enum msm_dp_aux_err aux_error_num;
u32 retry_cnt;
bool cmd_busy;
bool native;
bool read;
bool no_send_addr;
bool no_send_stop;
bool initted;
bool is_edp;
u32 offset;
u32 segment;
struct drm_dp_aux dp_aux;
};
#define MAX_AUX_RETRIES 5
static ssize_t dp_aux_write(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u8 data[4];
u32 reg;
ssize_t len;
u8 *msgdata = msg->buffer;
int const AUX_CMD_FIFO_LEN = 128;
int i = 0;
if (aux->read)
len = 0;
else
len = msg->size;
/*
 * The command FIFO is only 144 bytes deep; cap the 4-byte header plus
 * payload at 128 bytes here.
 */
if (len > AUX_CMD_FIFO_LEN - 4) {
DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
return -EINVAL;
}
/* Pack cmd and write to HW */
data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
if (aux->read)
data[0] |= BIT(4); /* R/W */
data[1] = msg->address >> 8; /* addr[15:8] */
data[2] = msg->address; /* addr[7:0] */
data[3] = msg->size - 1; /* len[7:0] */
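/*
 * Worked example (illustrative only): a native read of 16 bytes at
 * DPCD address 0x00200 packs the header as
 *
 *   data[0] = 0x10   addr[19:16] = 0x0, R/W bit (BIT(4)) set
 *   data[1] = 0x02   addr[15:8]
 *   data[2] = 0x00   addr[7:0]
 *   data[3] = 0x0f   size - 1 = 15
 */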
for (i = 0; i < len + 4; i++) {
reg = (i < 4) ? data[i] : msgdata[i - 4];
reg <<= DP_AUX_DATA_OFFSET;
reg &= DP_AUX_DATA_MASK;
reg |= DP_AUX_DATA_WRITE;
/* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
aux->catalog->aux_data = reg;
dp_catalog_aux_write_data(aux->catalog);
}
dp_catalog_aux_clear_trans(aux->catalog, false);
dp_catalog_aux_clear_hw_interrupts(aux->catalog);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
reg |= DP_AUX_TRANS_CTRL_I2C;
if (aux->no_send_addr)
reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;
if (aux->no_send_stop)
reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
}
reg |= DP_AUX_TRANS_CTRL_GO;
aux->catalog->aux_data = reg;
dp_catalog_aux_write_trans(aux->catalog);
return len;
}
static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
unsigned long time_left;
reinit_completion(&aux->comp);
ret = dp_aux_write(aux, msg);
if (ret < 0)
return ret;
time_left = wait_for_completion_timeout(&aux->comp,
msecs_to_jiffies(250));
if (!time_left)
return -ETIMEDOUT;
return ret;
}
static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data;
u8 *dp;
u32 i, actual_i;
u32 len = msg->size;
dp_catalog_aux_clear_trans(aux->catalog, true);
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
aux->catalog->aux_data = data;
dp_catalog_aux_write_data(aux->catalog);
dp = msg->buffer;
/* discard first byte */
data = dp_catalog_aux_read_data(aux->catalog);
for (i = 0; i < len; i++) {
data = dp_catalog_aux_read_data(aux->catalog);
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
if (i != actual_i)
break;
}
return i;
}
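/*
 * A note on the request masks used below: in drm_dp_helper.h the request
 * codes are DP_AUX_I2C_READ (0x1) and DP_AUX_NATIVE_READ (0x9), so the
 * expression DP_AUX_I2C_READ & DP_AUX_NATIVE_READ evaluates to the common
 * "read" bit (0x1) and tests for a read of either flavor. The same idiom
 * with DP_AUX_NATIVE_WRITE (0x8) & DP_AUX_NATIVE_READ in dp_aux_transfer()
 * isolates the "native" bit.
 */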
static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg)
{
u32 edid_address = 0x50;
u32 segment_address = 0x30;
bool i2c_read = input_msg->request &
(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
u8 *data;
if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
(input_msg->address != segment_address)))
return;
data = input_msg->buffer;
if (input_msg->address == segment_address)
aux->segment = *data;
else
aux->offset = *data;
}
/**
* dp_aux_transfer_helper() - helper function for EDID read transactions
*
* @aux: DP AUX private structure
* @input_msg: input message from DRM upstream APIs
* @send_seg: send the segment to sink
*
 * Return: void
*
* This helper function is used to fix EDID reads for non-compliant
* sinks that do not handle the i2c middle-of-transaction flag correctly.
*/
static void dp_aux_transfer_helper(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg,
bool send_seg)
{
struct drm_dp_aux_msg helper_msg;
u32 message_size = 0x10;
u32 segment_address = 0x30;
u32 const edid_block_length = 0x80;
bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
bool i2c_read = input_msg->request &
(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
if (!i2c_mot || !i2c_read || (input_msg->size == 0))
return;
/*
* Sending the segment value and EDID offset will be performed
* from the DRM upstream EDID driver for each block. Avoid
* duplicate AUX transactions related to this while reading the
* first 16 bytes of each block.
*/
if (!(aux->offset % edid_block_length) || !send_seg)
goto end;
aux->read = false;
aux->cmd_busy = true;
aux->no_send_addr = true;
aux->no_send_stop = true;
/*
* Send the segment address for every i2c read in which the
 * middle-of-transaction flag is set. This is required to support EDID
* reads of more than 2 blocks as the segment address is reset to 0
* since we are overriding the middle-of-transaction flag for read
* transactions.
*/
if (aux->segment) {
memset(&helper_msg, 0, sizeof(helper_msg));
helper_msg.address = segment_address;
helper_msg.buffer = &aux->segment;
helper_msg.size = 1;
dp_aux_cmd_fifo_tx(aux, &helper_msg);
}
/*
* Send the offset address for every i2c read in which the
* middle-of-transaction flag is set. This will ensure that the sink
* will update its read pointer and return the correct portion of the
 * EDID buffer in the subsequent i2c read transaction triggered in the
* native AUX transfer function.
*/
memset(&helper_msg, 0, sizeof(helper_msg));
helper_msg.address = input_msg->address;
helper_msg.buffer = &aux->offset;
helper_msg.size = 1;
dp_aux_cmd_fifo_tx(aux, &helper_msg);
end:
aux->offset += message_size;
if (aux->offset == 0x80 || aux->offset == 0x100)
aux->segment = 0x0; /* reset segment at end of block */
}
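/*
 * Worked example of the bookkeeping above (editor's illustration): while
 * reading block 2 of a four-block EDID, aux->segment is 1 and aux->offset
 * walks 0x00, 0x10, ... in 16-byte steps. For each read past the start of
 * a block this helper resends the segment pointer (i2c address 0x30) and
 * the current offset before the real transfer, then clears the segment
 * again once the offset wraps at 0x80 or 0x100.
 */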
/*
 * This function does the real work of processing an AUX transaction.
 * It resets the AUX channel if the wait for transfer completion times
 * out.
 */
static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
struct dp_aux_private *aux;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
/* Ignore address only message */
if (msg->size == 0 || !msg->buffer) {
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
return msg->size;
}
/* msg sanity check */
if ((aux->native && msg->size > aux_cmd_native_max) ||
msg->size > aux_cmd_i2c_max) {
DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
__func__, msg->size, msg->request);
return -EINVAL;
}
mutex_lock(&aux->mutex);
if (!aux->initted) {
ret = -EIO;
goto exit;
}
/*
* For eDP it's important to give a reasonably long wait here for HPD
* to be asserted. This is because the panel driver may have _just_
* turned on the panel and then tried to do an AUX transfer. The panel
* driver has no way of knowing when the panel is ready, so it's up
* to us to wait. For DP we never get into this situation so let's
* avoid ever doing the extra long wait for DP.
*/
if (aux->is_edp) {
ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
if (ret) {
DRM_DEBUG_DP("Panel not ready for aux transactions\n");
goto exit;
}
}
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
aux->cmd_busy = true;
if (aux->read) {
aux->no_send_addr = true;
aux->no_send_stop = false;
} else {
aux->no_send_addr = true;
aux->no_send_stop = true;
}
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
if (aux->native) {
aux->retry_cnt++;
if (!(aux->retry_cnt % MAX_AUX_RETRIES))
dp_catalog_aux_update_cfg(aux->catalog);
}
/* reset aux if link is in connected state */
if (dp_catalog_link_is_connected(aux->catalog))
dp_catalog_aux_reset(aux->catalog);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
case DP_AUX_ERR_NONE:
if (aux->read)
ret = dp_aux_cmd_fifo_rx(aux, msg);
msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
break;
case DP_AUX_ERR_DEFER:
msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
break;
case DP_AUX_ERR_PHY:
case DP_AUX_ERR_ADDR:
case DP_AUX_ERR_NACK:
case DP_AUX_ERR_NACK_DEFER:
msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_NACK : DP_AUX_I2C_REPLY_NACK;
break;
case DP_AUX_ERR_TOUT:
ret = -ETIMEDOUT;
break;
}
}
aux->cmd_busy = false;
exit:
mutex_unlock(&aux->mutex);
return ret;
}
irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
{
u32 isr;
struct dp_aux_private *aux;
if (!dp_aux) {
DRM_ERROR("invalid input\n");
return IRQ_NONE;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
isr = dp_catalog_aux_get_irq(aux->catalog);
/* no interrupts pending, return immediately */
if (!isr)
return IRQ_NONE;
if (!aux->cmd_busy) {
DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
return IRQ_NONE;
}
/*
* The logic below assumes only one error bit is set (other than "done"
* which can apparently be set at the same time as some of the other
* bits). Warn if more than one get set so we know we need to improve
* the logic.
*/
if (hweight32(isr & ~DP_INTR_AUX_XFER_DONE) > 1)
DRM_WARN("Some DP AUX interrupts unhandled: %#010x\n", isr);
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
dp_catalog_aux_clear_hw_interrupts(aux->catalog);
} else if (isr & DP_INTR_NACK_DEFER) {
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
} else if (isr & DP_INTR_WRONG_ADDR) {
aux->aux_error_num = DP_AUX_ERR_ADDR;
} else if (isr & DP_INTR_TIMEOUT) {
aux->aux_error_num = DP_AUX_ERR_TOUT;
} else if (!aux->native && (isr & DP_INTR_I2C_NACK)) {
aux->aux_error_num = DP_AUX_ERR_NACK;
} else if (!aux->native && (isr & DP_INTR_I2C_DEFER)) {
if (isr & DP_INTR_AUX_XFER_DONE)
aux->aux_error_num = DP_AUX_ERR_NACK;
else
aux->aux_error_num = DP_AUX_ERR_DEFER;
} else if (isr & DP_INTR_AUX_XFER_DONE) {
aux->aux_error_num = DP_AUX_ERR_NONE;
} else {
DRM_WARN("Unexpected interrupt: %#010x\n", isr);
return IRQ_NONE;
}
complete(&aux->comp);
return IRQ_HANDLED;
}
void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
dp_catalog_aux_update_cfg(aux->catalog);
dp_catalog_aux_reset(aux->catalog);
}
void dp_aux_init(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
DRM_ERROR("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
aux->initted = true;
mutex_unlock(&aux->mutex);
}
void dp_aux_deinit(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
aux->initted = false;
dp_catalog_aux_enable(aux->catalog, false);
mutex_unlock(&aux->mutex);
}
int dp_aux_register(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
int ret;
if (!dp_aux) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->dp_aux.name = "dpu_dp_aux";
aux->dp_aux.dev = aux->dev;
aux->dp_aux.transfer = dp_aux_transfer;
ret = drm_dp_aux_register(&aux->dp_aux);
if (ret) {
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
ret);
return ret;
}
return 0;
}
void dp_aux_unregister(struct drm_dp_aux *dp_aux)
{
drm_dp_aux_unregister(dp_aux);
}
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
bool is_edp)
{
struct dp_aux_private *aux;
if (!catalog) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
if (!aux)
return ERR_PTR(-ENOMEM);
init_completion(&aux->comp);
aux->cmd_busy = false;
aux->is_edp = is_edp;
mutex_init(&aux->mutex);
aux->dev = dev;
aux->catalog = catalog;
aux->retry_cnt = 0;
return &aux->dp_aux;
}
void dp_aux_put(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux)
return;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_destroy(&aux->mutex);
devm_kfree(aux->dev, aux);
}
| linux-master | drivers/gpu/drm/msm/dp/dp_aux.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include "dp_panel.h"
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
struct dp_panel_private {
struct device *dev;
struct drm_device *drm_dev;
struct dp_panel dp_panel;
struct drm_dp_aux *aux;
struct dp_link *link;
struct dp_catalog *catalog;
bool panel_on;
bool aux_cfg_update_done;
};
static void dp_panel_read_psr_cap(struct dp_panel_private *panel)
{
ssize_t rlen;
struct dp_panel *dp_panel;
dp_panel = &panel->dp_panel;
/* edp sink */
if (dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) {
rlen = drm_dp_dpcd_read(panel->aux, DP_PSR_SUPPORT,
&dp_panel->psr_cap, sizeof(dp_panel->psr_cap));
if (rlen == sizeof(dp_panel->psr_cap)) {
drm_dbg_dp(panel->drm_dev,
"psr version: 0x%x, psr_cap: 0x%x\n",
dp_panel->psr_cap.version,
dp_panel->psr_cap.capabilities);
} else {
DRM_ERROR("failed to read psr info, rlen=%zd\n", rlen);
}
}
}
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
int rc = 0;
size_t len;
ssize_t rlen;
struct dp_panel_private *panel;
struct dp_link_info *link_info;
u8 *dpcd, major = 0, minor = 0, temp;
u32 offset = DP_DPCD_REV;
dpcd = dp_panel->dpcd;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
link_info = &dp_panel->link_info;
rlen = drm_dp_dpcd_read(panel->aux, offset,
dpcd, (DP_RECEIVER_CAP_SIZE + 1));
if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
if (rlen == -ETIMEDOUT)
rc = rlen;
else
rc = -EINVAL;
goto end;
}
temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
if (temp & BIT(7)) {
drm_dbg_dp(panel->drm_dev,
"using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
offset = DPRX_EXTENDED_DPCD_FIELD;
}
rlen = drm_dp_dpcd_read(panel->aux, offset,
dpcd, (DP_RECEIVER_CAP_SIZE + 1));
if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
if (rlen == -ETIMEDOUT)
rc = rlen;
else
rc = -EINVAL;
goto end;
}
link_info->revision = dpcd[DP_DPCD_REV];
major = (link_info->revision >> 4) & 0x0f;
minor = link_info->revision & 0x0f;
link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
/* Limit data lanes from data-lanes of endpoint property of dtsi */
if (link_info->num_lanes > dp_panel->max_dp_lanes)
link_info->num_lanes = dp_panel->max_dp_lanes;
/* Limit link rate from link-frequencies of endpoint property of dtsi */
if (link_info->rate > dp_panel->max_dp_link_rate)
link_info->rate = dp_panel->max_dp_link_rate;
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes);
if (drm_dp_enhanced_frame_cap(dpcd))
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
rlen = drm_dp_dpcd_read(panel->aux,
DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
if (rlen < len) {
DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
rc = -EINVAL;
goto end;
}
}
dp_panel_read_psr_cap(panel);
end:
return rc;
}
static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
struct dp_link_info *link_info;
const u32 max_supported_bpp = 30, min_supported_bpp = 18;
u32 bpp = 0, data_rate_khz = 0;
bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
link_info = &dp_panel->link_info;
data_rate_khz = link_info->num_lanes * link_info->rate * 8;
while (bpp > min_supported_bpp) {
if (mode_pclk_khz * bpp <= data_rate_khz)
break;
bpp -= 6;
}
return bpp;
}
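/*
 * Worked example (editor's illustration): with 4 lanes at HBR2 the
 * link_info->rate value is 540000, giving data_rate_khz =
 * 4 * 540000 * 8 = 17,280,000 kbit/s. A 594000 kHz pixel clock (4k60)
 * at 30 bpp needs 17,820,000 kbit/s and does not fit, so the loop
 * steps down to 24 bpp (14,256,000 kbit/s), which does.
 */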
static int dp_panel_update_modes(struct drm_connector *connector,
struct edid *edid)
{
int rc = 0;
if (edid) {
rc = drm_connector_update_edid_property(connector, edid);
if (rc) {
DRM_ERROR("failed to update edid property %d\n", rc);
return rc;
}
rc = drm_add_edid_modes(connector, edid);
return rc;
}
rc = drm_connector_update_edid_property(connector, NULL);
if (rc)
DRM_ERROR("failed to update edid property %d\n", rc);
return rc;
}
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
int rc = 0, bw_code;
int rlen, count;
struct dp_panel_private *panel;
if (!dp_panel || !connector) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
if (rc) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc;
}
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
dp_panel->link_info.num_lanes);
return -EINVAL;
}
if (dp_panel->dfp_present) {
rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
&count, 1);
if (rlen == 1) {
count = DP_GET_SINK_COUNT(count);
if (!count) {
DRM_ERROR("no downstream ports connected\n");
panel->link->sink_count = 0;
rc = -ENOTCONN;
goto end;
}
}
}
kfree(dp_panel->edid);
dp_panel->edid = NULL;
dp_panel->edid = drm_get_edid(connector,
&panel->aux->ddc);
if (!dp_panel->edid) {
DRM_ERROR("panel edid read failed\n");
/* check edid read fail is due to unplug */
if (!dp_catalog_link_is_connected(panel->catalog)) {
rc = -ETIMEDOUT;
goto end;
}
}
if (panel->aux_cfg_update_done) {
drm_dbg_dp(panel->drm_dev,
"read DPCD with updated AUX config\n");
rc = dp_panel_read_dpcd(dp_panel);
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (rc || !is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes)
|| (bw_code > dp_panel->max_bw_code)) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc;
}
panel->aux_cfg_update_done = false;
}
end:
return rc;
}
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
struct dp_panel_private *panel;
u32 bpp;
if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
DRM_ERROR("invalid input\n");
return 0;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (dp_panel->video_test)
bpp = dp_link_bit_depth_to_bpp(
panel->link->test_video.test_bit_depth);
else
bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
mode_pclk_khz);
return bpp;
}
int dp_panel_get_modes(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
if (!dp_panel) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
if (dp_panel->edid)
return dp_panel_update_modes(connector, dp_panel->edid);
return 0;
}
static u8 dp_panel_get_edid_checksum(struct edid *edid)
{
struct edid *last_block;
u8 *raw_edid;
bool is_edid_corrupt = false;
if (!edid) {
DRM_ERROR("invalid edid input\n");
return 0;
}
raw_edid = (u8 *)edid;
raw_edid += (edid->extensions * EDID_LENGTH);
last_block = (struct edid *)raw_edid;
/* block type extension */
drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
if (!is_edid_corrupt)
return last_block->checksum;
DRM_ERROR("Invalid block, no checksum\n");
return 0;
}
void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
{
struct dp_panel_private *panel;
if (!dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
u8 checksum;
if (dp_panel->edid)
checksum = dp_panel_get_edid_checksum(dp_panel->edid);
else
checksum = dp_panel->connector->real_edid_checksum;
dp_link_send_edid_checksum(panel->link, checksum);
dp_link_send_test_response(panel->link);
}
}
void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
{
struct dp_catalog *catalog;
struct dp_panel_private *panel;
if (!dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
catalog = panel->catalog;
if (!panel->panel_on) {
drm_dbg_dp(panel->drm_dev,
"DP panel not enabled, handle TPG on next on\n");
return;
}
if (!enable) {
dp_catalog_panel_tpg_disable(catalog);
return;
}
drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
}
void dp_panel_dump_regs(struct dp_panel *dp_panel)
{
struct dp_catalog *catalog;
struct dp_panel_private *panel;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
catalog = panel->catalog;
dp_catalog_dump_regs(catalog);
}
int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
u32 data, total_ver, total_hor;
struct dp_catalog *catalog;
struct dp_panel_private *panel;
struct drm_display_mode *drm_mode;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
catalog = panel->catalog;
drm_mode = &panel->dp_panel.dp_mode.drm_mode;
drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->hsync_end - drm_mode->hsync_start);
drm_dbg_dp(panel->drm_dev, "height=%d vporch= %d %d %d\n",
drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
drm_mode->vsync_start - drm_mode->vdisplay,
drm_mode->vsync_end - drm_mode->vsync_start);
total_hor = drm_mode->htotal;
total_ver = drm_mode->vtotal;
data = total_ver;
data <<= 16;
data |= total_hor;
catalog->total = data;
data = (drm_mode->vtotal - drm_mode->vsync_start);
data <<= 16;
data |= (drm_mode->htotal - drm_mode->hsync_start);
catalog->sync_start = data;
data = drm_mode->vsync_end - drm_mode->vsync_start;
data <<= 16;
data |= (panel->dp_panel.dp_mode.v_active_low << 31);
data |= drm_mode->hsync_end - drm_mode->hsync_start;
data |= (panel->dp_panel.dp_mode.h_active_low << 15);
catalog->width_blanking = data;
data = drm_mode->vdisplay;
data <<= 16;
data |= drm_mode->hdisplay;
catalog->dp_active = data;
dp_catalog_panel_timing_cfg(catalog);
panel->panel_on = true;
return 0;
}
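/*
 * Worked example of the register packing above, assuming CEA-861 1080p60
 * timings (htotal 2200, vtotal 1125, hsync_start 2008, hsync_end 2052,
 * vsync_start 1084, vsync_end 1089, active-high syncs):
 *
 *   total          = (1125 << 16) | 2200 = 0x04650898
 *   sync_start     = (41 << 16)   | 192  = 0x002900c0
 *   width_blanking = (5 << 16)    | 44   = 0x0005002c
 *   dp_active      = (1080 << 16) | 1920 = 0x04380780
 */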
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
struct drm_display_mode *drm_mode;
struct dp_panel_private *panel;
drm_mode = &dp_panel->dp_mode.drm_mode;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
/*
 * Print the resolution info, as this is the result of a user-initiated
 * cable connection.
 */
drm_dbg_dp(panel->drm_dev, "SET NEW RESOLUTION:\n");
drm_dbg_dp(panel->drm_dev, "%dx%d@%dfps\n",
drm_mode->hdisplay, drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
drm_dbg_dp(panel->drm_dev,
"h_porches(back|front|width) = (%d|%d|%d)\n",
drm_mode->htotal - drm_mode->hsync_end,
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->hsync_end - drm_mode->hsync_start);
drm_dbg_dp(panel->drm_dev,
"v_porches(back|front|width) = (%d|%d|%d)\n",
drm_mode->vtotal - drm_mode->vsync_end,
drm_mode->vsync_start - drm_mode->vdisplay,
drm_mode->vsync_end - drm_mode->vsync_start);
drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n",
drm_mode->clock);
drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
dp_panel->dp_mode.bpp = max_t(u32, 18,
min_t(u32, dp_panel->dp_mode.bpp, 30));
drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
dp_panel->dp_mode.bpp);
return 0;
}
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
{
struct dp_panel_private *panel;
struct dp_panel *dp_panel;
if (!in->dev || !in->catalog || !in->aux || !in->link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
return ERR_PTR(-ENOMEM);
panel->dev = in->dev;
panel->aux = in->aux;
panel->catalog = in->catalog;
panel->link = in->link;
dp_panel = &panel->dp_panel;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
panel->aux_cfg_update_done = false;
return dp_panel;
}
void dp_panel_put(struct dp_panel *dp_panel)
{
if (!dp_panel)
return;
kfree(dp_panel->edid);
}
| linux-master | drivers/gpu/drm/msm/dp/dp_panel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <drm/drm_print.h>
#include "dp_link.h"
#include "dp_panel.h"
#define DP_TEST_REQUEST_MASK 0x7F
enum audio_sample_rate {
AUDIO_SAMPLE_RATE_32_KHZ = 0x00,
AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01,
AUDIO_SAMPLE_RATE_48_KHZ = 0x02,
AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03,
AUDIO_SAMPLE_RATE_96_KHZ = 0x04,
AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05,
AUDIO_SAMPLE_RATE_192_KHZ = 0x06,
};
enum audio_pattern_type {
AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00,
AUDIO_TEST_PATTERN_SAWTOOTH = 0x01,
};
struct dp_link_request {
u32 test_requested;
u32 test_link_rate;
u32 test_lane_count;
};
struct dp_link_private {
u32 prev_sink_count;
struct device *dev;
struct drm_device *drm_dev;
struct drm_dp_aux *aux;
struct dp_link dp_link;
struct dp_link_request request;
struct mutex psm_mutex;
u8 link_status[DP_LINK_STATUS_SIZE];
};
static int dp_aux_link_power_up(struct drm_dp_aux *aux,
struct dp_link_info *link)
{
u8 value;
ssize_t len;
int i;
if (link->revision < 0x11)
return 0;
len = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
if (len < 0)
return len;
value &= ~DP_SET_POWER_MASK;
value |= DP_SET_POWER_D0;
/* retry a few times, sleeping 1-2 ms, to give the sink time to wake up */
for (i = 0; i < 3; i++) {
len = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
usleep_range(1000, 2000);
if (len == 1)
break;
}
return 0;
}
static int dp_aux_link_power_down(struct drm_dp_aux *aux,
struct dp_link_info *link)
{
u8 value;
int err;
if (link->revision < 0x11)
return 0;
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
if (err < 0)
return err;
value &= ~DP_SET_POWER_MASK;
value |= DP_SET_POWER_D3;
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
if (err < 0)
return err;
return 0;
}
static int dp_link_get_period(struct dp_link_private *link, int const addr)
{
int ret = 0;
u8 data;
u32 const max_audio_period = 0xA;
/* TEST_AUDIO_PERIOD_CH_XX */
if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) {
DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr);
ret = -EINVAL;
goto exit;
}
/* Period - Bits 3:0 */
data = data & 0xF;
if ((int)data > max_audio_period) {
DRM_ERROR("invalid test_audio_period_ch_1 = 0x%x\n", data);
ret = -EINVAL;
goto exit;
}
ret = data;
exit:
return ret;
}
static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
{
int ret = 0;
struct dp_link_test_audio *req = &link->dp_link.test_audio;
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_1 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_2 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret);
/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_3 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_4 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_5 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_6 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_7 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_8 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_8 = 0x%x\n", ret);
exit:
return ret;
}
static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
{
int ret = 0;
u8 data;
ssize_t rlen;
int const max_audio_pattern_type = 0x1;
rlen = drm_dp_dpcd_readb(link->aux,
DP_TEST_AUDIO_PATTERN_TYPE, &data);
if (rlen < 0) {
DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen);
return rlen;
}
/* Audio Pattern Type - Bits 7:0 */
if ((int)data > max_audio_pattern_type) {
DRM_ERROR("invalid audio pattern type = 0x%x\n", data);
ret = -EINVAL;
goto exit;
}
link->dp_link.test_audio.test_audio_pattern_type = data;
drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data);
exit:
return ret;
}
static int dp_link_parse_audio_mode(struct dp_link_private *link)
{
int ret = 0;
u8 data;
ssize_t rlen;
int const max_audio_sampling_rate = 0x6;
int const max_audio_channel_count = 0x8;
int sampling_rate = 0x0;
int channel_count = 0x0;
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data);
if (rlen < 0) {
DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen);
return rlen;
}
/* Sampling Rate - Bits 3:0 */
sampling_rate = data & 0xF;
if (sampling_rate > max_audio_sampling_rate) {
DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n",
sampling_rate, max_audio_sampling_rate);
ret = -EINVAL;
goto exit;
}
/* Channel Count - Bits 7:4 */
channel_count = ((data & 0xF0) >> 4) + 1;
if (channel_count > max_audio_channel_count) {
DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n",
channel_count, max_audio_channel_count);
ret = -EINVAL;
goto exit;
}
link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
link->dp_link.test_audio.test_audio_channel_count = channel_count;
drm_dbg_dp(link->drm_dev,
"sampling_rate = 0x%x, channel_count = 0x%x\n",
sampling_rate, channel_count);
exit:
return ret;
}
static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
{
int ret = 0;
ret = dp_link_parse_audio_mode(link);
if (ret)
goto exit;
ret = dp_link_parse_audio_pattern_type(link);
if (ret)
goto exit;
ret = dp_link_parse_audio_channel_period(link);
exit:
return ret;
}
static bool dp_link_is_video_pattern_valid(u32 pattern)
{
switch (pattern) {
case DP_NO_TEST_PATTERN:
case DP_COLOR_RAMP:
case DP_BLACK_AND_WHITE_VERTICAL_LINES:
case DP_COLOR_SQUARE:
return true;
default:
return false;
}
}
/**
* dp_link_is_bit_depth_valid() - validates the bit depth requested
* @tbd: bit depth requested by the sink
*
* Returns true if the requested bit depth is supported.
*/
static bool dp_link_is_bit_depth_valid(u32 tbd)
{
/* DP_TEST_BIT_DEPTH_UNKNOWN is treated as invalid */
switch (tbd) {
case DP_TEST_BIT_DEPTH_6:
case DP_TEST_BIT_DEPTH_8:
case DP_TEST_BIT_DEPTH_10:
return true;
default:
return false;
}
}
static int dp_link_parse_timing_params1(struct dp_link_private *link,
int addr, int len, u32 *val)
{
u8 bp[2];
int rlen;
if (len != 2)
return -EINVAL;
/* Read the two-byte timing parameter at the given DPCD address. */
rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
if (rlen < len) {
DRM_ERROR("failed to read 0x%x\n", addr);
return -EINVAL;
}
*val = bp[1] | (bp[0] << 8);
return 0;
}
static int dp_link_parse_timing_params2(struct dp_link_private *link,
int addr, int len,
u32 *val1, u32 *val2)
{
u8 bp[2];
int rlen;
if (len != 2)
return -EINVAL;
/* Read the two-byte timing parameter at the given DPCD address. */
rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
if (rlen < len) {
DRM_ERROR("failed to read 0x%x\n", addr);
return -EINVAL;
}
*val1 = (bp[0] & BIT(7)) >> 7;
*val2 = bp[1] | ((bp[0] & 0x7F) << 8);
return 0;
}
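/*
 * Unpacking example for the helper above (illustrative): the two-byte
 * HSYNC/VSYNC fields pack a polarity flag in bit 7 of the high byte and
 * a 15-bit value across the rest, so bp = {0x80, 0x2c} yields *val1 = 1
 * (the polarity bit) and *val2 = 0x2c | (0x00 << 8) = 44.
 * dp_link_parse_timing_params1() is the plain big-endian 16-bit case.
 */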
static int dp_link_parse_timing_params3(struct dp_link_private *link,
int addr, u32 *val)
{
u8 bp;
u32 len = 1;
int rlen;
rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len);
if (rlen < 1) {
DRM_ERROR("failed to read 0x%x\n", addr);
return -EINVAL;
}
*val = bp;
return 0;
}
/**
* dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
* @link: Display Port Driver data
*
 * Returns 0 if it successfully parses the video test pattern and the
 * bit depth requested by the sink, and if the parsed values are valid.
*/
static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
{
int ret = 0;
ssize_t rlen;
u8 bp;
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp);
if (rlen < 0) {
DRM_ERROR("failed to read link video pattern. rlen=%zd\n",
rlen);
return rlen;
}
if (!dp_link_is_video_pattern_valid(bp)) {
DRM_ERROR("invalid link video pattern = 0x%x\n", bp);
ret = -EINVAL;
return ret;
}
link->dp_link.test_video.test_video_pattern = bp;
/* Read the requested color bit depth and dynamic range (Byte 0x232) */
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
if (rlen < 0) {
DRM_ERROR("failed to read link bit depth. rlen=%zd\n", rlen);
return rlen;
}
/* Dynamic Range */
link->dp_link.test_video.test_dyn_range =
(bp & DP_TEST_DYNAMIC_RANGE_CEA);
/* Color bit depth */
bp &= DP_TEST_BIT_DEPTH_MASK;
if (!dp_link_is_bit_depth_valid(bp)) {
DRM_ERROR("invalid link bit depth = 0x%x\n", bp);
ret = -EINVAL;
return ret;
}
link->dp_link.test_video.test_bit_depth = bp;
/* resolution timing params */
ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
&link->dp_link.test_video.test_h_total);
if (ret) {
DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n");
return ret;
}
ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
&link->dp_link.test_video.test_v_total);
if (ret) {
DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n");
return ret;
}
ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
&link->dp_link.test_video.test_h_start);
if (ret) {
DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n");
return ret;
}
ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
&link->dp_link.test_video.test_v_start);
if (ret) {
DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n");
return ret;
}
ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
&link->dp_link.test_video.test_hsync_pol,
&link->dp_link.test_video.test_hsync_width);
if (ret) {
DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n");
return ret;
}
ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
&link->dp_link.test_video.test_vsync_pol,
&link->dp_link.test_video.test_vsync_width);
if (ret) {
DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n");
return ret;
}
ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
&link->dp_link.test_video.test_h_width);
if (ret) {
DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n");
return ret;
}
ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
&link->dp_link.test_video.test_v_height);
if (ret) {
DRM_ERROR("failed to parse test_v_height\n");
return ret;
}
ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
&link->dp_link.test_video.test_rr_d);
link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
if (ret) {
DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n");
return ret;
}
ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
&link->dp_link.test_video.test_rr_n);
if (ret) {
DRM_ERROR("failed to parse test_rr_n\n");
return ret;
}
drm_dbg_dp(link->drm_dev,
"link video pattern = 0x%x\n"
"link dynamic range = 0x%x\n"
"link bit depth = 0x%x\n"
"TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n"
"TEST_H_START = %d, TEST_V_START = %d\n"
"TEST_HSYNC_POL = %d\n"
"TEST_HSYNC_WIDTH = %d\n"
"TEST_VSYNC_POL = %d\n"
"TEST_VSYNC_WIDTH = %d\n"
"TEST_H_WIDTH = %d\n"
"TEST_V_HEIGHT = %d\n"
"TEST_REFRESH_DENOMINATOR = %d\n"
"TEST_REFRESH_NUMERATOR = %d\n",
link->dp_link.test_video.test_video_pattern,
link->dp_link.test_video.test_dyn_range,
link->dp_link.test_video.test_bit_depth,
link->dp_link.test_video.test_h_total,
link->dp_link.test_video.test_v_total,
link->dp_link.test_video.test_h_start,
link->dp_link.test_video.test_v_start,
link->dp_link.test_video.test_hsync_pol,
link->dp_link.test_video.test_hsync_width,
link->dp_link.test_video.test_vsync_pol,
link->dp_link.test_video.test_vsync_width,
link->dp_link.test_video.test_h_width,
link->dp_link.test_video.test_v_height,
link->dp_link.test_video.test_rr_d,
link->dp_link.test_video.test_rr_n);
return ret;
}
/**
* dp_link_parse_link_training_params() - parses link training parameters from
* DPCD
* @link: Display Port Driver data
*
 * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
 * count (Byte 0x220), and if the parsed values are valid.
*/
static int dp_link_parse_link_training_params(struct dp_link_private *link)
{
u8 bp;
ssize_t rlen;
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE, &bp);
if (rlen < 0) {
DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen);
return rlen;
}
if (!is_link_rate_valid(bp)) {
DRM_ERROR("invalid link rate = 0x%x\n", bp);
return -EINVAL;
}
link->request.test_link_rate = bp;
drm_dbg_dp(link->drm_dev, "link rate = 0x%x\n",
link->request.test_link_rate);
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp);
if (rlen < 0) {
DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen);
return rlen;
}
bp &= DP_MAX_LANE_COUNT_MASK;
if (!is_lane_count_valid(bp)) {
DRM_ERROR("invalid lane count = 0x%x\n", bp);
return -EINVAL;
}
link->request.test_lane_count = bp;
drm_dbg_dp(link->drm_dev, "lane count = 0x%x\n",
link->request.test_lane_count);
return 0;
}
/**
 * dp_link_parse_phy_test_params() - parses the phy test parameters
 * @link: Display Port Driver data
 *
 * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
 * requested.
 */
static int dp_link_parse_phy_test_params(struct dp_link_private *link)
{
u8 data;
ssize_t rlen;
rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN,
&data);
if (rlen < 0) {
DRM_ERROR("failed to read phy link pattern. rlen=%zd\n", rlen);
return rlen;
}
link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data);
switch (data) {
case DP_PHY_TEST_PATTERN_SEL_MASK:
case DP_PHY_TEST_PATTERN_NONE:
case DP_PHY_TEST_PATTERN_D10_2:
case DP_PHY_TEST_PATTERN_ERROR_COUNT:
case DP_PHY_TEST_PATTERN_PRBS7:
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
case DP_PHY_TEST_PATTERN_CP2520:
return 0;
default:
return -EINVAL;
}
}
/**
 * dp_link_is_video_audio_test_requested() - checks for an audio/video test request
 * @link: test request bitmask from the sink
 *
 * Returns true if the requested test is a permitted audio/video test.
 */
static bool dp_link_is_video_audio_test_requested(u32 link)
{
u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
DP_TEST_LINK_AUDIO_PATTERN |
DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
return ((link & video_audio_test) &&
!(link & ~video_audio_test));
}
/**
* dp_link_parse_request() - parses link request parameters from sink
* @link: Display Port Driver data
*
 * Parses the DPCD to check if an automated test is requested (Byte 0x201),
 * and what type of test automation is being requested (Byte 0x218).
*/
static int dp_link_parse_request(struct dp_link_private *link)
{
int ret = 0;
u8 data;
ssize_t rlen;
/*
 * Read the device service IRQ vector (Byte 0x201) to determine
 * whether an automated test has been requested by the sink.
 */
rlen = drm_dp_dpcd_readb(link->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR, &data);
if (rlen < 0) {
DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
return rlen;
}
drm_dbg_dp(link->drm_dev, "device service irq vector = 0x%x\n", data);
if (!(data & DP_AUTOMATED_TEST_REQUEST)) {
drm_dbg_dp(link->drm_dev, "no test requested\n");
return 0;
}
/*
 * Read the test request byte (Byte 0x218) to determine what type
 * of automated test has been requested by the sink.
 */
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data);
if (rlen < 0) {
DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
return rlen;
}
if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) {
drm_dbg_dp(link->drm_dev, "link 0x%x not supported\n", data);
goto end;
}
drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data);
link->request.test_requested = data;
if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
ret = dp_link_parse_phy_test_params(link);
if (ret)
goto end;
ret = dp_link_parse_link_training_params(link);
if (ret)
goto end;
}
if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
ret = dp_link_parse_link_training_params(link);
if (ret)
goto end;
}
if (dp_link_is_video_audio_test_requested(
link->request.test_requested)) {
ret = dp_link_parse_video_pattern_params(link);
if (ret)
goto end;
ret = dp_link_parse_audio_pattern_params(link);
}
end:
/*
* Send a DP_TEST_ACK if all link parameters are valid, otherwise send
* a DP_TEST_NAK.
*/
if (ret) {
link->dp_link.test_response = DP_TEST_NAK;
} else {
if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
link->dp_link.test_response = DP_TEST_ACK;
else
link->dp_link.test_response =
DP_TEST_EDID_CHECKSUM_WRITE;
}
return ret;
}
/**
* dp_link_parse_sink_count() - parses the sink count
* @dp_link: pointer to link module data
*
* Parses the DPCD to check if there is an update to the sink count
* (Byte 0x200), and whether all the sink devices connected have Content
* Protection enabled.
*/
static int dp_link_parse_sink_count(struct dp_link *dp_link)
{
ssize_t rlen;
bool cp_ready;
struct dp_link_private *link = container_of(dp_link,
struct dp_link_private, dp_link);
rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
&link->dp_link.sink_count);
if (rlen < 0) {
DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
return rlen;
}
cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
link->dp_link.sink_count =
DP_GET_SINK_COUNT(link->dp_link.sink_count);
drm_dbg_dp(link->drm_dev, "sink_count = 0x%x, cp_ready = 0x%x\n",
link->dp_link.sink_count, cp_ready);
return 0;
}
static int dp_link_parse_sink_status_field(struct dp_link_private *link)
{
int len = 0;
link->prev_sink_count = link->dp_link.sink_count;
len = dp_link_parse_sink_count(&link->dp_link);
if (len < 0) {
DRM_ERROR("DP parse sink count failed\n");
return len;
}
len = drm_dp_dpcd_read_link_status(link->aux,
link->link_status);
if (len < DP_LINK_STATUS_SIZE) {
DRM_ERROR("DP link status read failed\n");
return len;
}
return dp_link_parse_request(link);
}
/**
* dp_link_process_link_training_request() - processes new training requests
* @link: Display Port link data
*
* This function will handle new link training requests that are initiated by
* the sink. In particular, it will update the requested lane count and link
* rate, and then trigger the link retraining procedure.
*
* The function will return 0 if a link training request has been processed,
* otherwise it will return -EINVAL.
*/
static int dp_link_process_link_training_request(struct dp_link_private *link)
{
if (link->request.test_requested != DP_TEST_LINK_TRAINING)
return -EINVAL;
drm_dbg_dp(link->drm_dev,
"Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
DP_TEST_LINK_TRAINING,
link->request.test_link_rate,
link->request.test_lane_count);
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
link->dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
return 0;
}
bool dp_link_send_test_response(struct dp_link *dp_link)
{
struct dp_link_private *link = NULL;
int ret = 0;
if (!dp_link) {
DRM_ERROR("invalid input\n");
return false;
}
link = container_of(dp_link, struct dp_link_private, dp_link);
ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
dp_link->test_response);
return ret == 1;
}
int dp_link_psm_config(struct dp_link *dp_link,
struct dp_link_info *link_info, bool enable)
{
struct dp_link_private *link = NULL;
int ret = 0;
if (!dp_link) {
DRM_ERROR("invalid params\n");
return -EINVAL;
}
link = container_of(dp_link, struct dp_link_private, dp_link);
mutex_lock(&link->psm_mutex);
if (enable)
ret = dp_aux_link_power_down(link->aux, link_info);
else
ret = dp_aux_link_power_up(link->aux, link_info);
if (ret)
DRM_ERROR("Failed to %s low power mode\n", enable ?
"enter" : "exit");
else
dp_link->psm_enabled = enable;
mutex_unlock(&link->psm_mutex);
return ret;
}
bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
{
struct dp_link_private *link = NULL;
int ret = 0;
if (!dp_link) {
DRM_ERROR("invalid input\n");
return false;
}
link = container_of(dp_link, struct dp_link_private, dp_link);
ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
checksum);
return ret == 1;
}
static void dp_link_parse_vx_px(struct dp_link_private *link)
{
drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
drm_dp_get_adjust_request_voltage(link->link_status, 0),
drm_dp_get_adjust_request_voltage(link->link_status, 1),
drm_dp_get_adjust_request_voltage(link->link_status, 2),
drm_dp_get_adjust_request_voltage(link->link_status, 3));
drm_dbg_dp(link->drm_dev, "px: 0=%d, 1=%d, 2=%d, 3=%d\n",
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0),
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1),
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2),
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3));
/*
 * Update the voltage and pre-emphasis levels as per the DPCD request
 * vector.
 */
drm_dbg_dp(link->drm_dev,
"Current: v_level = 0x%x, p_level = 0x%x\n",
link->dp_link.phy_params.v_level,
link->dp_link.phy_params.p_level);
link->dp_link.phy_params.v_level =
drm_dp_get_adjust_request_voltage(link->link_status, 0);
link->dp_link.phy_params.p_level =
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
drm_dbg_dp(link->drm_dev,
"Requested: v_level = 0x%x, p_level = 0x%x\n",
link->dp_link.phy_params.v_level,
link->dp_link.phy_params.p_level);
}
/**
 * dp_link_process_phy_test_pattern_request() - process new phy test requests
 * @link: Display Port Driver data
 *
 * This function will handle new phy test pattern requests that are initiated
 * by the sink. The function will return 0 if a phy test pattern has been
 * processed, otherwise it will return -EINVAL.
 */
static int dp_link_process_phy_test_pattern_request(
struct dp_link_private *link)
{
if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
drm_dbg_dp(link->drm_dev, "no phy test\n");
return -EINVAL;
}
if (!is_link_rate_valid(link->request.test_link_rate) ||
!is_lane_count_valid(link->request.test_lane_count)) {
DRM_ERROR("Invalid: link rate = 0x%x,lane count = 0x%x\n",
link->request.test_link_rate,
link->request.test_lane_count);
return -EINVAL;
}
drm_dbg_dp(link->drm_dev,
"Current: rate = 0x%x, lane count = 0x%x\n",
link->dp_link.link_params.rate,
link->dp_link.link_params.num_lanes);
drm_dbg_dp(link->drm_dev,
"Requested: rate = 0x%x, lane count = 0x%x\n",
link->request.test_link_rate,
link->request.test_lane_count);
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
link->dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
dp_link_parse_vx_px(link);
return 0;
}
static bool dp_link_read_psr_error_status(struct dp_link_private *link)
{
u8 status;
drm_dp_dpcd_read(link->aux, DP_PSR_ERROR_STATUS, &status, 1);
if (status & DP_PSR_LINK_CRC_ERROR)
DRM_ERROR("PSR LINK CRC ERROR\n");
else if (status & DP_PSR_RFB_STORAGE_ERROR)
DRM_ERROR("PSR RFB STORAGE ERROR\n");
else if (status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
DRM_ERROR("PSR VSC SDP UNCORRECTABLE ERROR\n");
else
return false;
return true;
}
static bool dp_link_psr_capability_changed(struct dp_link_private *link)
{
u8 status;
drm_dp_dpcd_read(link->aux, DP_PSR_ESI, &status, 1);
if (status & DP_PSR_CAPS_CHANGE) {
drm_dbg_dp(link->drm_dev, "PSR Capability Change\n");
return true;
}
return false;
}
static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
/**
* dp_link_process_link_status_update() - processes link status updates
* @link: Display Port link module data
*
* This function will check for changes in the link status, e.g. clock
* recovery done on all lanes, and trigger link training if there is a
* failure/error on the link.
*
 * The function will return 0 if a link status update has been processed,
* otherwise it will return -EINVAL.
*/
static int dp_link_process_link_status_update(struct dp_link_private *link)
{
bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
link->dp_link.link_params.num_lanes);
bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
link->dp_link.link_params.num_lanes);
drm_dbg_dp(link->drm_dev,
"channel_eq_done = %d, clock_recovery_done = %d\n",
channel_eq_done, clock_recovery_done);
if (channel_eq_done && clock_recovery_done)
return -EINVAL;
return 0;
}
/**
* dp_link_process_ds_port_status_change() - process port status changes
* @link: Display Port Driver data
*
* This function will handle downstream port updates that are initiated by
* the sink. If the downstream port status has changed, the EDID is read via
* AUX.
*
* The function will return 0 if a downstream port update has been
* processed, otherwise it will return -EINVAL.
*/
static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
{
if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
DP_DOWNSTREAM_PORT_STATUS_CHANGED)
goto reset;
if (link->prev_sink_count == link->dp_link.sink_count)
return -EINVAL;
reset:
/* reset prev_sink_count */
link->prev_sink_count = link->dp_link.sink_count;
return 0;
}
static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
{
return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
&& !(link->request.test_requested &
DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
}
static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
{
return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
}
static void dp_link_reset_data(struct dp_link_private *link)
{
link->request = (const struct dp_link_request){ 0 };
link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
link->dp_link.phy_params.phy_test_pattern_sel = 0;
link->dp_link.sink_request = 0;
link->dp_link.test_response = 0;
}
/**
* dp_link_process_request() - handle HPD IRQ transition to HIGH
* @dp_link: pointer to link module data
*
* This function will handle the HPD IRQ state transitions from LOW to HIGH
* (including cases where there are back-to-back HPD IRQ HIGH events)
* the start of a new link training request or sink status update.
*/
int dp_link_process_request(struct dp_link *dp_link)
{
int ret = 0;
struct dp_link_private *link;
if (!dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
link = container_of(dp_link, struct dp_link_private, dp_link);
dp_link_reset_data(link);
ret = dp_link_parse_sink_status_field(link);
if (ret)
return ret;
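/*
 * Sink requests are dispatched in fixed priority order: EDID read,
 * then downstream port change, link training, PHY test pattern,
 * PSR error/capability events, and finally plain link status updates
 * with any video/audio pattern requests.
 */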
if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
} else if (!dp_link_process_ds_port_status_change(link)) {
dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
} else if (!dp_link_process_link_training_request(link)) {
dp_link->sink_request |= DP_TEST_LINK_TRAINING;
} else if (!dp_link_process_phy_test_pattern_request(link)) {
dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
} else if (dp_link_read_psr_error_status(link)) {
DRM_ERROR("PSR IRQ_HPD received\n");
} else if (dp_link_psr_capability_changed(link)) {
drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
} else {
ret = dp_link_process_link_status_update(link);
if (!ret) {
dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
} else {
if (dp_link_is_video_pattern_requested(link)) {
ret = 0;
dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
}
if (dp_link_is_audio_pattern_requested(link)) {
dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
ret = -EINVAL;
}
}
}
drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
dp_link->sink_request);
return ret;
}
int dp_link_get_colorimetry_config(struct dp_link *dp_link)
{
u32 cc;
struct dp_link_private *link;
if (!dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
link = container_of(dp_link, struct dp_link_private, dp_link);
/*
* Unless a video pattern CTS test is ongoing, use RGB_VESA.
* Only RGB_VESA and RGB_CEA are supported for now.
*/
if (dp_link_is_video_pattern_requested(link))
cc = link->dp_link.test_video.test_dyn_range;
else
cc = DP_TEST_DYNAMIC_RANGE_VESA;
return cc;
}
int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
{
int i;
int v_max = 0, p_max = 0;
struct dp_link_private *link;
if (!dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
link = container_of(dp_link, struct dp_link_private, dp_link);
/* use the max level across lanes */
for (i = 0; i < dp_link->link_params.num_lanes; i++) {
u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
i);
drm_dbg_dp(link->drm_dev,
"lane=%d req_vol_swing=%d req_pre_emphasis=%d\n",
i, data_v, data_p);
if (v_max < data_v)
v_max = data_v;
if (p_max < data_p)
p_max = data_p;
}
dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
/*
* Adjust the voltage swing and pre-emphasis level combination to within
* the allowable range.
*/
if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested vSwingLevel=%d, change to %d\n",
dp_link->phy_params.v_level,
DP_TRAIN_VOLTAGE_SWING_MAX);
dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
}
if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
DP_TRAIN_PRE_EMPHASIS_MAX);
dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
}
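/*
 * Some swing/pre-emphasis combinations exceed what the PHY can drive,
 * so a high pre-emphasis request is capped when it is paired with the
 * highest requested voltage swing level.
 */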
if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
&& (dp_link->phy_params.v_level ==
DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
DP_TRAIN_PRE_EMPHASIS_LVL_1);
dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
}
drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
dp_link->phy_params.v_level, dp_link->phy_params.p_level);
return 0;
}
void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
{
dp_link->phy_params.v_level = 0;
dp_link->phy_params.p_level = 0;
}
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
{
u32 tbd;
/*
* A few simplifying rules and assumptions are made here:
* 1. Test bit depth is bit depth per color component
* 2. Assume 3 color components
*/
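/*
 * For example, bpp = 24 maps to 8 bpc per component, i.e.
 * DP_TEST_BIT_DEPTH_8; the down-shift at the end returns the raw field
 * encoding rather than the TEST_MISC register mask value.
 */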
switch (bpp) {
case 18:
tbd = DP_TEST_BIT_DEPTH_6;
break;
case 24:
tbd = DP_TEST_BIT_DEPTH_8;
break;
case 30:
tbd = DP_TEST_BIT_DEPTH_10;
break;
default:
tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
break;
}
if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
return tbd;
}
struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
{
struct dp_link_private *link;
struct dp_link *dp_link;
if (!dev || !aux) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
if (!link)
return ERR_PTR(-ENOMEM);
link->dev = dev;
link->aux = aux;
mutex_init(&link->psm_mutex);
dp_link = &link->dp_link;
return dp_link;
}
| linux-master | drivers/gpu/drm/msm/dp/dp_link.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "dp_drm.h"
/**
* dp_bridge_detect - callback to determine if connector is connected
* @bridge: Pointer to drm bridge structure
* Returns: Bridge's 'is connected' status
*/
static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
{
struct msm_dp *dp;
dp = to_dp_bridge(bridge)->dp_display;
drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
(dp->is_connected) ? "true" : "false");
return (dp->is_connected) ? connector_status_connected :
connector_status_disconnected;
}
static int dp_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct msm_dp *dp;
dp = to_dp_bridge(bridge)->dp_display;
drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
(dp->is_connected) ? "true" : "false");
/*
* There is no protection in the DRM framework to check if the display
* pipeline has already been disabled before trying to disable it again.
* Hence if the sink is unplugged, the pipeline gets disabled, but the
* crtc->active is still true. Any attempt to set the mode or manually
* disable this encoder will result in a crash.
*
* TODO: add support for telling the DRM subsystem that the pipeline is
* disabled by the hardware and thus all access to it should be forbidden.
* After that this piece of code can be removed.
*/
if (bridge->ops & DRM_BRIDGE_OP_HPD)
return (dp->is_connected) ? 0 : -ENOTCONN;
return 0;
}
/**
* dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
* @bridge: Pointer to drm bridge
* @connector: Pointer to drm connector structure
* Returns: Number of modes added
*/
static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
{
int rc = 0;
struct msm_dp *dp;
if (!connector)
return 0;
dp = to_dp_bridge(bridge)->dp_display;
/* the pluggable case assumes the EDID is read on HPD assertion */
if (dp->is_connected) {
rc = dp_display_get_modes(dp);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
return rc;
}
} else {
drm_dbg_dp(connector->dev, "No sink connected\n");
}
return rc;
}
static const struct drm_bridge_funcs dp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = dp_bridge_atomic_enable,
.atomic_disable = dp_bridge_atomic_disable,
.atomic_post_disable = dp_bridge_atomic_post_disable,
.mode_set = dp_bridge_mode_set,
.mode_valid = dp_bridge_mode_valid,
.get_modes = dp_bridge_get_modes,
.detect = dp_bridge_detect,
.atomic_check = dp_bridge_atomic_check,
.hpd_enable = dp_bridge_hpd_enable,
.hpd_disable = dp_bridge_hpd_disable,
.hpd_notify = dp_bridge_hpd_notify,
};
static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct msm_dp *dp = to_dp_bridge(drm_bridge)->dp_display;
if (WARN_ON(!conn_state))
return -ENODEV;
conn_state->self_refresh_aware = dp->psr_supported;
if (!conn_state->crtc || !crtc_state)
return 0;
if (crtc_state->self_refresh_active && !dp->psr_supported)
return -EINVAL;
return 0;
}
static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
/*
* Check the old state of the crtc to determine if the panel
* was put into psr state previously by the edp_bridge_atomic_disable.
* If the panel is in psr, just exit psr state and skip the full
* bridge enable sequence.
*/
crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state,
drm_bridge->encoder);
if (!crtc)
return;
old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
if (old_crtc_state && old_crtc_state->self_refresh_active) {
dp_display_set_psr(dp, false);
return;
}
dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
}
static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL;
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state,
drm_bridge->encoder);
if (!crtc)
goto out;
new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc);
if (!new_crtc_state)
goto out;
old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
if (!old_crtc_state)
goto out;
/*
* Set self refresh mode if current crtc state is active.
*
* If old crtc state is active, then this is a display disable
* call while the sink is in psr state. So, exit psr here.
* The eDP controller will be disabled in the
* edp_bridge_atomic_post_disable function.
*
* We observed sink is stuck in self refresh if psr exit is skipped
* when display disable occurs while the sink is in psr state.
*/
if (new_crtc_state->self_refresh_active) {
dp_display_set_psr(dp, true);
return;
} else if (old_crtc_state->self_refresh_active) {
dp_display_set_psr(dp, false);
return;
}
out:
dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
}
static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state,
drm_bridge->encoder);
if (!crtc)
return;
new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc);
if (!new_crtc_state)
return;
/*
* Self refresh mode is already set in edp_bridge_atomic_disable.
*/
if (new_crtc_state->self_refresh_active)
return;
dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
}
/**
* edp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
* @info: display info
* @mode: Pointer to drm mode structure
* Returns: Validity status for specified mode
*/
static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct msm_dp *dp;
int mode_pclk_khz = mode->clock;
dp = to_dp_bridge(bridge)->dp_display;
if (!dp || !mode_pclk_khz || !dp->connector) {
DRM_ERROR("invalid params\n");
return MODE_ERROR;
}
if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
/*
* The eDP controller currently does not have a reliable way of
* enabling panel power to read sink capabilities. So, we rely
* on the panel driver to populate only supported modes for now.
*/
return MODE_OK;
}
static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_enable = edp_bridge_atomic_enable,
.atomic_disable = edp_bridge_atomic_disable,
.atomic_post_disable = edp_bridge_atomic_post_disable,
.mode_set = dp_bridge_mode_set,
.mode_valid = edp_bridge_mode_valid,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_check = edp_bridge_atomic_check,
};
struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
int rc;
struct msm_dp_bridge *dp_bridge;
struct drm_bridge *bridge;
dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
if (!dp_bridge)
return ERR_PTR(-ENOMEM);
dp_bridge->dp_display = dp_display;
bridge = &dp_bridge->bridge;
bridge->funcs = dp_display->is_edp ? &edp_bridge_ops : &dp_bridge_ops;
bridge->type = dp_display->connector_type;
/*
* Many ops only make sense for DP. Why?
* - Detect/HPD are used by DRM to know if a display is _physically_
* there, not whether the display is powered on / finished initting.
* On eDP we assume the display is always there because you can't
* detect it until power is applied. If we don't implement the ops DRM will
* assume our display is always there.
* - Currently eDP mode reading is driven by the panel driver. This
* allows the panel driver to properly power itself on to read the
* modes.
*/
if (!dp_display->is_edp) {
bridge->ops =
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_MODES;
}
drm_bridge_add(bridge);
rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc) {
DRM_ERROR("failed to attach bridge, rc=%d\n", rc);
drm_bridge_remove(bridge);
return ERR_PTR(rc);
}
if (dp_display->next_bridge) {
rc = drm_bridge_attach(encoder,
dp_display->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", rc);
drm_bridge_remove(bridge);
return ERR_PTR(rc);
}
}
return bridge;
}
/* connector initialization */
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
connector = drm_bridge_connector_init(dp_display->drm_dev, encoder);
if (IS_ERR(connector))
return connector;
drm_connector_attach_encoder(connector, encoder);
return connector;
}
| linux-master | drivers/gpu/drm/msm/dp/dp_drm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
#include "dp_ctrl.h"
#include "dp_link.h"
#define DP_KHZ_TO_HZ 1000
#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */
#define PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES (300 * HZ / 1000) /* 300 ms */
#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
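/*
 * Note: open-coded ms-to-jiffies conversions such as (30 * HZ / 1000)
 * truncate with coarse HZ values (e.g. HZ=250 yields 7 jiffies, i.e.
 * 28 ms); msecs_to_jiffies() would round up instead.
 */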
#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
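/*
 * Readback values reported by the mainlink for the PHY test pattern
 * currently being transmitted; matched against the sink's request in
 * dp_ctrl_send_phy_test_pattern().
 */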
#define MR_LINK_TRAINING1 0x8
#define MR_LINK_SYMBOL_ERM 0x80
#define MR_LINK_PRBS7 0x100
#define MR_LINK_CUSTOM80 0x200
#define MR_LINK_TRAINING4 0x40
enum {
DP_TRAINING_NONE,
DP_TRAINING_1,
DP_TRAINING_2,
};
struct dp_tu_calc_input {
u64 lclk; /* link symbol clock in MHz: 162, 270, 540 or 810 */
u64 pclk_khz; /* in KHz */
u64 hactive; /* active h-width */
u64 hporch; /* bp + fp + pulse */
int nlanes; /* no.of.lanes */
int bpp; /* bits */
int pixel_enc; /* 444, 420, 422 */
int dsc_en; /* dsc on/off */
int async_en; /* async mode */
int fec_en; /* fec */
int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */
int num_of_dsc_slices; /* number of slices per line */
};
struct dp_vc_tu_mapping_table {
u32 vic;
u8 lanes;
u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
u8 bpp;
u8 valid_boundary_link;
u16 delay_start_link;
bool boundary_moderation_en;
u8 valid_lower_boundary_link;
u8 upper_boundary_count;
u8 lower_boundary_count;
u8 tu_size_minus1;
};
struct dp_ctrl_private {
struct dp_ctrl dp_ctrl;
struct drm_device *drm_dev;
struct device *dev;
struct drm_dp_aux *aux;
struct dp_panel *panel;
struct dp_link *link;
struct dp_power *power;
struct dp_parser *parser;
struct dp_catalog *catalog;
struct completion idle_comp;
struct completion psr_op_comp;
struct completion video_comp;
};
static int dp_aux_link_configure(struct drm_dp_aux *aux,
struct dp_link_info *link)
{
u8 values[2];
int err;
values[0] = drm_dp_link_rate_to_bw_code(link->rate);
values[1] = link->num_lanes;
if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
if (err < 0)
return err;
return 0;
}
void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
reinit_completion(&ctrl->idle_comp);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
if (!wait_for_completion_timeout(&ctrl->idle_comp,
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
pr_warn("PUSH_IDLE pattern timed out\n");
drm_dbg_dp(ctrl->drm_dev, "mainlink off\n");
}
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
const u8 *dpcd = ctrl->panel->dpcd;
/* Default-> LSCLK DIV: 1/4 LCLK */
config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
/* Scrambler reset enable */
if (drm_dp_alternate_scrambler_reset_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ASSR;
tbd = dp_link_get_test_bits_depth(ctrl->link,
ctrl->panel->dp_mode.bpp);
if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
pr_debug("BIT_DEPTH not set. Configure default\n");
/* dp_link_get_test_bits_depth() returns the already-shifted field value */
tbd = DP_TEST_BIT_DEPTH_8 >> DP_TEST_BIT_DEPTH_SHIFT;
}
config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
/* Num of Lanes */
config |= ((ctrl->link->link_params.num_lanes - 1)
<< DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT);
if (drm_dp_enhanced_frame_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */
/* sync clock & static Mvid */
config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN;
config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK;
if (ctrl->panel->psr_cap.version)
config |= DP_CONFIGURATION_CTRL_SEND_VSC;
dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
}
static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
{
u32 cc, tb;
dp_catalog_ctrl_lane_mapping(ctrl->catalog);
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
dp_ctrl_config_ctrl(ctrl);
tb = dp_link_get_test_bits_depth(ctrl->link,
ctrl->panel->dp_mode.bpp);
cc = dp_link_get_colorimetry_config(ctrl->link);
dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
dp_panel_timing_cfg(ctrl->panel);
}
/*
* The structure and the functions below are an IP/hardware-specific
* implementation; most of the implementation carries no inline
* comments.
*/
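/*
 * A rough sketch of the core quantity computed below (see
 * _dp_ctrl_calc_tu()): the TU "ratio" is the fraction of link symbol
 * slots that carry pixel data,
 *
 *	ratio = (pclk * bpp / 8) / (nlanes * lclk)
 *
 * e.g. a 148.5 MHz pixel clock at 24 bpp over 4 lanes of HBR2
 * (lclk = 540 MHz) gives 445.5 / 2160 ~= 0.206, so a 64-symbol TU
 * carries ceil(64 * 0.206) = 14 valid symbols. The search below picks
 * the TU size (and boundary moderation parameters) that minimizes the
 * quantization error of that product.
 */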
struct tu_algo_data {
s64 lclk_fp;
s64 pclk_fp;
s64 lwidth;
s64 lwidth_fp;
s64 hbp_relative_to_pclk;
s64 hbp_relative_to_pclk_fp;
int nlanes;
int bpp;
int pixelEnc;
int dsc_en;
int async_en;
int bpc;
uint delay_start_link_extra_pixclk;
int extra_buffer_margin;
s64 ratio_fp;
s64 original_ratio_fp;
s64 err_fp;
s64 n_err_fp;
s64 n_n_err_fp;
int tu_size;
int tu_size_desired;
int tu_size_minus1;
int valid_boundary_link;
s64 resulting_valid_fp;
s64 total_valid_fp;
s64 effective_valid_fp;
s64 effective_valid_recorded_fp;
int n_tus;
int n_tus_per_lane;
int paired_tus;
int remainder_tus;
int remainder_tus_upper;
int remainder_tus_lower;
int extra_bytes;
int filler_size;
int delay_start_link;
int extra_pclk_cycles;
int extra_pclk_cycles_in_link_clk;
s64 ratio_by_tu_fp;
s64 average_valid2_fp;
int new_valid_boundary_link;
int remainder_symbols_exist;
int n_symbols;
s64 n_remainder_symbols_per_lane_fp;
s64 last_partial_tu_fp;
s64 TU_ratio_err_fp;
int n_tus_incl_last_incomplete_tu;
int extra_pclk_cycles_tmp;
int extra_pclk_cycles_in_link_clk_tmp;
int extra_required_bytes_new_tmp;
int filler_size_tmp;
int lower_filler_size_tmp;
int delay_start_link_tmp;
bool boundary_moderation_en;
int boundary_mod_lower_err;
int upper_boundary_count;
int lower_boundary_count;
int i_upper_boundary_count;
int i_lower_boundary_count;
int valid_lower_boundary_link;
int even_distribution_BF;
int even_distribution_legacy;
int even_distribution;
int min_hblank_violated;
s64 delay_start_time_fp;
s64 hbp_time_fp;
s64 hactive_time_fp;
s64 diff_abs_fp;
s64 ratio;
};
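/*
 * Three-way compare for the fixed-point values above: returns 0 when
 * a == b, 1 when a > b and 2 when a < b. Callers collapse these into
 * booleans depending on which direction they care about.
 */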
static int _tu_param_compare(s64 a, s64 b)
{
u32 a_sign;
u32 b_sign;
s64 a_temp, b_temp, minus_1;
if (a == b)
return 0;
minus_1 = drm_fixp_from_fraction(-1, 1);
a_sign = (a >> 32) & 0x80000000 ? 1 : 0;
b_sign = (b >> 32) & 0x80000000 ? 1 : 0;
if (a_sign > b_sign)
return 2;
else if (b_sign > a_sign)
return 1;
if (!a_sign && !b_sign) { /* positive */
if (a > b)
return 1;
else
return 2;
} else { /* negative */
a_temp = drm_fixp_mul(a, minus_1);
b_temp = drm_fixp_mul(b, minus_1);
if (a_temp > b_temp)
return 2;
else
return 1;
}
}
static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
struct tu_algo_data *tu)
{
int nlanes = in->nlanes;
int dsc_num_slices = in->num_of_dsc_slices;
int dsc_num_bytes = 0;
int numerator;
s64 pclk_dsc_fp;
s64 dwidth_dsc_fp;
s64 hbp_dsc_fp;
int tot_num_eoc_symbols = 0;
int tot_num_hor_bytes = 0;
int tot_num_dummy_bytes = 0;
int dwidth_dsc_bytes = 0;
int eoc_bytes = 0;
s64 temp1_fp, temp2_fp, temp3_fp;
tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1);
tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000);
tu->lwidth = in->hactive;
tu->hbp_relative_to_pclk = in->hporch;
tu->nlanes = in->nlanes;
tu->bpp = in->bpp;
tu->pixelEnc = in->pixel_enc;
tu->dsc_en = in->dsc_en;
tu->async_en = in->async_en;
tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1);
tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1);
if (tu->pixelEnc == 420) {
temp1_fp = drm_fixp_from_fraction(2, 1);
tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp);
tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp);
/* divide by the fixed-point 2, not the plain integer 2 */
tu->hbp_relative_to_pclk_fp =
drm_fixp_div(tu->hbp_relative_to_pclk_fp, temp1_fp);
}
if (tu->pixelEnc == 422) {
switch (tu->bpp) {
case 24:
tu->bpp = 16;
tu->bpc = 8;
break;
case 30:
tu->bpp = 20;
tu->bpc = 10;
break;
default:
tu->bpp = 16;
tu->bpc = 8;
break;
}
} else {
tu->bpc = tu->bpp/3;
}
if (!in->dsc_en)
goto fec_check;
temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100);
temp2_fp = drm_fixp_from_fraction(in->bpp, 1);
temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp);
temp1_fp = drm_fixp_from_fraction(8, 1);
temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
numerator = drm_fixp2int(temp3_fp);
dsc_num_bytes = numerator / dsc_num_slices;
eoc_bytes = dsc_num_bytes % nlanes;
tot_num_eoc_symbols = nlanes * dsc_num_slices;
tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices;
tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices;
if (dsc_num_bytes == 0)
pr_info("incorrect number of bytes per slice=%d\n", dsc_num_bytes);
dwidth_dsc_bytes = (tot_num_hor_bytes +
tot_num_eoc_symbols +
(eoc_bytes == 0 ? 0 : tot_num_dummy_bytes));
dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3);
temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp);
temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp);
pclk_dsc_fp = temp1_fp;
temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp);
temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp);
hbp_dsc_fp = temp2_fp;
/* output */
tu->pclk_fp = pclk_dsc_fp;
tu->lwidth_fp = dwidth_dsc_fp;
tu->hbp_relative_to_pclk_fp = hbp_dsc_fp;
fec_check:
if (in->fec_en) {
temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976: FEC leaves ~97.6% of link bandwidth */
tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp);
}
}
static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
{
s64 temp1_fp, temp2_fp, temp, temp1, temp2;
int compare_result_1, compare_result_2, compare_result_3;
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
temp = (tu->i_upper_boundary_count *
tu->new_valid_boundary_link +
tu->i_lower_boundary_count *
(tu->new_valid_boundary_link-1));
tu->average_valid2_fp = drm_fixp_from_fraction(temp,
(tu->i_upper_boundary_count +
tu->i_lower_boundary_count));
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
temp2_fp = tu->lwidth_fp;
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
tu->n_tus = drm_fixp2int(temp2_fp);
if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
tu->n_tus += 1;
temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1);
temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp);
temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1);
temp2_fp = temp1_fp - temp2_fp;
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
tu->n_remainder_symbols_per_lane_fp = temp2_fp;
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
tu->last_partial_tu_fp =
drm_fixp_div(tu->n_remainder_symbols_per_lane_fp,
temp1_fp);
if (tu->n_remainder_symbols_per_lane_fp != 0)
tu->remainder_symbols_exist = 1;
else
tu->remainder_symbols_exist = 0;
temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes);
tu->n_tus_per_lane = drm_fixp2int(temp1_fp);
tu->paired_tus = (int)((tu->n_tus_per_lane) /
(tu->i_upper_boundary_count +
tu->i_lower_boundary_count));
tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus *
(tu->i_upper_boundary_count +
tu->i_lower_boundary_count);
if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) {
tu->remainder_tus_upper = tu->i_upper_boundary_count;
tu->remainder_tus_lower = tu->remainder_tus -
tu->i_upper_boundary_count;
} else {
tu->remainder_tus_upper = tu->remainder_tus;
tu->remainder_tus_lower = 0;
}
temp = tu->paired_tus * (tu->i_upper_boundary_count *
tu->new_valid_boundary_link +
tu->i_lower_boundary_count *
(tu->new_valid_boundary_link - 1)) +
(tu->remainder_tus_upper *
tu->new_valid_boundary_link) +
(tu->remainder_tus_lower *
(tu->new_valid_boundary_link - 1));
tu->total_valid_fp = drm_fixp_from_fraction(temp, 1);
if (tu->remainder_symbols_exist) {
temp1_fp = tu->total_valid_fp +
tu->n_remainder_symbols_per_lane_fp;
temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
temp2_fp = temp2_fp + tu->last_partial_tu_fp;
temp1_fp = drm_fixp_div(temp1_fp, temp2_fp);
} else {
temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp);
}
tu->effective_valid_fp = temp1_fp;
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp;
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
tu->n_err_fp = tu->average_valid2_fp - temp2_fp;
tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
temp2_fp = tu->lwidth_fp;
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
if (temp2_fp)
tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp);
else
tu->n_tus_incl_last_incomplete_tu = 0;
temp1 = 0;
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
temp1_fp = tu->average_valid2_fp - temp2_fp;
temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1);
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
if (temp1_fp)
temp1 = drm_fixp2int_ceil(temp1_fp);
temp = tu->i_upper_boundary_count * tu->nlanes;
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1);
temp2_fp = temp1_fp - temp2_fp;
temp1_fp = drm_fixp_from_fraction(temp, 1);
temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
if (temp2_fp)
temp2 = drm_fixp2int_ceil(temp2_fp);
else
temp2 = 0;
tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2);
temp1_fp = drm_fixp_from_fraction(8, tu->bpp);
temp2_fp = drm_fixp_from_fraction(
tu->extra_required_bytes_new_tmp, 1);
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
if (temp1_fp)
tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp);
else
tu->extra_pclk_cycles_tmp = 0;
temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1);
temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
if (temp1_fp)
tu->extra_pclk_cycles_in_link_clk_tmp =
drm_fixp2int_ceil(temp1_fp);
else
tu->extra_pclk_cycles_in_link_clk_tmp = 0;
tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link;
tu->lower_filler_size_tmp = tu->filler_size_tmp + 1;
tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp +
tu->lower_filler_size_tmp +
tu->extra_buffer_margin;
temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1);
tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp);
if (compare_result_1 == 2)
compare_result_1 = 1;
else
compare_result_1 = 0;
compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp);
if (compare_result_2 == 2)
compare_result_2 = 1;
else
compare_result_2 = 0;
compare_result_3 = _tu_param_compare(tu->hbp_time_fp,
tu->delay_start_time_fp);
if (compare_result_3 == 2)
compare_result_3 = 0;
else
compare_result_3 = 1;
if (((tu->even_distribution == 1) ||
((tu->even_distribution_BF == 0) &&
(tu->even_distribution_legacy == 0))) &&
tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 &&
compare_result_2 &&
(compare_result_1 || (tu->min_hblank_violated == 1)) &&
(tu->new_valid_boundary_link - 1) > 0 &&
compare_result_3 &&
(tu->delay_start_link_tmp <= 1023)) {
tu->upper_boundary_count = tu->i_upper_boundary_count;
tu->lower_boundary_count = tu->i_lower_boundary_count;
tu->err_fp = tu->n_n_err_fp;
tu->boundary_moderation_en = true;
tu->tu_size_desired = tu->tu_size;
tu->valid_boundary_link = tu->new_valid_boundary_link;
tu->effective_valid_recorded_fp = tu->effective_valid_fp;
tu->even_distribution_BF = 1;
tu->delay_start_link = tu->delay_start_link_tmp;
} else if (tu->boundary_mod_lower_err == 0) {
compare_result_1 = _tu_param_compare(tu->n_n_err_fp,
tu->diff_abs_fp);
if (compare_result_1 == 2)
tu->boundary_mod_lower_err = 1;
}
}
static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl,
struct dp_tu_calc_input *in,
struct dp_vc_tu_mapping_table *tu_table)
{
struct tu_algo_data *tu;
int compare_result_1, compare_result_2;
u64 temp = 0;
s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0;
s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */
s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */
s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */
s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000);
u8 DP_BRUTE_FORCE = 1;
s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */
uint EXTRA_PIXCLK_CYCLE_DELAY = 4;
uint HBLANK_MARGIN = 4;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (!tu)
return;
dp_panel_update_tu_timings(in, tu);
tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
temp1_fp = drm_fixp_from_fraction(4, 1);
temp2_fp = drm_fixp_mul(temp1_fp, tu->lclk_fp);
temp_fp = drm_fixp_div(temp2_fp, tu->pclk_fp);
tu->extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
temp2_fp = drm_fixp_mul(tu->pclk_fp, temp1_fp);
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
tu->ratio_fp = drm_fixp_div(temp2_fp, tu->lclk_fp);
tu->original_ratio_fp = tu->ratio_fp;
tu->boundary_moderation_en = false;
tu->upper_boundary_count = 0;
tu->lower_boundary_count = 0;
tu->i_upper_boundary_count = 0;
tu->i_lower_boundary_count = 0;
tu->valid_lower_boundary_link = 0;
tu->even_distribution_BF = 0;
tu->even_distribution_legacy = 0;
tu->even_distribution = 0;
tu->delay_start_time_fp = 0;
tu->err_fp = drm_fixp_from_fraction(1000, 1);
tu->n_err_fp = 0;
tu->n_n_err_fp = 0;
tu->ratio = drm_fixp2int(tu->ratio_fp);
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
div64_u64_rem(tu->lwidth_fp, temp1_fp, &temp2_fp);
if (temp2_fp != 0 &&
!tu->ratio && tu->dsc_en == 0) {
tu->ratio_fp = drm_fixp_mul(tu->ratio_fp, RATIO_SCALE_fp);
tu->ratio = drm_fixp2int(tu->ratio_fp);
if (tu->ratio)
tu->ratio_fp = drm_fixp_from_fraction(1, 1);
}
if (tu->ratio > 1)
tu->ratio = 1;
if (tu->ratio == 1)
goto tu_size_calc;
compare_result_1 = _tu_param_compare(tu->ratio_fp, const_p49_fp);
if (!compare_result_1 || compare_result_1 == 1)
compare_result_1 = 1;
else
compare_result_1 = 0;
compare_result_2 = _tu_param_compare(tu->ratio_fp, const_p56_fp);
if (!compare_result_2 || compare_result_2 == 2)
compare_result_2 = 1;
else
compare_result_2 = 0;
if (tu->dsc_en && compare_result_1 && compare_result_2) {
HBLANK_MARGIN += 4;
drm_dbg_dp(ctrl->drm_dev,
"increase HBLANK_MARGIN to %d\n", HBLANK_MARGIN);
}
tu_size_calc:
for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) {
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
temp = drm_fixp2int_ceil(temp2_fp);
temp1_fp = drm_fixp_from_fraction(temp, 1);
tu->n_err_fp = temp1_fp - temp2_fp;
if (tu->n_err_fp < tu->err_fp) {
tu->err_fp = tu->n_err_fp;
tu->tu_size_desired = tu->tu_size;
}
}
tu->tu_size_minus1 = tu->tu_size_desired - 1;
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
tu->valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
temp2_fp = tu->lwidth_fp;
temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp);
temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1);
temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
tu->n_tus = drm_fixp2int(temp2_fp);
if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
tu->n_tus += 1;
tu->even_distribution_legacy = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
drm_dbg_dp(ctrl->drm_dev,
"n_sym = %d, num_of_tus = %d\n",
tu->valid_boundary_link, tu->n_tus);
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1);
temp2_fp = temp1_fp - temp2_fp;
temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1);
temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
temp = drm_fixp2int(temp2_fp);
if (temp && temp2_fp)
tu->extra_bytes = drm_fixp2int_ceil(temp2_fp);
else
tu->extra_bytes = 0;
temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1);
temp2_fp = drm_fixp_from_fraction(8, tu->bpp);
temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
if (temp && temp1_fp)
tu->extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
else
tu->extra_pclk_cycles = drm_fixp2int(temp1_fp);
temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1);
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
if (temp1_fp)
tu->extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
else
tu->extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
tu->filler_size = tu->tu_size_desired - tu->valid_boundary_link;
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
tu->ratio_by_tu_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
tu->delay_start_link = tu->extra_pclk_cycles_in_link_clk +
tu->filler_size + tu->extra_buffer_margin;
tu->resulting_valid_fp =
drm_fixp_from_fraction(tu->valid_boundary_link, 1);
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
temp2_fp = drm_fixp_div(tu->resulting_valid_fp, temp1_fp);
tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp;
temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1);
temp1_fp = tu->hbp_relative_to_pclk_fp - temp1_fp;
tu->hbp_time_fp = drm_fixp_div(temp1_fp, tu->pclk_fp);
temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1);
tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
compare_result_1 = _tu_param_compare(tu->hbp_time_fp,
tu->delay_start_time_fp);
if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */
tu->min_hblank_violated = 1;
tu->hactive_time_fp = drm_fixp_div(tu->lwidth_fp, tu->pclk_fp);
compare_result_2 = _tu_param_compare(tu->hactive_time_fp,
tu->delay_start_time_fp);
if (compare_result_2 == 2)
tu->min_hblank_violated = 1;
tu->delay_start_time_fp = 0;
/* brute force */
tu->delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
tu->diff_abs_fp = tu->resulting_valid_fp - tu->ratio_by_tu_fp;
temp = drm_fixp2int(tu->diff_abs_fp);
if (!temp && tu->diff_abs_fp <= 0xffff)
tu->diff_abs_fp = 0;
/* if (diff_abs < 0), take the absolute value: diff_abs = -diff_abs */
if (tu->diff_abs_fp < 0)
tu->diff_abs_fp = -tu->diff_abs_fp;
tu->boundary_mod_lower_err = 0;
if ((tu->diff_abs_fp != 0 &&
((tu->diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
(tu->even_distribution_legacy == 0) ||
(DP_BRUTE_FORCE == 1))) ||
(tu->min_hblank_violated == 1)) {
do {
tu->err_fp = drm_fixp_from_fraction(1000, 1);
temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
temp2_fp = drm_fixp_from_fraction(
tu->delay_start_link_extra_pixclk, 1);
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
if (temp1_fp)
tu->extra_buffer_margin =
drm_fixp2int_ceil(temp1_fp);
else
tu->extra_buffer_margin = 0;
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp);
if (temp1_fp)
tu->n_symbols = drm_fixp2int_ceil(temp1_fp);
else
tu->n_symbols = 0;
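/*
 * Exhaustive search: 33 TU sizes times 15 x 15 upper/lower boundary
 * count pairs, i.e. 7425 candidate combinations per pass.
 */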
for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) {
for (tu->i_upper_boundary_count = 1;
tu->i_upper_boundary_count <= 15;
tu->i_upper_boundary_count++) {
for (tu->i_lower_boundary_count = 1;
tu->i_lower_boundary_count <= 15;
tu->i_lower_boundary_count++) {
_tu_valid_boundary_calc(tu);
}
}
}
tu->delay_start_link_extra_pixclk--;
} while (tu->boundary_moderation_en != true &&
tu->boundary_mod_lower_err == 1 &&
tu->delay_start_link_extra_pixclk != 0);
if (tu->boundary_moderation_en == true) {
temp1_fp = drm_fixp_from_fraction(
(tu->upper_boundary_count *
tu->valid_boundary_link +
tu->lower_boundary_count *
(tu->valid_boundary_link - 1)), 1);
temp2_fp = drm_fixp_from_fraction(
(tu->upper_boundary_count +
tu->lower_boundary_count), 1);
tu->resulting_valid_fp =
drm_fixp_div(temp1_fp, temp2_fp);
temp1_fp = drm_fixp_from_fraction(
tu->tu_size_desired, 1);
tu->ratio_by_tu_fp =
drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
tu->valid_lower_boundary_link =
tu->valid_boundary_link - 1;
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp);
temp2_fp = drm_fixp_div(temp1_fp,
tu->resulting_valid_fp);
tu->n_tus = drm_fixp2int(temp2_fp);
tu->tu_size_minus1 = tu->tu_size_desired - 1;
tu->even_distribution_BF = 1;
temp1_fp =
drm_fixp_from_fraction(tu->tu_size_desired, 1);
temp2_fp =
drm_fixp_div(tu->resulting_valid_fp, temp1_fp);
tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp;
}
}
temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu->lwidth_fp);
if (temp2_fp)
temp = drm_fixp2int_ceil(temp2_fp);
else
temp = 0;
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
temp2_fp = drm_fixp_div(temp1_fp, temp2_fp);
temp1_fp = drm_fixp_from_fraction(temp, 1);
temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
temp = drm_fixp2int(temp2_fp);
if (tu->async_en)
tu->delay_start_link += (int)temp;
temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1);
tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
/* OUTPUTS */
tu_table->valid_boundary_link = tu->valid_boundary_link;
tu_table->delay_start_link = tu->delay_start_link;
tu_table->boundary_moderation_en = tu->boundary_moderation_en;
tu_table->valid_lower_boundary_link = tu->valid_lower_boundary_link;
tu_table->upper_boundary_count = tu->upper_boundary_count;
tu_table->lower_boundary_count = tu->lower_boundary_count;
tu_table->tu_size_minus1 = tu->tu_size_minus1;
drm_dbg_dp(ctrl->drm_dev, "TU: valid_boundary_link: %d\n",
tu_table->valid_boundary_link);
drm_dbg_dp(ctrl->drm_dev, "TU: delay_start_link: %d\n",
tu_table->delay_start_link);
drm_dbg_dp(ctrl->drm_dev, "TU: boundary_moderation_en: %d\n",
tu_table->boundary_moderation_en);
drm_dbg_dp(ctrl->drm_dev, "TU: valid_lower_boundary_link: %d\n",
tu_table->valid_lower_boundary_link);
drm_dbg_dp(ctrl->drm_dev, "TU: upper_boundary_count: %d\n",
tu_table->upper_boundary_count);
drm_dbg_dp(ctrl->drm_dev, "TU: lower_boundary_count: %d\n",
tu_table->lower_boundary_count);
drm_dbg_dp(ctrl->drm_dev, "TU: tu_size_minus1: %d\n",
tu_table->tu_size_minus1);
kfree(tu);
}
static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
struct dp_vc_tu_mapping_table *tu_table)
{
struct dp_tu_calc_input in;
struct drm_display_mode *drm_mode;
drm_mode = &ctrl->panel->dp_mode.drm_mode;
in.lclk = ctrl->link->link_params.rate / 1000;
in.pclk_khz = drm_mode->clock;
in.hactive = drm_mode->hdisplay;
in.hporch = drm_mode->htotal - drm_mode->hdisplay;
in.nlanes = ctrl->link->link_params.num_lanes;
in.bpp = ctrl->panel->dp_mode.bpp;
in.pixel_enc = 444;
in.dsc_en = 0;
in.async_en = 0;
in.fec_en = 0;
in.num_of_dsc_slices = 0;
in.compress_ratio = 100;
_dp_ctrl_calc_tu(ctrl, &in, tu_table);
}
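/*
 * Pack the computed TU parameters into the catalog's three register
 * words: dp_tu[7:0] holds tu_size_minus1; valid_boundary packs the
 * valid symbol count in its low bits with delay_start_link at bit 16;
 * valid_boundary2 carries the boundary moderation enable in bit 0, the
 * lower valid boundary from bit 1, and the upper/lower boundary counts
 * at bits 16 and 20 (layout inferred from the shifts below).
 */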
static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
{
u32 dp_tu = 0x0;
u32 valid_boundary = 0x0;
u32 valid_boundary2 = 0x0;
struct dp_vc_tu_mapping_table tu_calc_table;
dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
dp_tu |= tu_calc_table.tu_size_minus1;
valid_boundary |= tu_calc_table.valid_boundary_link;
valid_boundary |= (tu_calc_table.delay_start_link << 16);
valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
if (tu_calc_table.boundary_moderation_en)
valid_boundary2 |= BIT(0);
pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
dp_tu, valid_boundary, valid_boundary2);
dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
dp_tu, valid_boundary, valid_boundary2);
}
static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
{
int ret = 0;
if (!wait_for_completion_timeout(&ctrl->video_comp,
WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) {
DRM_ERROR("wait4video timed out\n");
ret = -ETIMEDOUT;
}
return ret;
}
static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
{
struct dp_link *link = ctrl->link;
int ret = 0, lane, lane_cnt;
u8 buf[4];
u32 max_level_reached = 0;
u32 voltage_swing_level = link->phy_params.v_level;
u32 pre_emphasis_level = link->phy_params.p_level;
drm_dbg_dp(ctrl->drm_dev,
"voltage level: %d emphasis level: %d\n",
voltage_swing_level, pre_emphasis_level);
ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
voltage_swing_level, pre_emphasis_level);
if (ret)
return ret;
if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
drm_dbg_dp(ctrl->drm_dev,
"max. voltage swing level reached %d\n",
voltage_swing_level);
max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
}
if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
drm_dbg_dp(ctrl->drm_dev,
"max. pre-emphasis level reached %d\n",
pre_emphasis_level);
max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
}
pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT;
lane_cnt = ctrl->link->link_params.num_lanes;
for (lane = 0; lane < lane_cnt; lane++)
buf[lane] = voltage_swing_level | pre_emphasis_level
| max_level_reached;
drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n",
voltage_swing_level | pre_emphasis_level);
ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
buf, lane_cnt);
if (ret == lane_cnt)
ret = 0;
return ret;
}
static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
u8 pattern)
{
u8 buf;
int ret = 0;
drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern);
buf = pattern;
if (pattern && pattern != DP_TRAINING_PATTERN_4)
buf |= DP_LINK_SCRAMBLING_DISABLE;
ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
return ret == 1;
}
static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
u8 *link_status)
{
int ret = 0, len;
len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (len != DP_LINK_STATUS_SIZE) {
DRM_ERROR("DP link status read failed, err: %d\n", len);
ret = -EINVAL;
}
return ret;
}
static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
int *training_step)
{
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 4;
dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_1;
ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
if (ret)
return ret;
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
ret = dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
for (tries = 0; tries < maximum_retries; tries++) {
drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
if (drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes)) {
return 0;
}
if (ctrl->link->phy_params.v_level >=
DP_TRAIN_VOLTAGE_SWING_MAX) {
DRM_ERROR_RATELIMITED("max v_level reached\n");
return -EAGAIN;
}
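/*
 * Per the clock recovery sequence, each voltage swing level gets at
 * most maximum_retries attempts; restart the count whenever the sink
 * requests a different swing level.
 */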
if (old_v_level != ctrl->link->phy_params.v_level) {
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
}
dp_link_adjust_levels(ctrl->link, link_status);
ret = dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
}
DRM_ERROR("max tries reached\n");
return -ETIMEDOUT;
}
static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
{
int ret = 0;
switch (ctrl->link->link_params.rate) {
case 810000:
ctrl->link->link_params.rate = 540000;
break;
case 540000:
ctrl->link->link_params.rate = 270000;
break;
case 270000:
ctrl->link->link_params.rate = 162000;
break;
case 162000:
default:
ret = -EINVAL;
break;
}
if (!ret) {
drm_dbg_dp(ctrl->drm_dev, "new rate=0x%x\n",
ctrl->link->link_params.rate);
}
return ret;
}
static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
{
if (ctrl->link->link_params.num_lanes == 1)
return -1;
ctrl->link->link_params.num_lanes /= 2;
ctrl->link->link_params.rate = ctrl->panel->link_info.rate;
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
return 0;
}
static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
{
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
}
static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
int *training_step)
{
int tries = 0, ret = 0;
u8 pattern;
u32 state_ctrl_bit;
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_2;
if (drm_dp_tps4_supported(ctrl->panel->dpcd)) {
pattern = DP_TRAINING_PATTERN_4;
state_ctrl_bit = 4;
} else if (drm_dp_tps3_supported(ctrl->panel->dpcd)) {
pattern = DP_TRAINING_PATTERN_3;
state_ctrl_bit = 3;
} else {
pattern = DP_TRAINING_PATTERN_2;
state_ctrl_bit = 2;
}
ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
if (ret)
return ret;
dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
if (drm_dp_channel_eq_ok(link_status,
ctrl->link->link_params.num_lanes)) {
return 0;
}
dp_link_adjust_levels(ctrl->link, link_status);
ret = dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
}
return -ETIMEDOUT;
}
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
u8 assr;
struct dp_link_info link_info = {0};
dp_ctrl_config_ctrl(ctrl);
link_info.num_lanes = ctrl->link->link_params.num_lanes;
link_info.rate = ctrl->link->link_params.rate;
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd))
encoding[0] |= DP_SPREAD_AMP_0_5;
/* config DOWNSPREAD_CTRL and MAIN_LINK_CHANNEL_CODING_SET */
drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, encoding, 2);
if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
drm_dp_dpcd_write(ctrl->aux, DP_EDP_CONFIGURATION_SET,
&assr, 1);
}
ret = dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
goto end;
}
/* print success info as this is a result of a user-initiated action */
drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
ret = dp_ctrl_link_train_2(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #2 failed. ret=%d\n", ret);
goto end;
}
/* print success info as this is a result of a user-initiated action */
drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
end:
dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
return ret;
}
static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return ret;
/*
* As part of previous calls, DP controller state might have
* transitioned to PUSH_IDLE. In order to start transmitting
* a link training pattern, we have to first do soft reset.
*/
ret = dp_ctrl_link_train(ctrl, training_step);
return ret;
}
static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
enum dp_pm_type module, char *name, unsigned long rate)
{
u32 num = ctrl->parser->mp[module].num_clk;
struct clk_bulk_data *cfg = ctrl->parser->mp[module].clocks;
while (num && strcmp(cfg->id, name)) {
num--;
cfg++;
}
drm_dbg_dp(ctrl->drm_dev, "setting rate=%lu on clk=%s\n",
rate, name);
if (num)
clk_set_rate(cfg->clk, rate);
else
DRM_ERROR("%s clock doesn't exist, can't set rate %lu\n",
name, rate);
}
static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
{
int ret = 0;
struct dp_io *dp_io = &ctrl->parser->io;
struct phy *phy = dp_io->phy;
struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
const u8 *dpcd = ctrl->panel->dpcd;
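/*
 * link_params.rate is the link symbol clock in kHz; each 8b/10b symbol
 * carries 10 bits, so rate / 100 yields the per-lane rate in Mb/s
 * (1620/2700/5400/8100) that the PHY layer expects.
 */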
opts_dp->lanes = ctrl->link->link_params.num_lanes;
opts_dp->link_rate = ctrl->link->link_params.rate / 100;
opts_dp->ssc = drm_dp_max_downspread(dpcd);
phy_configure(phy, &dp_io->phy_opts);
phy_power_on(phy);
dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate);
return ret;
}
void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
{
struct dp_ctrl_private *ctrl;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_catalog_ctrl_reset(ctrl->catalog);
/*
* Not all of the DP controller's programmable registers are reset to
* their default values by DP_SW_RESET, so the interrupt mask bits have
* to be updated explicitly to enable/disable interrupts.
*/
dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}
void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl)
{
u8 cfg;
struct dp_ctrl_private *ctrl = container_of(dp_ctrl,
struct dp_ctrl_private, dp_ctrl);
if (!ctrl->panel->psr_cap.version)
return;
dp_catalog_ctrl_config_psr(ctrl->catalog);
cfg = DP_PSR_ENABLE;
drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1);
}
void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter)
{
struct dp_ctrl_private *ctrl = container_of(dp_ctrl,
struct dp_ctrl_private, dp_ctrl);
if (!ctrl->panel->psr_cap.version)
return;
/*
* When entering PSR,
* 1. Send PSR enter SDP and wait for the PSR_UPDATE_INT
* 2. Turn off video
* 3. Disable the mainlink
*
* When exiting PSR,
* 1. Enable the mainlink
* 2. Send the PSR exit SDP
*/
if (enter) {
reinit_completion(&ctrl->psr_op_comp);
dp_catalog_ctrl_set_psr(ctrl->catalog, true);
if (!wait_for_completion_timeout(&ctrl->psr_op_comp,
PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) {
DRM_ERROR("PSR_ENTRY timed out\n");
dp_catalog_ctrl_set_psr(ctrl->catalog, false);
return;
}
dp_ctrl_push_idle(dp_ctrl);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
} else {
dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
dp_catalog_ctrl_set_psr(ctrl->catalog, false);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
dp_ctrl_wait4video_ready(ctrl);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
}
}
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_exit(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
{
const u8 *dpcd = ctrl->panel->dpcd;
/*
* For a better interop experience, use a fixed NVID=0x8000
* whenever connected to a VGA dongle downstream.
*/
if (drm_dp_is_branch(dpcd))
return (drm_dp_has_quirk(&ctrl->panel->desc,
DP_DPCD_QUIRK_CONSTANT_N));
return false;
}
static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
{
int ret = 0;
struct dp_io *dp_io = &ctrl->parser->io;
struct phy *phy = dp_io->phy;
struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
opts_dp->lanes = ctrl->link->link_params.num_lanes;
phy_configure(phy, &dp_io->phy_opts);
/*
* Disable and re-enable the mainlink clock since the
* link clock might have been adjusted as part of the
* link maintenance.
*/
dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable clocks. ret=%d\n", ret);
return ret;
}
phy_power_off(phy);
/* hw recommended delay before re-enabling clocks */
msleep(20);
ret = dp_ctrl_enable_mainlink_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret);
return ret;
}
return ret;
}
static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
{
struct dp_io *dp_io;
struct phy *phy;
int ret;
dp_io = &ctrl->parser->io;
phy = dp_io->phy;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
dp_catalog_ctrl_reset(ctrl->catalog);
dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
}
phy_power_off(phy);
/* aux channel down, reinit phy */
phy_exit(phy);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return 0;
}
static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
{
int ret = 0;
int training_step = DP_TRAINING_NONE;
dp_ctrl_push_idle(&ctrl->dp_ctrl);
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
ret = dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
dp_ctrl_clear_training_pattern(ctrl);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
ret = dp_ctrl_wait4video_ready(ctrl);
end:
return ret;
}
static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
{
bool success = false;
u32 pattern_sent = 0x0;
u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", pattern_requested);
if (dp_catalog_ctrl_update_vx_px(ctrl->catalog,
ctrl->link->phy_params.v_level,
ctrl->link->phy_params.p_level)) {
DRM_ERROR("Failed to set v/p levels\n");
return false;
}
dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
dp_ctrl_update_vx_px(ctrl);
dp_link_send_test_response(ctrl->link);
pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
switch (pattern_sent) {
case MR_LINK_TRAINING1:
success = (pattern_requested ==
DP_PHY_TEST_PATTERN_D10_2);
break;
case MR_LINK_SYMBOL_ERM:
success = ((pattern_requested ==
DP_PHY_TEST_PATTERN_ERROR_COUNT) ||
(pattern_requested ==
DP_PHY_TEST_PATTERN_CP2520));
break;
case MR_LINK_PRBS7:
success = (pattern_requested ==
DP_PHY_TEST_PATTERN_PRBS7);
break;
case MR_LINK_CUSTOM80:
success = (pattern_requested ==
DP_PHY_TEST_PATTERN_80BIT_CUSTOM);
break;
case MR_LINK_TRAINING4:
success = (pattern_requested ==
DP_PHY_TEST_PATTERN_SEL_MASK);
break;
default:
success = false;
}
drm_dbg_dp(ctrl->drm_dev, "%s: test->0x%x\n",
success ? "success" : "failed", pattern_requested);
return success;
}
static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
{
int ret;
unsigned long pixel_rate;
if (!ctrl->link->phy_params.phy_test_pattern_sel) {
drm_dbg_dp(ctrl->drm_dev,
"no test pattern selected by sink\n");
return 0;
}
/*
* The global reset requires the DP link clocks to be running,
* so issue it just before the link and core clocks are
* disabled.
*/
ret = dp_ctrl_off(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
}
ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to enable DP link controller\n");
return ret;
}
pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
return ret;
}
dp_ctrl_send_phy_test_pattern(ctrl);
return 0;
}
void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
u32 sink_request = 0x0;
if (!dp_ctrl) {
DRM_ERROR("invalid input\n");
return;
}
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
sink_request = ctrl->link->sink_request;
if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n");
if (dp_ctrl_process_phy_test_request(ctrl)) {
DRM_ERROR("process phy_test_req failed\n");
return;
}
}
if (sink_request & DP_LINK_STATUS_UPDATED) {
if (dp_ctrl_link_maintenance(ctrl)) {
DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
return;
}
}
if (sink_request & DP_TEST_LINK_TRAINING) {
dp_link_send_test_response(ctrl->link);
if (dp_ctrl_link_maintenance(ctrl)) {
DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
return;
}
}
}
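/*
 * After a failed training attempt, check whether clock recovery still
 * succeeds on a reduced lane count; if it does, retrying the training
 * with fewer lanes is worthwhile.
 */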
static bool dp_ctrl_clock_recovery_any_ok(
const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int reduced_cnt;
if (lane_count <= 1)
return false;
/*
* Only the reduced lane count is of interest:
* lane_count = 4 -> check the first 2 lanes
* lane_count = 2 -> check the first lane
*/
reduced_cnt = lane_count >> 1;
return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
}
static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
{
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
dp_ctrl_read_link_status(ctrl, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
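/*
 * Bring up the main link: enable the link clocks, then retry link
 * training up to link_train_max_retries times, downshifting the link
 * rate and/or lane count between attempts depending on which training
 * phase failed.
 */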
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
{
int rc = 0;
struct dp_ctrl_private *ctrl;
u32 rate;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
u8 link_status[DP_LINK_STATUS_SIZE];
unsigned int training_step;
unsigned long pixel_rate;
if (!dp_ctrl)
return -EINVAL;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
rate = ctrl->panel->link_info.rate;
pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
"using phy test link parameters\n");
if (!pixel_rate)
pixel_rate = phy_cts_pixel_clk_khz;
} else {
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
}
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
pixel_rate);
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
return rc;
while (--link_train_max_retries) {
rc = dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
rc);
break;
}
training_step = DP_TRAINING_NONE;
rc = dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
/* training completed successfully */
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
if (!dp_catalog_link_is_connected(ctrl->catalog))
break;
dp_ctrl_read_link_status(ctrl, link_status);
rc = dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already at RBR (1.62 Gbps) */
if (dp_ctrl_clock_recovery_any_ok(link_status,
ctrl->link->link_params.num_lanes)) {
/*
* some lanes are ready,
* reduce lane number
*/
rc = dp_ctrl_link_lane_down_shift(ctrl);
if (rc < 0) { /* lane == 1 already */
/* end with failure */
break;
}
} else {
/* end with failure */
break; /* CR failed even on reduced lanes */
}
}
} else if (training_step == DP_TRAINING_2) {
/* link train_2 failed */
if (!dp_catalog_link_is_connected(ctrl->catalog))
break;
dp_ctrl_read_link_status(ctrl, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
rc = dp_ctrl_link_rate_down_shift(ctrl);
else
rc = dp_ctrl_link_lane_down_shift(ctrl);
if (rc < 0) {
/* end with failure */
break; /* already at minimum rate and lane count */
}
/* stop link training before starting retraining */
dp_ctrl_clear_training_pattern(ctrl);
}
}
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return rc;
if (rc == 0) { /* link trained successfully */
/*
* Do not stop the training pattern here; it is cleared in
* dp_ctrl_on_stream() so that compliance tests pass.
*/
} else {
/*
* Link training failed: stop transmitting the training
* pattern and tear down the main link.
*/
dp_ctrl_clear_training_pattern(ctrl);
dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
}
return rc;
}
static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
{
int training_step = DP_TRAINING_NONE;
return dp_ctrl_setup_main_link(ctrl, &training_step);
}
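/*
 * Enable the video stream: make sure the link and pixel clocks are
 * running, retrain the link if channel equalization was lost (or if the
 * caller forces it), then program the MSA and transfer unit and ask the
 * controller to send video.
 */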
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
unsigned long pixel_rate;
unsigned long pixel_rate_orig;
if (!dp_ctrl)
return -EINVAL;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
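/* in wide bus mode, two pixels are transferred per pixel clock cycle */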
if (dp_ctrl->wide_bus_en)
pixel_rate >>= 1;
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate,
ctrl->link->link_params.num_lanes, pixel_rate);
if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
ret = dp_ctrl_enable_mainlink_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
goto end;
}
}
dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret) {
DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
goto end;
}
if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
dp_ctrl_link_retrain(ctrl);
/* stop transmitting the training pattern to end link training */
dp_ctrl_clear_training_pattern(ctrl);
/*
* Set up transfer unit values and set controller state to send
* video.
*/
reinit_completion(&ctrl->video_comp);
dp_ctrl_configure_source_params(ctrl);
dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl));
dp_ctrl_setup_tr_unit(ctrl);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
ret = dp_ctrl_wait4video_ready(ctrl);
if (ret)
return ret;
mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
drm_dbg_dp(ctrl->drm_dev,
"mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
end:
return ret;
}
int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;
int ret;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;
/* set dongle to D3 (power off) mode */
dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
if (dp_power_clk_status(ctrl->power, DP_STREAM_PM)) {
ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
if (ret) {
DRM_ERROR("Failed to disable pclk. ret=%d\n", ret);
return ret;
}
}
dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
return ret;
}
phy_power_off(phy);
/* aux channel down, reinit phy */
phy_exit(phy);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return ret;
}
int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;
int ret;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
}
DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
phy_power_off(phy);
DRM_DEBUG_DP("After, phy=%p init_count=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return ret;
}
int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;
int ret = 0;
if (!dp_ctrl)
return -EINVAL;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
dp_catalog_ctrl_reset(ctrl->catalog);
ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
if (ret)
DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
dev_pm_opp_set_rate(ctrl->dev, 0);
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
if (ret) {
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
}
phy_power_off(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return ret;
}
irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
u32 isr;
irqreturn_t ret = IRQ_NONE;
if (!dp_ctrl)
return IRQ_NONE;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
if (ctrl->panel->psr_cap.version) {
isr = dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
if (isr)
complete(&ctrl->psr_op_comp);
if (isr & PSR_EXIT_INT)
drm_dbg_dp(ctrl->drm_dev, "PSR exit done\n");
if (isr & PSR_UPDATE_INT)
drm_dbg_dp(ctrl->drm_dev, "PSR frame update done\n");
if (isr & PSR_CAPTURE_INT)
drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n");
}
isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n");
complete(&ctrl->video_comp);
ret = IRQ_HANDLED;
}
if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) {
drm_dbg_dp(ctrl->drm_dev, "idle_patterns_sent\n");
complete(&ctrl->idle_comp);
ret = IRQ_HANDLED;
}
return ret;
}
struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
struct dp_panel *panel, struct drm_dp_aux *aux,
struct dp_power *power, struct dp_catalog *catalog,
struct dp_parser *parser)
{
struct dp_ctrl_private *ctrl;
int ret;
if (!dev || !panel || !aux ||
!link || !catalog) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl) {
DRM_ERROR("Mem allocation failure\n");
return ERR_PTR(-ENOMEM);
}
ret = devm_pm_opp_set_clkname(dev, "ctrl_link");
if (ret) {
dev_err(dev, "invalid DP OPP table in device tree\n");
/* callers expect an ERR_PTR() on failure */
return (struct dp_ctrl *)ERR_PTR(ret);
}
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret)
dev_err(dev, "failed to add DP OPP table\n");
init_completion(&ctrl->idle_comp);
init_completion(&ctrl->psr_op_comp);
init_completion(&ctrl->video_comp);
/* in parameters */
ctrl->parser = parser;
ctrl->panel = panel;
ctrl->power = power;
ctrl->aux = aux;
ctrl->link = link;
ctrl->catalog = catalog;
ctrl->dev = dev;
return &ctrl->dp_ctrl;
}
| linux-master | drivers/gpu/drm/msm/dp/dp_ctrl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_bridge.h>
#include "dp_parser.h"
#include "dp_reg.h"
#define DP_DEFAULT_AHB_OFFSET 0x0000
#define DP_DEFAULT_AHB_SIZE 0x0200
#define DP_DEFAULT_AUX_OFFSET 0x0200
#define DP_DEFAULT_AUX_SIZE 0x0200
#define DP_DEFAULT_LINK_OFFSET 0x0400
#define DP_DEFAULT_LINK_SIZE 0x0C00
#define DP_DEFAULT_P0_OFFSET 0x1000
#define DP_DEFAULT_P0_SIZE 0x0400
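/* Map one register block of the DP controller and report its length. */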
static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
struct resource *res;
void __iomem *base;
base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
if (!IS_ERR(base))
*len = resource_size(res);
return base;
}
static int dp_parser_ctrl_res(struct dp_parser *parser)
{
struct platform_device *pdev = parser->pdev;
struct dp_io *io = &parser->io;
struct dss_io_data *dss = &io->dp_controller;
dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
if (IS_ERR(dss->ahb.base))
return PTR_ERR(dss->ahb.base);
dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
if (IS_ERR(dss->aux.base)) {
/*
* The initial binding had a single reg, but in order to
* support variation in the sub-region sizes this was split.
* dp_ioremap() will fail with -EINVAL here if only a single
* reg is specified, so fill in the sub-region offsets and
* lengths based on this single region.
*/
if (PTR_ERR(dss->aux.base) == -EINVAL) {
if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
DRM_ERROR("legacy memory region not large enough\n");
return -EINVAL;
}
dss->ahb.len = DP_DEFAULT_AHB_SIZE;
dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
dss->aux.len = DP_DEFAULT_AUX_SIZE;
dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
dss->link.len = DP_DEFAULT_LINK_SIZE;
dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
dss->p0.len = DP_DEFAULT_P0_SIZE;
} else {
DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
return PTR_ERR(dss->aux.base);
}
} else {
dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
if (IS_ERR(dss->link.base)) {
DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
return PTR_ERR(dss->link.base);
}
dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
if (IS_ERR(dss->p0.base)) {
DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
return PTR_ERR(dss->p0.base);
}
}
io->phy = devm_phy_get(&pdev->dev, "dp");
if (IS_ERR(io->phy))
return PTR_ERR(io->phy);
return 0;
}
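/*
 * Read the last (highest) entry of the optional "link-frequencies"
 * property on the dp_out endpoint and convert it from a bit rate in Hz
 * to the kBytes/s units used for the link rate.
 */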
static u32 dp_parser_link_frequencies(struct device_node *of_node)
{
struct device_node *endpoint;
u64 frequency = 0;
int cnt;
endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */
if (!endpoint)
return 0;
cnt = of_property_count_u64_elems(endpoint, "link-frequencies");
if (cnt > 0)
of_property_read_u64_index(endpoint, "link-frequencies",
cnt - 1, &frequency);
of_node_put(endpoint);
do_div(frequency,
10 * /* 8b/10b encoding: bits to bytes */
1000); /* Hz to kBytes/s */
return frequency;
}
static int dp_parser_misc(struct dp_parser *parser)
{
struct device_node *of_node = parser->pdev->dev.of_node;
int cnt;
/* "data-lanes" is a property of the dp_out endpoint */
cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
if (cnt < 0) {
/* legacy code, data-lanes is the property of mdss_dp node */
cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
}
if (cnt > 0)
parser->max_dp_lanes = cnt;
else
parser->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
parser->max_dp_link_rate = dp_parser_link_frequencies(of_node);
if (!parser->max_dp_link_rate)
parser->max_dp_link_rate = DP_LINK_RATE_HBR2;
return 0;
}
static inline bool dp_parser_check_prefix(const char *clk_prefix,
const char *clk_name)
{
return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
}
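/*
 * Count the "core", "ctrl" and "stream" clocks listed in clock-names
 * and allocate the clk_bulk_data array of each power module.
 */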
static int dp_parser_init_clk_data(struct dp_parser *parser)
{
int num_clk, i, rc;
int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
const char *clk_name;
struct device *dev = &parser->pdev->dev;
struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
num_clk = of_property_count_strings(dev->of_node, "clock-names");
if (num_clk <= 0) {
DRM_ERROR("no clocks are defined\n");
return -EINVAL;
}
for (i = 0; i < num_clk; i++) {
rc = of_property_read_string_index(dev->of_node,
"clock-names", i, &clk_name);
if (rc < 0)
return rc;
if (dp_parser_check_prefix("core", clk_name))
core_clk_count++;
if (dp_parser_check_prefix("ctrl", clk_name))
ctrl_clk_count++;
if (dp_parser_check_prefix("stream", clk_name))
stream_clk_count++;
}
/* Initialize the CORE power module */
if (core_clk_count == 0) {
DRM_ERROR("no core clocks are defined\n");
return -EINVAL;
}
core_power->num_clk = core_clk_count;
core_power->clocks = devm_kcalloc(dev,
core_power->num_clk, sizeof(struct clk_bulk_data),
GFP_KERNEL);
if (!core_power->clocks)
return -ENOMEM;
/* Initialize the CTRL power module */
if (ctrl_clk_count == 0) {
DRM_ERROR("no ctrl clocks are defined\n");
return -EINVAL;
}
ctrl_power->num_clk = ctrl_clk_count;
ctrl_power->clocks = devm_kcalloc(dev,
ctrl_power->num_clk, sizeof(struct clk_bulk_data),
GFP_KERNEL);
if (!ctrl_power->clocks) {
ctrl_power->num_clk = 0;
return -ENOMEM;
}
/* Initialize the STREAM power module */
if (stream_clk_count == 0) {
DRM_ERROR("no stream (pixel) clocks are defined\n");
return -EINVAL;
}
stream_power->num_clk = stream_clk_count;
stream_power->clocks = devm_kcalloc(dev,
stream_power->num_clk, sizeof(struct clk_bulk_data),
GFP_KERNEL);
if (!stream_power->clocks) {
stream_power->num_clk = 0;
return -ENOMEM;
}
return 0;
}
static int dp_parser_clock(struct dp_parser *parser)
{
int rc = 0, i = 0;
int num_clk = 0;
int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
const char *clk_name;
struct device *dev = &parser->pdev->dev;
struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
rc = dp_parser_init_clk_data(parser);
if (rc) {
DRM_ERROR("failed to initialize power data %d\n", rc);
return -EINVAL;
}
core_clk_count = core_power->num_clk;
ctrl_clk_count = ctrl_power->num_clk;
stream_clk_count = stream_power->num_clk;
num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
for (i = 0; i < num_clk; i++) {
rc = of_property_read_string_index(dev->of_node, "clock-names",
i, &clk_name);
if (rc) {
DRM_ERROR("error reading clock-names %d\n", rc);
return rc;
}
if (dp_parser_check_prefix("core", clk_name) &&
core_clk_index < core_clk_count) {
core_power->clocks[core_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
core_clk_index++;
} else if (dp_parser_check_prefix("stream", clk_name) &&
stream_clk_index < stream_clk_count) {
stream_power->clocks[stream_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
stream_clk_index++;
} else if (dp_parser_check_prefix("ctrl", clk_name) &&
ctrl_clk_index < ctrl_clk_count) {
ctrl_power->clocks[ctrl_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
ctrl_clk_index++;
}
}
return 0;
}
int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser)
{
struct platform_device *pdev = parser->pdev;
struct drm_bridge *bridge;
bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
parser->next_bridge = bridge;
return 0;
}
static int dp_parser_parse(struct dp_parser *parser)
{
int rc = 0;
if (!parser) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
rc = dp_parser_ctrl_res(parser);
if (rc)
return rc;
rc = dp_parser_misc(parser);
if (rc)
return rc;
rc = dp_parser_clock(parser);
if (rc)
return rc;
return 0;
}
struct dp_parser *dp_parser_get(struct platform_device *pdev)
{
struct dp_parser *parser;
parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
if (!parser)
return ERR_PTR(-ENOMEM);
parser->parse = dp_parser_parse;
parser->pdev = pdev;
return parser;
}
| linux-master | drivers/gpu/drm/msm/dp/dp_parser.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/component.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <drm/display/drm_dp_aux_bus.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "dp_parser.h"
#include "dp_power.h"
#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_reg.h"
#include "dp_link.h"
#include "dp_panel.h"
#include "dp_ctrl.h"
#include "dp_display.h"
#include "dp_drm.h"
#include "dp_audio.h"
#include "dp_debug.h"
static bool psr_enabled = false;
module_param(psr_enabled, bool, 0);
MODULE_PARM_DESC(psr_enabled, "enable PSR for eDP and DP displays");
#define HPD_STRING_SIZE 30
enum {
ISR_DISCONNECTED,
ISR_CONNECT_PENDING,
ISR_CONNECTED,
ISR_HPD_REPLUG_COUNT,
ISR_IRQ_HPD_PULSE_COUNT,
ISR_HPD_LO_GLITH_COUNT,
};
/* event thread connection state */
enum {
ST_DISCONNECTED,
ST_MAINLINK_READY,
ST_CONNECTED,
ST_DISCONNECT_PENDING,
ST_DISPLAY_OFF,
ST_SUSPENDED,
};
enum {
EV_NO_EVENT,
/* hpd events */
EV_HPD_INIT_SETUP,
EV_HPD_PLUG_INT,
EV_IRQ_HPD_INT,
EV_HPD_UNPLUG_INT,
EV_USER_NOTIFICATION,
};
#define EVENT_TIMEOUT (HZ/10) /* 100ms */
#define DP_EVENT_Q_MAX 8
#define DP_TIMEOUT_NONE 0
#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
struct dp_event {
u32 event_id;
u32 data;
u32 delay;
};
struct dp_display_private {
char *name;
int irq;
unsigned int id;
/* state variables */
bool core_initialized;
bool phy_initialized;
bool hpd_irq_on;
bool audio_supported;
struct drm_device *drm_dev;
struct platform_device *pdev;
struct dentry *root;
struct dp_parser *parser;
struct dp_power *power;
struct dp_catalog *catalog;
struct drm_dp_aux *aux;
struct dp_link *link;
struct dp_panel *panel;
struct dp_ctrl *ctrl;
struct dp_debug *debug;
struct dp_display_mode dp_mode;
struct msm_dp dp_display;
/* wait for audio signaling */
struct completion audio_comp;
/* event related only access by event thread */
struct mutex event_mutex;
wait_queue_head_t event_q;
u32 hpd_state;
u32 event_pndx;
u32 event_gndx;
struct task_struct *ev_tsk;
struct dp_event event_list[DP_EVENT_Q_MAX];
spinlock_t event_lock;
bool wide_bus_en;
struct dp_audio *audio;
};
struct msm_dp_desc {
phys_addr_t io_start;
unsigned int id;
unsigned int connector_type;
bool wide_bus_en;
};
static const struct msm_dp_desc sc7180_dp_descs[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
{}
};
static const struct msm_dp_desc sc7280_dp_descs[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
{}
};
static const struct msm_dp_desc sc8180x_dp_descs[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP },
{}
};
static const struct msm_dp_desc sc8280xp_dp_descs[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{ .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
{}
};
static const struct msm_dp_desc sc8280xp_edp_descs[] = {
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
{ .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
{ .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
{ .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
{}
};
static const struct msm_dp_desc sm8350_dp_descs[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
{}
};
static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs },
{ .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs },
{ .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_descs },
{ .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs },
{ .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs },
{ .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs },
{ .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs },
{ .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
{ .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs },
{}
};
static struct dp_display_private *dev_get_dp_display_private(struct device *dev)
{
struct msm_dp *dp = dev_get_drvdata(dev);
return container_of(dp, struct dp_display_private, dp_display);
}
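/*
 * Queue an event for the HPD event thread. The event list is a fixed
 * size ring buffer: event_pndx is the producer index and event_gndx the
 * consumer index, both protected by event_lock.
 */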
static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
u32 data, u32 delay)
{
unsigned long flag;
struct dp_event *todo;
int pndx;
spin_lock_irqsave(&dp_priv->event_lock, flag);
pndx = dp_priv->event_pndx + 1;
pndx %= DP_EVENT_Q_MAX;
if (pndx == dp_priv->event_gndx) {
pr_err("event_q is full: pndx=%d gndx=%d\n",
dp_priv->event_pndx, dp_priv->event_gndx);
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
return -EPERM;
}
todo = &dp_priv->event_list[dp_priv->event_pndx++];
dp_priv->event_pndx %= DP_EVENT_Q_MAX;
todo->event_id = event;
todo->data = data;
todo->delay = delay;
wake_up(&dp_priv->event_q);
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
return 0;
}
static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
{
unsigned long flag;
struct dp_event *todo;
u32 gndx;
spin_lock_irqsave(&dp_priv->event_lock, flag);
if (dp_priv->event_pndx == dp_priv->event_gndx) {
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
return -ENOENT;
}
gndx = dp_priv->event_gndx;
while (dp_priv->event_pndx != gndx) {
todo = &dp_priv->event_list[gndx];
if (todo->event_id == event) {
todo->event_id = EV_NO_EVENT; /* deleted */
todo->delay = 0;
}
gndx++;
gndx %= DP_EVENT_Q_MAX;
}
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
return 0;
}
void dp_display_signal_audio_start(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
dp = container_of(dp_display, struct dp_display_private, dp_display);
reinit_completion(&dp->audio_comp);
}
void dp_display_signal_audio_complete(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
dp = container_of(dp_display, struct dp_display_private, dp_display);
complete_all(&dp->audio_comp);
}
static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv);
static int dp_display_bind(struct device *dev, struct device *master,
void *data)
{
int rc = 0;
struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
struct drm_device *drm = priv->dev;
dp->dp_display.drm_dev = drm;
priv->dp[dp->id] = &dp->dp_display;
rc = dp->parser->parse(dp->parser);
if (rc) {
DRM_ERROR("device tree parsing failed\n");
goto end;
}
dp->drm_dev = drm;
dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);
if (rc) {
DRM_ERROR("DRM DP AUX register failed\n");
goto end;
}
rc = dp_power_client_init(dp->power);
if (rc) {
DRM_ERROR("Power client create failed\n");
goto end;
}
rc = dp_register_audio_driver(dev, dp->audio);
if (rc) {
DRM_ERROR("Audio registration Dp failed\n");
goto end;
}
rc = dp_hpd_event_thread_start(dp);
if (rc) {
DRM_ERROR("Event thread create failed\n");
goto end;
}
return 0;
end:
return rc;
}
static void dp_display_unbind(struct device *dev, struct device *master,
void *data)
{
struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
/* disable all HPD interrupts */
if (dp->core_initialized)
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
kthread_stop(dp->ev_tsk);
of_dp_aux_depopulate_bus(dp->aux);
dp_power_client_deinit(dp->power);
dp_unregister_audio_driver(dev, dp->audio);
dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
priv->dp[dp->id] = NULL;
}
static const struct component_ops dp_display_comp_ops = {
.bind = dp_display_bind,
.unbind = dp_display_unbind,
};
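/* a sink with a downstream port present is a branch device (dongle) */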
static bool dp_display_is_ds_bridge(struct dp_panel *panel)
{
return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT);
}
static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "present=%#x sink_count=%d\n",
dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
dp->link->sink_count);
return dp_display_is_ds_bridge(dp->panel) &&
(dp->link->sink_count == 0);
}
static void dp_display_send_hpd_event(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
struct drm_connector *connector;
dp = container_of(dp_display, struct dp_display_private, dp_display);
connector = dp->dp_display.connector;
drm_helper_hpd_irq_event(connector->dev);
}
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
if ((hpd && dp->dp_display.is_connected) ||
(!hpd && !dp->dp_display.is_connected)) {
drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
(hpd ? "on" : "off"));
return 0;
}
/* reset video pattern flag on disconnect */
if (!hpd)
dp->panel->video_test = false;
dp->dp_display.is_connected = hpd;
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
dp_display_send_hpd_event(&dp->dp_display);
return 0;
}
static int dp_display_process_hpd_high(struct dp_display_private *dp)
{
int rc = 0;
struct edid *edid;
dp->panel->max_dp_lanes = dp->parser->max_dp_lanes;
dp->panel->max_dp_link_rate = dp->parser->max_dp_link_rate;
drm_dbg_dp(dp->drm_dev, "max_lanes=%d max_link_rate=%d\n",
dp->panel->max_dp_lanes, dp->panel->max_dp_link_rate);
rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
if (rc)
goto end;
dp_link_process_request(dp->link);
edid = dp->panel->edid;
dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
dp->audio_supported = drm_detect_monitor_audio(edid);
dp_panel_handle_sink_request(dp->panel);
dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
/*
* set sink to normal operation mode -- D0
* before dpcd read
*/
dp_link_psm_config(dp->link, &dp->panel->link_info, false);
dp_link_reset_phy_params_vx_px(dp->link);
rc = dp_ctrl_on_link(dp->ctrl);
if (rc) {
DRM_ERROR("failed to complete DP link training\n");
goto end;
}
dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
end:
return rc;
}
static void dp_display_host_phy_init(struct dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
if (!dp->phy_initialized) {
dp_ctrl_phy_init(dp->ctrl);
dp->phy_initialized = true;
}
}
static void dp_display_host_phy_exit(struct dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
if (dp->phy_initialized) {
dp_ctrl_phy_exit(dp->ctrl);
dp->phy_initialized = false;
}
}
static void dp_display_host_init(struct dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
dp_power_init(dp->power);
dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
dp_aux_init(dp->aux);
dp->core_initialized = true;
}
static void dp_display_host_deinit(struct dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
dp_aux_deinit(dp->aux);
dp_power_deinit(dp->power);
dp->core_initialized = false;
}
static int dp_display_usbpd_configure_cb(struct device *dev)
{
struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_display_host_phy_init(dp);
return dp_display_process_hpd_high(dp);
}
static int dp_display_notify_disconnect(struct device *dev)
{
struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
return 0;
}
static void dp_display_handle_video_request(struct dp_display_private *dp)
{
if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
dp->panel->video_test = true;
dp_link_send_test_response(dp->link);
}
}
static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
{
int rc = 0;
if (dp_display_is_sink_count_zero(dp)) {
drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n");
if (dp->hpd_state != ST_DISCONNECTED) {
dp->hpd_state = ST_DISCONNECT_PENDING;
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
}
} else {
if (dp->hpd_state == ST_DISCONNECTED) {
dp->hpd_state = ST_MAINLINK_READY;
rc = dp_display_process_hpd_high(dp);
if (rc)
dp->hpd_state = ST_DISCONNECTED;
}
}
return rc;
}
static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
{
u32 sink_request = dp->link->sink_request;
drm_dbg_dp(dp->drm_dev, "%d\n", sink_request);
if (dp->hpd_state == ST_DISCONNECTED) {
if (sink_request & DP_LINK_STATUS_UPDATED) {
drm_dbg_dp(dp->drm_dev, "Disconnected sink_request: %d\n",
sink_request);
DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
return -EINVAL;
}
}
dp_ctrl_handle_sink_request(dp->ctrl);
if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
dp_display_handle_video_request(dp);
return 0;
}
static int dp_display_usbpd_attention_cb(struct device *dev)
{
int rc = 0;
u32 sink_request;
struct dp_display_private *dp = dev_get_dp_display_private(dev);
/* check for any test request issued by sink */
rc = dp_link_process_request(dp->link);
if (!rc) {
sink_request = dp->link->sink_request;
drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
rc = dp_display_handle_port_status_changed(dp);
else
rc = dp_display_handle_irq_hpd(dp);
}
return rc;
}
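/*
 * HPD plug handler, run from the event thread. It only acts in the
 * ST_DISCONNECTED state: it trains the link and moves the state machine
 * to ST_MAINLINK_READY, leaving the rest of the connect sequence to the
 * hotplug uevent and bridge enable path.
 */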
static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
int ret;
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;
}
if (state == ST_MAINLINK_READY || state == ST_CONNECTED) {
mutex_unlock(&dp->event_mutex);
return 0;
}
if (state == ST_DISCONNECT_PENDING) {
/* wait until ST_DISCONNECTED */
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
return 0;
}
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
} else {
dp->hpd_state = ST_MAINLINK_READY;
}
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
/* uevent will complete connection part */
return 0;
}
static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
bool plugged)
{
struct dp_display_private *dp;
dp = container_of(dp_display,
struct dp_display_private, dp_display);
/* notify audio subsystem only if sink supports audio */
if (dp_display->plugged_cb && dp_display->codec_dev &&
dp->audio_supported)
dp_display->plugged_cb(dp_display->codec_dev, plugged);
}
static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
/* unplugged, no more irq_hpd handling */
dp_del_event(dp, EV_IRQ_HPD_INT);
if (state == ST_DISCONNECTED) {
/* triggered by irq_hpd with sink_count = 0 */
if (dp->link->sink_count == 0) {
dp_display_host_phy_exit(dp);
}
dp_display_notify_disconnect(&dp->pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_DISCONNECT_PENDING) {
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_MAINLINK_READY) {
dp_ctrl_off_link(dp->ctrl);
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
/*
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
*/
dp_display_notify_disconnect(&dp->pdev->dev);
if (state == ST_DISPLAY_OFF) {
dp->hpd_state = ST_DISCONNECTED;
} else {
dp->hpd_state = ST_DISCONNECT_PENDING;
}
/* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(&dp->dp_display, false);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
/* uevent will complete disconnection part */
mutex_unlock(&dp->event_mutex);
return 0;
}
static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
mutex_lock(&dp->event_mutex);
/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;
}
if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) {
/* wait until ST_CONNECTED */
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
return 0;
}
dp_display_usbpd_attention_cb(&dp->pdev->dev);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
return 0;
}
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
{
dp_debug_put(dp->debug);
dp_audio_put(dp->audio);
dp_panel_put(dp->panel);
dp_aux_put(dp->aux);
}
static int dp_init_sub_modules(struct dp_display_private *dp)
{
int rc = 0;
struct device *dev = &dp->pdev->dev;
struct dp_panel_in panel_in = {
.dev = dev,
};
dp->parser = dp_parser_get(dp->pdev);
if (IS_ERR(dp->parser)) {
rc = PTR_ERR(dp->parser);
DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
dp->parser = NULL;
goto error;
}
dp->catalog = dp_catalog_get(dev, &dp->parser->io);
if (IS_ERR(dp->catalog)) {
rc = PTR_ERR(dp->catalog);
DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
dp->catalog = NULL;
goto error;
}
dp->power = dp_power_get(dev, dp->parser);
if (IS_ERR(dp->power)) {
rc = PTR_ERR(dp->power);
DRM_ERROR("failed to initialize power, rc = %d\n", rc);
dp->power = NULL;
goto error;
}
dp->aux = dp_aux_get(dev, dp->catalog, dp->dp_display.is_edp);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
dp->aux = NULL;
goto error;
}
dp->link = dp_link_get(dev, dp->aux);
if (IS_ERR(dp->link)) {
rc = PTR_ERR(dp->link);
DRM_ERROR("failed to initialize link, rc = %d\n", rc);
dp->link = NULL;
goto error_link;
}
panel_in.aux = dp->aux;
panel_in.catalog = dp->catalog;
panel_in.link = dp->link;
dp->panel = dp_panel_get(&panel_in);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
dp->panel = NULL;
goto error_link;
}
dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
dp->power, dp->catalog, dp->parser);
if (IS_ERR(dp->ctrl)) {
rc = PTR_ERR(dp->ctrl);
DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
dp->ctrl = NULL;
goto error_ctrl;
}
dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
dp->audio = NULL;
goto error_ctrl;
}
/* propagate wide_bus_en to the different layers */
dp->ctrl->wide_bus_en = dp->wide_bus_en;
dp->catalog->wide_bus_en = dp->wide_bus_en;
return rc;
error_ctrl:
dp_panel_put(dp->panel);
error_link:
dp_aux_put(dp->aux);
error:
return rc;
}
static int dp_display_set_mode(struct msm_dp *dp_display,
struct dp_display_mode *mode)
{
struct dp_display_private *dp;
dp = container_of(dp_display, struct dp_display_private, dp_display);
drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
dp->panel->dp_mode.bpp = mode->bpp;
dp->panel->dp_mode.capabilities = mode->capabilities;
dp_panel_init_panel_info(dp->panel);
return 0;
}
static int dp_display_enable(struct dp_display_private *dp, bool force_link_train)
{
int rc = 0;
struct msm_dp *dp_display = &dp->dp_display;
drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count);
if (dp_display->power_on) {
drm_dbg_dp(dp->drm_dev, "Link already setup, return\n");
return 0;
}
rc = dp_ctrl_on_stream(dp->ctrl, force_link_train);
if (!rc)
dp_display->power_on = true;
return rc;
}
static int dp_display_post_enable(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
u32 rate;
dp = container_of(dp_display, struct dp_display_private, dp_display);
rate = dp->link->link_params.rate;
if (dp->audio_supported) {
dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate);
dp->audio->lane_count = dp->link->link_params.num_lanes;
}
/* signal the connect event late to synchronize video and display */
dp_display_handle_plugged_change(dp_display, true);
if (dp_display->psr_supported)
dp_ctrl_config_psr(dp->ctrl);
return 0;
}
static int dp_display_disable(struct dp_display_private *dp)
{
struct msm_dp *dp_display = &dp->dp_display;
if (!dp_display->power_on)
return 0;
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
/* signal the disconnect event */
dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
DRM_ERROR("audio comp timeout\n");
}
dp_display->audio_enabled = false;
if (dp->link->sink_count == 0) {
/*
* irq_hpd with sink_count = 0
* hdmi unplugged out of dongle
*/
dp_ctrl_off_link_stream(dp->ctrl);
} else {
/*
* unplugged interrupt
* dongle unplugged out of DUT
*/
dp_ctrl_off(dp->ctrl);
dp_display_host_phy_exit(dp);
}
dp_display->power_on = false;
drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count);
return 0;
}
int dp_display_set_plugged_cb(struct msm_dp *dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev)
{
bool plugged;
dp_display->plugged_cb = fn;
dp_display->codec_dev = codec_dev;
plugged = dp_display->is_connected;
dp_display_handle_plugged_change(dp_display, plugged);
return 0;
}
/**
* dp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
* @info: display info
* @mode: Pointer to drm mode structure
* Returns: Validity status for specified mode
*/
enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
const u32 num_components = 3, default_bpp = 24;
struct dp_display_private *dp_display;
struct dp_link_info *link_info;
u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
struct msm_dp *dp;
int mode_pclk_khz = mode->clock;
dp = to_dp_bridge(bridge)->dp_display;
if (!dp || !mode_pclk_khz || !dp->connector) {
DRM_ERROR("invalid params\n");
return MODE_ERROR;
}
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
dp_display = container_of(dp, struct dp_display_private, dp_display);
link_info = &dp_display->panel->link_info;
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
mode_bpp = dp_panel_get_mode_bpp(dp_display->panel,
mode_bpp, mode_pclk_khz);
mode_rate_khz = mode_pclk_khz * mode_bpp;
supported_rate_khz = link_info->num_lanes * link_info->rate * 8;
if (mode_rate_khz > supported_rate_khz)
return MODE_BAD;
return MODE_OK;
}
int dp_display_get_modes(struct msm_dp *dp)
{
struct dp_display_private *dp_display;
if (!dp) {
DRM_ERROR("invalid params\n");
return 0;
}
dp_display = container_of(dp, struct dp_display_private, dp_display);
return dp_panel_get_modes(dp_display->panel,
dp->connector);
}
bool dp_display_check_video_test(struct msm_dp *dp)
{
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
return dp_display->panel->video_test;
}
int dp_display_get_test_bpp(struct msm_dp *dp)
{
struct dp_display_private *dp_display;
if (!dp) {
DRM_ERROR("invalid params\n");
return 0;
}
dp_display = container_of(dp, struct dp_display_private, dp_display);
return dp_link_bit_depth_to_bpp(
dp_display->link->test_video.test_bit_depth);
}
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
{
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
/*
* Reading registers requires the link clocks to be running, but
* until a DP cable is connected they are not, since the
* resolution to power up with is unknown. Hence check the
* power_on status before dumping DP registers to avoid a crash
* due to unclocked access.
*/
mutex_lock(&dp_display->event_mutex);
if (!dp->power_on) {
mutex_unlock(&dp_display->event_mutex);
return;
}
dp_catalog_snapshot(dp_display->catalog, disp_state);
mutex_unlock(&dp_display->event_mutex);
}
void dp_display_set_psr(struct msm_dp *dp_display, bool enter)
{
struct dp_display_private *dp;
if (!dp_display) {
DRM_ERROR("invalid params\n");
return;
}
dp = container_of(dp_display, struct dp_display_private, dp_display);
dp_ctrl_set_psr(dp->ctrl, enter);
}
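/*
 * HPD event thread: consumes events from the ring buffer. An event with
 * a non-zero delay is re-queued with the delay decremented and the
 * thread switches to a polled (timeout) wait, so a delay of N amounts
 * to roughly N * EVENT_TIMEOUT before the event is handled.
 */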
static int hpd_event_thread(void *data)
{
struct dp_display_private *dp_priv;
unsigned long flag;
struct dp_event *todo;
int timeout_mode = 0;
dp_priv = (struct dp_display_private *)data;
while (1) {
if (timeout_mode) {
wait_event_timeout(dp_priv->event_q,
(dp_priv->event_pndx == dp_priv->event_gndx) ||
kthread_should_stop(), EVENT_TIMEOUT);
} else {
wait_event_interruptible(dp_priv->event_q,
(dp_priv->event_pndx != dp_priv->event_gndx) ||
kthread_should_stop());
}
if (kthread_should_stop())
break;
spin_lock_irqsave(&dp_priv->event_lock, flag);
todo = &dp_priv->event_list[dp_priv->event_gndx];
if (todo->delay) {
struct dp_event *todo_next;
dp_priv->event_gndx++;
dp_priv->event_gndx %= DP_EVENT_Q_MAX;
/* re-queue the delayed event */
todo_next = &dp_priv->event_list[dp_priv->event_pndx++];
dp_priv->event_pndx %= DP_EVENT_Q_MAX;
todo_next->event_id = todo->event_id;
todo_next->data = todo->data;
todo_next->delay = todo->delay - 1;
/* clean up older event */
todo->event_id = EV_NO_EVENT;
todo->delay = 0;
/* switch to timeout mode */
timeout_mode = 1;
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
continue;
}
/* timed out with no events in the queue */
if (dp_priv->event_pndx == dp_priv->event_gndx) {
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
continue;
}
dp_priv->event_gndx++;
dp_priv->event_gndx %= DP_EVENT_Q_MAX;
timeout_mode = 0;
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
switch (todo->event_id) {
case EV_HPD_INIT_SETUP:
dp_display_host_init(dp_priv);
break;
case EV_HPD_PLUG_INT:
dp_hpd_plug_handle(dp_priv, todo->data);
break;
case EV_HPD_UNPLUG_INT:
dp_hpd_unplug_handle(dp_priv, todo->data);
break;
case EV_IRQ_HPD_INT:
dp_irq_hpd_handle(dp_priv, todo->data);
break;
case EV_USER_NOTIFICATION:
dp_display_send_hpd_notification(dp_priv,
todo->data);
break;
default:
break;
}
}
return 0;
}
static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
{
/* set event q to empty */
dp_priv->event_gndx = 0;
dp_priv->event_pndx = 0;
dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
if (IS_ERR(dp_priv->ev_tsk))
return PTR_ERR(dp_priv->ev_tsk);
return 0;
}
static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
{
struct dp_display_private *dp = dev_id;
irqreturn_t ret = IRQ_NONE;
u32 hpd_isr_status;
if (!dp) {
DRM_ERROR("invalid data\n");
return IRQ_NONE;
}
hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
if (hpd_isr_status & 0x0F) {
drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
dp->dp_display.connector_type, hpd_isr_status);
/* hpd related interrupts */
if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
}
if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
}
if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
ret = IRQ_HANDLED;
}
/* DP controller isr */
ret |= dp_ctrl_isr(dp->ctrl);
/* DP aux isr */
ret |= dp_aux_isr(dp->aux);
return ret;
}
int dp_display_request_irq(struct msm_dp *dp_display)
{
int rc = 0;
struct dp_display_private *dp;
if (!dp_display) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
dp = container_of(dp_display, struct dp_display_private, dp_display);
dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
if (!dp->irq) {
DRM_ERROR("failed to get irq\n");
return -EINVAL;
}
rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
dp_display_irq_handler,
IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
if (rc < 0) {
DRM_ERROR("failed to request IRQ%u: %d\n",
dp->irq, rc);
return rc;
}
return 0;
}
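/*
 * Match this controller instance against the SoC descriptor table by
 * comparing register base addresses; the table is terminated by an
 * all-zero sentinel entry.
 */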
static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev)
{
const struct msm_dp_desc *descs = of_device_get_match_data(&pdev->dev);
struct resource *res;
int i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return NULL;
for (i = 0; i < descs[i].io_start; i++) {
if (descs[i].io_start == res->start)
return &descs[i];
}
dev_err(&pdev->dev, "unknown displayport instance\n");
return NULL;
}
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
struct dp_display_private *dp;
const struct msm_dp_desc *desc;
if (!pdev || !pdev->dev.of_node) {
DRM_ERROR("pdev not found\n");
return -ENODEV;
}
dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
desc = dp_display_get_desc(pdev);
if (!desc)
return -EINVAL;
dp->pdev = pdev;
dp->name = "drm_dp";
dp->id = desc->id;
dp->dp_display.connector_type = desc->connector_type;
dp->wide_bus_en = desc->wide_bus_en;
dp->dp_display.is_edp =
(dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
rc = dp_init_sub_modules(dp);
if (rc) {
DRM_ERROR("init sub module failed\n");
return -EPROBE_DEFER;
}
/* setup event q */
mutex_init(&dp->event_mutex);
init_waitqueue_head(&dp->event_q);
spin_lock_init(&dp->event_lock);
/* Store DP audio handle inside DP display */
dp->dp_display.dp_audio = dp->audio;
init_completion(&dp->audio_comp);
platform_set_drvdata(pdev, &dp->dp_display);
rc = component_add(&pdev->dev, &dp_display_comp_ops);
if (rc) {
DRM_ERROR("component add failed, rc=%d\n", rc);
dp_display_deinit_sub_modules(dp);
}
return rc;
}
static int dp_display_remove(struct platform_device *pdev)
{
struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
component_del(&pdev->dev, &dp_display_comp_ops);
dp_display_deinit_sub_modules(dp);
platform_set_drvdata(pdev, NULL);
return 0;
}
static int dp_pm_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_dp *dp_display = platform_get_drvdata(pdev);
struct dp_display_private *dp;
int sink_count = 0;
dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->event_mutex);
drm_dbg_dp(dp->drm_dev,
"Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
/* start from disconnected state */
dp->hpd_state = ST_DISCONNECTED;
/* turn on dp ctrl/phy */
dp_display_host_init(dp);
if (dp_display->is_edp)
dp_catalog_ctrl_hpd_enable(dp->catalog);
if (dp_catalog_link_is_connected(dp->catalog)) {
/*
* set sink to normal operation mode -- D0
* before dpcd read
*/
dp_display_host_phy_init(dp);
dp_link_psm_config(dp->link, &dp->panel->link_info, false);
sink_count = drm_dp_read_sink_count(dp->aux);
if (sink_count < 0)
sink_count = 0;
dp_display_host_phy_exit(dp);
}
dp->link->sink_count = sink_count;
/*
* The display cannot be declared connected unless the
* downstream cable is plugged in and the dongle's
* sink_count becomes 1; also, only signal the audio
* subsystem when disconnected.
*/
if (dp->link->sink_count) {
dp->dp_display.is_connected = true;
} else {
dp->dp_display.is_connected = false;
dp_display_handle_plugged_change(dp_display, false);
}
drm_dbg_dp(dp->drm_dev,
"After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n",
dp->dp_display.connector_type, dp->link->sink_count,
dp->dp_display.is_connected, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
mutex_unlock(&dp->event_mutex);
return 0;
}
static int dp_pm_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_dp *dp_display = platform_get_drvdata(pdev);
struct dp_display_private *dp;
dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->event_mutex);
drm_dbg_dp(dp->drm_dev,
"Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
/* if the mainlink is still enabled, shut it down */
if (dp_power_clk_status(dp->power, DP_CTRL_PM))
dp_ctrl_off_link_stream(dp->ctrl);
dp_display_host_phy_exit(dp);
/* host_init will be called at pm_resume */
dp_display_host_deinit(dp);
dp->hpd_state = ST_SUSPENDED;
drm_dbg_dp(dp->drm_dev,
"After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
mutex_unlock(&dp->event_mutex);
return 0;
}
static const struct dev_pm_ops dp_pm_ops = {
.suspend = dp_pm_suspend,
.resume = dp_pm_resume,
};
static struct platform_driver dp_display_driver = {
.probe = dp_display_probe,
.remove = dp_display_remove,
.driver = {
.name = "msm-dp-display",
.of_match_table = dp_dt_match,
.suppress_bind_attrs = true,
.pm = &dp_pm_ops,
},
};
int __init msm_dp_register(void)
{
int ret;
ret = platform_driver_register(&dp_display_driver);
if (ret)
DRM_ERROR("Dp display driver register failed");
return ret;
}
void __exit msm_dp_unregister(void)
{
platform_driver_unregister(&dp_display_driver);
}
void msm_dp_irq_postinstall(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
if (!dp_display)
return;
dp = container_of(dp_display, struct dp_display_private, dp_display);
if (!dp_display->is_edp)
dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0);
}
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
struct dp_display_private *dp;
dp = container_of(dp_display, struct dp_display_private, dp_display);
return dp->wide_bus_en;
}
void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
{
struct dp_display_private *dp;
struct device *dev;
int rc;
dp = container_of(dp_display, struct dp_display_private, dp_display);
dev = &dp->pdev->dev;
dp->debug = dp_debug_get(dev, dp->panel,
dp->link, dp->dp_display.connector,
minor);
if (IS_ERR(dp->debug)) {
rc = PTR_ERR(dp->debug);
DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
dp->debug = NULL;
}
}
static int dp_display_get_next_bridge(struct msm_dp *dp)
{
int rc;
struct dp_display_private *dp_priv;
struct device_node *aux_bus;
struct device *dev;
dp_priv = container_of(dp, struct dp_display_private, dp_display);
dev = &dp_priv->pdev->dev;
aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
if (aux_bus && dp->is_edp) {
dp_display_host_init(dp_priv);
dp_catalog_ctrl_hpd_enable(dp_priv->catalog);
dp_display_host_phy_init(dp_priv);
/*
* The code below assumes that the panel will finish probing
* by the time devm_of_dp_aux_populate_ep_devices() returns.
* This isn't a great assumption since it will fail if the
* panel driver is probed asynchronously but is the best we
* can do without a bigger driver reorganization.
*/
rc = of_dp_aux_populate_bus(dp_priv->aux, NULL);
of_node_put(aux_bus);
if (rc)
goto error;
} else if (dp->is_edp) {
DRM_ERROR("eDP aux_bus not found\n");
return -ENODEV;
}
/*
* External bridges are mandatory for eDP interfaces: one has to
* provide at least an eDP panel (which gets wrapped into panel-bridge).
*
* For DisplayPort interfaces external bridges are optional, so
* silently ignore an error if one is not present (-ENODEV).
*/
rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser);
if (!dp->is_edp && rc == -ENODEV)
return 0;
if (!rc) {
dp->next_bridge = dp_priv->parser->next_bridge;
return 0;
}
error:
if (dp->is_edp) {
of_dp_aux_depopulate_bus(dp_priv->aux);
dp_display_host_phy_exit(dp_priv);
dp_display_host_deinit(dp_priv);
}
return rc;
}
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
struct dp_display_private *dp_priv;
int ret;
dp_display->drm_dev = dev;
dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
ret = dp_display_request_irq(dp_display);
if (ret) {
DRM_ERROR("request_irq failed, ret=%d\n", ret);
return ret;
}
ret = dp_display_get_next_bridge(dp_display);
if (ret)
return ret;
dp_display->bridge = dp_bridge_init(dp_display, dev, encoder);
if (IS_ERR(dp_display->bridge)) {
ret = PTR_ERR(dp_display->bridge);
DRM_DEV_ERROR(dev->dev,
"failed to create dp bridge: %d\n", ret);
dp_display->bridge = NULL;
return ret;
}
priv->bridges[priv->num_bridges++] = dp_display->bridge;
dp_display->connector = dp_drm_connector_init(dp_display, encoder);
if (IS_ERR(dp_display->connector)) {
ret = PTR_ERR(dp_display->connector);
DRM_DEV_ERROR(dev->dev,
"failed to create dp connector: %d\n", ret);
dp_display->connector = NULL;
return ret;
}
dp_priv->panel->connector = dp_display->connector;
return 0;
}
void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
int rc = 0;
struct dp_display_private *dp_display;
u32 state;
bool force_link_train = false;
dp_display = container_of(dp, struct dp_display_private, dp_display);
if (!dp_display->dp_mode.drm_mode.clock) {
DRM_ERROR("invalid params\n");
return;
}
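	/*
	 * eDP panels are permanently connected, so synthesize the HPD plug
	 * event at enable time instead of waiting for a hotplug interrupt.
	 */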
if (dp->is_edp)
dp_hpd_plug_handle(dp_display, 0);
mutex_lock(&dp_display->event_mutex);
state = dp_display->hpd_state;
if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
mutex_unlock(&dp_display->event_mutex);
return;
}
rc = dp_display_set_mode(dp, &dp_display->dp_mode);
if (rc) {
DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
mutex_unlock(&dp_display->event_mutex);
return;
}
state = dp_display->hpd_state;
if (state == ST_DISPLAY_OFF) {
dp_display_host_phy_init(dp_display);
force_link_train = true;
}
dp_display_enable(dp_display, force_link_train);
rc = dp_display_post_enable(dp);
if (rc) {
DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
dp_display_disable(dp_display);
}
/* completed connection */
dp_display->hpd_state = ST_CONNECTED;
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
mutex_unlock(&dp_display->event_mutex);
}
void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
dp_ctrl_push_idle(dp_display->ctrl);
}
void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
u32 state;
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
if (dp->is_edp)
dp_hpd_unplug_handle(dp_display, 0);
mutex_lock(&dp_display->event_mutex);
state = dp_display->hpd_state;
if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) {
mutex_unlock(&dp_display->event_mutex);
return;
}
dp_display_disable(dp_display);
state = dp_display->hpd_state;
if (state == ST_DISCONNECT_PENDING) {
/* completed disconnection */
dp_display->hpd_state = ST_DISCONNECTED;
} else {
dp_display->hpd_state = ST_DISPLAY_OFF;
}
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
mutex_unlock(&dp_display->event_mutex);
}
void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
if (dp_display_check_video_test(dp))
dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp);
else /* Default num_components per pixel = 3 */
dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3;
if (!dp_display->dp_mode.bpp)
dp_display->dp_mode.bpp = 24; /* Default bpp */
drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode);
dp_display->dp_mode.v_active_low =
!!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
dp_display->dp_mode.h_active_low =
!!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
}
void dp_bridge_hpd_enable(struct drm_bridge *bridge)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
struct msm_dp *dp_display = dp_bridge->dp_display;
struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->event_mutex);
dp_catalog_ctrl_hpd_enable(dp->catalog);
/* enable HPD interrupts */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
dp_display->internal_hpd = true;
mutex_unlock(&dp->event_mutex);
}
void dp_bridge_hpd_disable(struct drm_bridge *bridge)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
struct msm_dp *dp_display = dp_bridge->dp_display;
struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->event_mutex);
/* disable HPD interrupts */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
dp_catalog_ctrl_hpd_disable(dp->catalog);
dp_display->internal_hpd = false;
mutex_unlock(&dp->event_mutex);
}
void dp_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
struct msm_dp *dp_display = dp_bridge->dp_display;
struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
/* Without a next_bridge, interrupts are handled by the DP core directly */
if (dp_display->internal_hpd)
return;
if (!dp->core_initialized) {
drm_dbg_dp(dp->drm_dev, "not initialized\n");
return;
}
if (!dp_display->is_connected && status == connector_status_connected)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
else if (dp_display->is_connected && status == connector_status_disconnected)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
}
| linux-master | drivers/gpu/drm/msm/dp/dp_display.c |
/*
* Copyright 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <drm/drm_file.h>
#include "vgem_drv.h"
#define VGEM_FENCE_TIMEOUT (10*HZ)
struct vgem_fence {
struct dma_fence base;
struct spinlock lock;
struct timer_list timer;
};
static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
{
return "vgem";
}
static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
del_timer_sync(&fence->timer);
dma_fence_free(&fence->base);
}
static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%llu", fence->seqno);
}
static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
snprintf(str, size, "%llu",
dma_fence_is_signaled(fence) ? fence->seqno : 0);
}
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
.timeline_value_str = vgem_fence_timeline_value_str,
};
static void vgem_fence_timeout(struct timer_list *t)
{
struct vgem_fence *fence = from_timer(fence, t, timer);
dma_fence_signal(&fence->base);
}
static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
unsigned int flags)
{
struct vgem_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return NULL;
spin_lock_init(&fence->lock);
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
timer_setup(&fence->timer, vgem_fence_timeout, 0);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
return &fence->base;
}
/*
* vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
*
* Create and attach a fence to the vGEM handle. This fence is then exposed
* via the dma-buf reservation object and visible to consumers of the exported
* dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
* vGEM buffer is being written to by the client and is exposed as an exclusive
 * fence; otherwise the fence indicates the client is currently reading from the
* buffer and all future writes should wait for the client to signal its
* completion. Note that if a conflicting fence is already on the dma-buf (i.e.
* an exclusive fence when adding a read, or any fence when adding a write),
* -EBUSY is reported. Serialisation between operations should be handled
* by waiting upon the dma-buf.
*
* This returns the handle for the new fence that must be signaled within 10
* seconds (or otherwise it will automatically expire). See
* vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
*
* If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
*/
int vgem_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
struct drm_vgem_fence_attach *arg = data;
struct vgem_file *vfile = file->driver_priv;
struct dma_resv *resv;
struct drm_gem_object *obj;
enum dma_resv_usage usage;
struct dma_fence *fence;
int ret;
if (arg->flags & ~VGEM_FENCE_WRITE)
return -EINVAL;
if (arg->pad)
return -EINVAL;
obj = drm_gem_object_lookup(file, arg->handle);
if (!obj)
return -ENOENT;
fence = vgem_fence_create(vfile, arg->flags);
if (!fence) {
ret = -ENOMEM;
goto err;
}
/* Check for a conflicting fence */
resv = obj->resv;
usage = dma_resv_usage_rw(arg->flags & VGEM_FENCE_WRITE);
if (!dma_resv_test_signaled(resv, usage)) {
ret = -EBUSY;
goto err_fence;
}
/* Expose the fence via the dma-buf */
dma_resv_lock(resv, NULL);
ret = dma_resv_reserve_fences(resv, 1);
if (!ret)
dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ?
DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
if (ret == 0) {
mutex_lock(&vfile->fence_mutex);
ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
mutex_unlock(&vfile->fence_mutex);
if (ret > 0) {
arg->out_fence = ret;
ret = 0;
}
}
err_fence:
if (ret) {
dma_fence_signal(fence);
dma_fence_put(fence);
}
err:
drm_gem_object_put(obj);
return ret;
}
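/*
 * Illustrative userspace sketch (not part of the driver): attaching a
 * write fence through the ioctl documented above. This assumes libdrm's
 * drmIoctl() helper, the uapi definitions from <drm/vgem_drm.h>, an open
 * vGEM fd and an existing GEM handle.
 */
#if 0
	struct drm_vgem_fence_attach attach = {
		.handle = handle,		/* existing vGEM buffer handle */
		.flags = VGEM_FENCE_WRITE,	/* expose as an exclusive fence */
	};

	if (drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -errno;
	/* attach.out_fence names the fence for the signal ioctl below */
#endif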
/*
* vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
*
 * Signal and consume a fence earlier attached to a vGEM handle using
* vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
*
* All fences must be signaled within 10s of attachment or otherwise they
 * will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
*
* Signaling a fence indicates to all consumers of the dma-buf that the
* client has completed the operation associated with the fence, and that the
* buffer is then ready for consumption.
*
* If the fence does not exist (or has already been signaled by the client),
* vgem_fence_signal_ioctl returns -ENOENT.
*/
int vgem_fence_signal_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
struct vgem_file *vfile = file->driver_priv;
struct drm_vgem_fence_signal *arg = data;
struct dma_fence *fence;
int ret = 0;
if (arg->flags)
return -EINVAL;
mutex_lock(&vfile->fence_mutex);
fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
mutex_unlock(&vfile->fence_mutex);
if (!fence)
return -ENOENT;
if (IS_ERR(fence))
return PTR_ERR(fence);
if (dma_fence_is_signaled(fence))
ret = -ETIMEDOUT;
dma_fence_signal(fence);
dma_fence_put(fence);
return ret;
}
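/*
 * Illustrative userspace sketch (not part of the driver): signalling the
 * fence obtained from DRM_IOCTL_VGEM_FENCE_ATTACH once the CPU work on
 * the buffer has completed. Same assumptions as the attach sketch above.
 */
#if 0
	struct drm_vgem_fence_signal signal = {
		.fence = attach.out_fence,
	};

	if (drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal))
		return -errno;
#endif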
int vgem_fence_open(struct vgem_file *vfile)
{
mutex_init(&vfile->fence_mutex);
idr_init_base(&vfile->fence_idr, 1);
return 0;
}
static int __vgem_fence_idr_fini(int id, void *p, void *data)
{
dma_fence_signal(p);
dma_fence_put(p);
return 0;
}
void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
mutex_destroy(&vfile->fence_mutex);
}
| linux-master | drivers/gpu/drm/vgem/vgem_fence.c |
/*
* Copyright 2011 Red Hat, Inc.
* Copyright © 2014 The Chromium OS Authors
*
* Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Adam Jackson <[email protected]>
* Ben Widawsky <[email protected]>
*/
/*
* This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
* software renderer and the X server for efficient buffer sharing.
*/
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include "vgem_drv.h"
#define DRIVER_NAME "vgem"
#define DRIVER_DESC "Virtual GEM provider"
#define DRIVER_DATE "20120112"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
} *vgem_device;
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile;
int ret;
vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
if (!vfile)
return -ENOMEM;
file->driver_priv = vfile;
ret = vgem_fence_open(vfile);
if (ret) {
kfree(vfile);
return ret;
}
return 0;
}
static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile = file->driver_priv;
vgem_fence_close(vfile);
kfree(vfile);
}
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size)
{
struct drm_gem_shmem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
/*
 * vgem doesn't have any begin/end cpu access ioctls, so it must use
 * coherent memory, or dma-buf sharing just won't work.
*/
obj->map_wc = true;
return &obj->base;
}
static const struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
.postclose = vgem_postclose,
.ioctls = vgem_ioctls,
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_create_object = vgem_gem_create_object,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
static int __init vgem_init(void)
{
int ret;
struct platform_device *pdev;
pdev = platform_device_register_simple("vgem", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_unregister;
}
dma_coerce_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(64));
vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver,
struct vgem_device, drm);
if (IS_ERR(vgem_device)) {
ret = PTR_ERR(vgem_device);
goto out_devres;
}
vgem_device->platform = pdev;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
goto out_devres;
return 0;
out_devres:
devres_release_group(&pdev->dev, NULL);
out_unregister:
platform_device_unregister(pdev);
return ret;
}
static void __exit vgem_exit(void)
{
struct platform_device *pdev = vgem_device->platform;
drm_dev_unregister(&vgem_device->drm);
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
}
module_init(vgem_init);
module_exit(vgem_exit);
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
| linux-master | drivers/gpu/drm/vgem/vgem_drv.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "drm_random.h"
u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
{
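	/*
	 * Multiply-high trick: take the upper 32 bits of the 64-bit product
	 * to scale a full-range 32-bit random value into [0, ep_ro) without
	 * a modulo, driven by the caller-provided PRNG state.
	 */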
return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
}
EXPORT_SYMBOL(drm_prandom_u32_max_state);
void drm_random_reorder(unsigned int *order, unsigned int count,
struct rnd_state *state)
{
unsigned int i, j;
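	/*
	 * Shuffle by swapping every slot with a uniformly chosen slot from
	 * the whole array. Good enough for randomising test orderings, even
	 * though it is not a strictly unbiased Fisher-Yates shuffle.
	 */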
for (i = 0; i < count; ++i) {
BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32));
j = drm_prandom_u32_max_state(count, state);
swap(order[i], order[j]);
}
}
EXPORT_SYMBOL(drm_random_reorder);
unsigned int *drm_random_order(unsigned int count, struct rnd_state *state)
{
unsigned int *order, i;
order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
if (!order)
return order;
for (i = 0; i < count; i++)
order[i] = i;
drm_random_reorder(order, count, state);
return order;
}
EXPORT_SYMBOL(drm_random_order);
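/*
 * Illustrative sketch (not part of this file): a selftest would typically
 * seed a local PRNG state so failures stay reproducible, then free the
 * permutation when done. Assumes the DRM_RND_STATE() helper from
 * "drm_random.h".
 */
#if 0
	DRM_RND_STATE(prng, 0x12345678);
	unsigned int *order;

	order = drm_random_order(count, &prng);
	if (!order)
		return -ENOMEM;
	/* ... visit order[0..count-1] in shuffled order ... */
	kfree(order);
#endif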
| linux-master | drivers/gpu/drm/lib/drm_random.c |
// SPDX-License-Identifier: MIT
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
static void ast_i2c_setsda(void *i2c_priv, int data)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
if (ujcrb7 == jtemp)
break;
}
}
static void ast_i2c_setscl(void *i2c_priv, int clock)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((clock & 0x01) ? 0 : 1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
if (ujcrb7 == jtemp)
break;
}
}
static int ast_i2c_getsda(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
do {
val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
if (val == val2) {
pass++;
} else {
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
}
} while ((pass < 5) && (count++ < 0x10000));
return val & 1 ? 1 : 0;
}
static int ast_i2c_getscl(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
do {
val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
if (val == val2) {
pass++;
} else {
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
}
} while ((pass < 5) && (count++ < 0x10000));
return val & 1 ? 1 : 0;
}
static void ast_i2c_release(struct drm_device *dev, void *res)
{
struct ast_i2c_chan *i2c = res;
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
{
struct ast_i2c_chan *i2c;
int ret;
i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
if (!i2c)
return NULL;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AST i2c bit bus");
i2c->adapter.algo_data = &i2c->bit;
i2c->bit.udelay = 20;
i2c->bit.timeout = 2;
i2c->bit.data = i2c;
i2c->bit.setsda = ast_i2c_setsda;
i2c->bit.setscl = ast_i2c_setscl;
i2c->bit.getsda = ast_i2c_getsda;
i2c->bit.getscl = ast_i2c_getscl;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
drm_err(dev, "Failed to register bit i2c\n");
goto out_kfree;
}
ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c);
if (ret)
return NULL;
return i2c;
out_kfree:
kfree(i2c);
return NULL;
}
| linux-master | drivers/gpu/drm/ast/ast_i2c.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include "ast_drv.h"
MODULE_FIRMWARE("ast_dp501_fw.bin");
static void ast_release_firmware(void *data)
{
struct ast_device *ast = data;
release_firmware(ast->dp501_fw);
ast->dp501_fw = NULL;
}
static int ast_load_dp501_microcode(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
int ret;
ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
if (ret)
return ret;
return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast);
}
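/*
 * Mailbox handshake with the DP501 firmware: the host places a byte in
 * CRTC register 0x9a, toggles its ack in bit 7 and the command trigger
 * in bit 6 of register 0x9b, and polls the firmware's ack in bit 7 of
 * register 0xd2 (see ast_write_cmd() and ast_write_data() below).
 */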
static void send_ack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack |= 0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static void send_nack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack &= ~0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static bool wait_ack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitack &= 0x80;
udelay(100);
} while ((!waitack) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
static bool wait_nack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitack &= 0x80;
udelay(100);
} while ((waitack) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
static void set_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
}
static void clear_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
}
#if 0
static bool wait_fw_ready(struct ast_device *ast)
{
u8 waitready;
u32 retry = 0;
do {
waitready = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitready &= 0x40;
udelay(100);
} while ((!waitready) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
#endif
static bool ast_write_cmd(struct drm_device *dev, u8 data)
{
struct ast_device *ast = to_ast_device(dev);
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
send_ack(ast);
set_cmd_trigger(ast);
do {
if (wait_ack(ast)) {
clear_cmd_trigger(ast);
send_nack(ast);
return true;
}
} while (retry++ < 100);
}
clear_cmd_trigger(ast);
send_nack(ast);
return false;
}
static bool ast_write_data(struct drm_device *dev, u8 data)
{
struct ast_device *ast = to_ast_device(dev);
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
send_ack(ast);
if (wait_ack(ast)) {
send_nack(ast);
return true;
}
}
send_nack(ast);
return false;
}
#if 0
static bool ast_read_data(struct drm_device *dev, u8 *data)
{
struct ast_device *ast = to_ast_device(dev);
u8 tmp;
*data = 0;
if (wait_ack(ast) == false)
return false;
tmp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd3, 0xff);
*data = tmp;
if (wait_nack(ast) == false) {
send_nack(ast);
return false;
}
send_nack(ast);
return true;
}
static void clear_cmd(struct ast_device *ast)
{
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
}
#endif
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
{
ast_write_cmd(dev, 0x40);
ast_write_data(dev, mode);
msleep(10);
}
static u32 get_fw_base(struct ast_device *ast)
{
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
{
struct ast_device *ast = to_ast_device(dev);
u32 i, data;
u32 boot_address;
if (ast->config_mode != ast_use_p2a)
return false;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (data) {
boot_address = get_fw_base(ast);
for (i = 0; i < size; i += 4)
*(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i);
return true;
}
return false;
}
static bool ast_launch_m68k(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
u8 jreg;
if (ast->config_mode != ast_use_p2a)
return false;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (!data) {
if (ast->dp501_fw_addr) {
fw_addr = ast->dp501_fw_addr;
len = 32*1024;
} else {
if (!ast->dp501_fw &&
ast_load_dp501_microcode(dev) < 0)
return false;
fw_addr = (u8 *)ast->dp501_fw->data;
len = ast->dp501_fw->size;
}
/* Get BootAddress */
ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
data = ast_mindwm(ast, 0x1e6e0004);
switch (data & 0x03) {
case 0:
boot_address = 0x44000000;
break;
default:
case 1:
boot_address = 0x48000000;
break;
case 2:
boot_address = 0x50000000;
break;
case 3:
boot_address = 0x60000000;
break;
}
boot_address -= 0x200000; /* -2MB */
/* copy image to buffer */
for (i = 0; i < len; i += 4) {
data = *(u32 *)(fw_addr + i);
ast_moutdwm(ast, boot_address + i, data);
}
/* Init SCU */
ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
/* Launch FW */
ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address);
ast_moutdwm(ast, 0x1e6e2100, 1);
/* Update Scratch */
data = ast_mindwm(ast, 0x1e6e2040) & 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */
data |= 0x800;
ast_moutdwm(ast, 0x1e6e2040, data);
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */
jreg |= 0x02;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg);
}
return true;
}
bool ast_dp501_is_connected(struct ast_device *ast)
{
u32 boot_address, offset, data;
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = ast_mindwm(ast, boot_address + offset);
if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
return false;
/* validate PnP Monitor */
offset = AST_DP501_PNPMONITOR;
data = ast_mindwm(ast, boot_address + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
} else {
if (!ast->dp501_fw_buf)
return false;
/* dummy read */
offset = 0x0000;
data = readl(ast->dp501_fw_buf + offset);
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = readl(ast->dp501_fw_buf + offset);
if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
return false;
/* validate PnP Monitor */
offset = AST_DP501_PNPMONITOR;
data = readl(ast->dp501_fw_buf + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
}
return true;
}
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_device *ast = to_ast_device(dev);
u32 i, boot_address, offset, data;
u32 *pEDIDidx;
if (!ast_dp501_is_connected(ast))
return false;
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
/* Read EDID */
offset = AST_DP501_EDID_DATA;
for (i = 0; i < 128; i += 4) {
data = ast_mindwm(ast, boot_address + offset + i);
pEDIDidx = (u32 *)(ediddata + i);
*pEDIDidx = data;
}
} else {
/* Read EDID */
offset = AST_DP501_EDID_DATA;
for (i = 0; i < 128; i += 4) {
data = readl(ast->dp501_fw_buf + offset + i);
pEDIDidx = (u32 *)(ediddata + i);
*pEDIDidx = data;
}
}
return true;
}
static bool ast_init_dvo(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x12000, 0x1688a8a8);
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if (!(jreg & 0x80)) {
/* Init SCU DVO Settings */
data = ast_read32(ast, 0x12008);
/* delay phase */
data &= 0xfffff8ff;
data |= 0x00000500;
ast_write32(ast, 0x12008, data);
if (IS_AST_GEN4(ast)) {
data = ast_read32(ast, 0x12084);
/* multi-pins for DVO single-edge */
data |= 0xfffe0000;
ast_write32(ast, 0x12084, data);
data = ast_read32(ast, 0x12088);
/* multi-pins for DVO single-edge */
data |= 0x000fffff;
ast_write32(ast, 0x12088, data);
data = ast_read32(ast, 0x12090);
/* multi-pins for DVO single-edge */
data &= 0xffffffcf;
data |= 0x00000020;
ast_write32(ast, 0x12090, data);
} else { /* AST GEN5+ */
data = ast_read32(ast, 0x12088);
/* multi-pins for DVO single-edge */
data |= 0x30000000;
ast_write32(ast, 0x12088, data);
data = ast_read32(ast, 0x1208c);
/* multi-pins for DVO single-edge */
data |= 0x000000cf;
ast_write32(ast, 0x1208c, data);
data = ast_read32(ast, 0x120a4);
/* multi-pins for DVO single-edge */
data |= 0xffff0000;
ast_write32(ast, 0x120a4, data);
data = ast_read32(ast, 0x120a8);
/* multi-pins for DVO single-edge */
data |= 0x0000000f;
ast_write32(ast, 0x120a8, data);
data = ast_read32(ast, 0x12094);
/* multi-pins for DVO single-edge */
data |= 0x00000002;
ast_write32(ast, 0x12094, data);
}
}
/* Force to DVO */
data = ast_read32(ast, 0x1202c);
data &= 0xfffbffff;
ast_write32(ast, 0x1202c, data);
/* Init VGA DVO Settings */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);
return true;
}
static void ast_init_analog(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u32 data;
/*
* Set DAC source to VGA mode in SCU2C via the P2A
* bridge. First configure the P2U to target the SCU
* in case it isn't at this stage.
*/
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
/* Then unlock the SCU with the magic password */
ast_write32(ast, 0x12000, 0x1688a8a8);
ast_write32(ast, 0x12000, 0x1688a8a8);
ast_write32(ast, 0x12000, 0x1688a8a8);
/* Finally, clear bits [17:16] of SCU2c */
data = ast_read32(ast, 0x1202c);
data &= 0xfffcffff;
ast_write32(ast, 0x1202c, data);
/* Disable DVO */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00);
}
void ast_init_3rdtx(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
u8 jreg;
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg & 0x0e) {
case 0x04:
ast_init_dvo(dev);
break;
case 0x08:
ast_launch_m68k(dev);
break;
case 0x0c:
ast_init_dvo(dev);
break;
default:
if (ast->tx_chip_types & BIT(AST_TX_SIL164))
ast_init_dvo(dev);
else
ast_init_analog(dev);
}
}
}
| linux-master | drivers/gpu/drm/ast/ast_dp501.c |
/*
* Copyright 2012 Red Hat Inc.
* Parts based on xf86-video-ast
* Copyright (c) 2005 ASPEED Technology Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <[email protected]>
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
#define AST_LUT_SIZE 256
static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
{
ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
ast_io_read8(ast, AST_IO_SEQ_PORT);
ast_io_write8(ast, AST_IO_DAC_DATA, red);
ast_io_read8(ast, AST_IO_SEQ_PORT);
ast_io_write8(ast, AST_IO_DAC_DATA, green);
ast_io_read8(ast, AST_IO_SEQ_PORT);
ast_io_write8(ast, AST_IO_DAC_DATA, blue);
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
static void ast_crtc_set_gamma_linear(struct ast_device *ast,
const struct drm_format_info *format)
{
int i;
switch (format->format) {
case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
for (i = 0; i < AST_LUT_SIZE; i++)
ast_load_palette_index(ast, i, i, i, i);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
&format->format);
break;
}
}
static void ast_crtc_set_gamma(struct ast_device *ast,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
int i;
switch (format->format) {
case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
for (i = 0; i < AST_LUT_SIZE; i++)
ast_load_palette_index(ast, i,
lut[i].red >> 8,
lut[i].green >> 8,
lut[i].blue >> 8);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
&format->format);
break;
}
}
static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
struct ast_vbios_mode_info *vbios_mode)
{
u32 refresh_rate_index = 0, refresh_rate;
const struct ast_vbios_enhtable *best = NULL;
u32 hborder, vborder;
bool check_sync;
switch (format->cpp[0] * 8) {
case 8:
vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
break;
case 16:
vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
break;
case 24:
case 32:
vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
break;
default:
return false;
}
switch (mode->crtc_hdisplay) {
case 640:
vbios_mode->enh_table = &res_640x480[refresh_rate_index];
break;
case 800:
vbios_mode->enh_table = &res_800x600[refresh_rate_index];
break;
case 1024:
vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
break;
case 1152:
vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
break;
case 1280:
if (mode->crtc_vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
break;
case 1360:
vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
break;
case 1440:
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
if (mode->crtc_vdisplay == 900)
vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
else
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
break;
case 1680:
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
break;
case 1920:
if (mode->crtc_vdisplay == 1080)
vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
else
vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
break;
default:
return false;
}
refresh_rate = drm_mode_vrefresh(mode);
check_sync = vbios_mode->enh_table->flags & WideScreenMode;
while (1) {
const struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
while (loop->refresh_rate != 0xff) {
if ((check_sync) &&
(((mode->flags & DRM_MODE_FLAG_NVSYNC) &&
(loop->flags & PVSync)) ||
((mode->flags & DRM_MODE_FLAG_PVSYNC) &&
(loop->flags & NVSync)) ||
((mode->flags & DRM_MODE_FLAG_NHSYNC) &&
(loop->flags & PHSync)) ||
((mode->flags & DRM_MODE_FLAG_PHSYNC) &&
(loop->flags & NHSync)))) {
loop++;
continue;
}
if (loop->refresh_rate <= refresh_rate
&& (!best || loop->refresh_rate > best->refresh_rate))
best = loop;
loop++;
}
if (best || !check_sync)
break;
check_sync = false;
}
if (best)
vbios_mode->enh_table = best;
hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
vbios_mode->enh_table->hfp;
adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
vbios_mode->enh_table->hfp +
vbios_mode->enh_table->hsync);
adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
vbios_mode->enh_table->vfp;
adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
vbios_mode->enh_table->vfp +
vbios_mode->enh_table->vsync);
return true;
}
static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
const struct ast_vbios_mode_info *vbios_mode)
{
u32 color_index;
switch (format->cpp[0]) {
case 1:
color_index = VGAModeIndex - 1;
break;
case 2:
color_index = HiCModeIndex;
break;
case 3:
case 4:
color_index = TrueCModeIndex;
break;
default:
return;
}
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0x0f) << 4));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
if (vbios_mode->enh_table->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, format->cpp[0] * 8);
}
}
static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
const struct ast_vbios_mode_info *vbios_mode)
{
u32 refresh_rate_index, mode_id;
refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
mode_id = vbios_mode->enh_table->mode_id;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
if (vbios_mode->enh_table->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
}
}
static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
const struct ast_vbios_stdtable *stdtable;
u32 i;
u8 jreg;
stdtable = vbios_mode->std_table;
jreg = stdtable->misc;
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
/* Set SEQ; except Screen Disable field */
ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, stdtable->seq[0]);
for (i = 1; i < 4; i++) {
jreg = stdtable->seq[i];
ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1), jreg);
}
/* Set CRTC; except base address and offset */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
for (i = 0; i < 12; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
for (i = 14; i < 19; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
for (i = 20; i < 25; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
/* set AR */
jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
for (i = 0; i < 20; i++) {
jreg = stdtable->ar[i];
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
}
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
/* Set GR */
for (i = 0; i < 9; i++)
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
}
static void ast_set_crtc_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
(vbios_mode->enh_table->flags & AST2500PreCatchCRT))
precache = 40;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
temp = (mode->crtc_htotal >> 3) - 5;
if (temp & 0x100)
jregAC |= 0x01; /* HT D[8] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
temp = (mode->crtc_hdisplay >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x04; /* HDE D[8] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
temp = (mode->crtc_hblank_start >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x10; /* HBS D[8] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
if (temp & 0x20)
jreg05 |= 0x80; /* HBE D[5] */
if (temp & 0x40)
jregAD |= 0x01; /* HBE D[5] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
temp = ((mode->crtc_hsync_start-precache) >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x40; /* HRS D[5] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f;
if (temp & 0x20)
jregAD |= 0x04; /* HRE D[5] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
/* Workaround for HSync time with non-octave pixels (1920x1080@60Hz, HSync 44 pixels) */
if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080))
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02);
else
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00);
/* vert timings */
temp = (mode->crtc_vtotal) - 2;
if (temp & 0x100)
jreg07 |= 0x01;
if (temp & 0x200)
jreg07 |= 0x20;
if (temp & 0x400)
jregAE |= 0x01;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
temp = (mode->crtc_vsync_start) - 1;
if (temp & 0x100)
jreg07 |= 0x04;
if (temp & 0x200)
jreg07 |= 0x80;
if (temp & 0x400)
jregAE |= 0x08;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
temp = (mode->crtc_vsync_end - 1) & 0x3f;
if (temp & 0x10)
jregAE |= 0x20;
if (temp & 0x20)
jregAE |= 0x40;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
temp = mode->crtc_vdisplay - 1;
if (temp & 0x100)
jreg07 |= 0x02;
if (temp & 0x200)
jreg07 |= 0x40;
if (temp & 0x400)
jregAE |= 0x02;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
temp = mode->crtc_vblank_start - 1;
if (temp & 0x100)
jreg07 |= 0x08;
if (temp & 0x200)
jreg09 |= 0x20;
if (temp & 0x400)
jregAE |= 0x04;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
temp = mode->crtc_vblank_end - 1;
if (temp & 0x100)
jregAE |= 0x10;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
if (precache)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x80);
else
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x00);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
}
static void ast_set_offset_reg(struct ast_device *ast,
struct drm_framebuffer *fb)
{
u16 offset;
offset = fb->pitches[0] >> 3;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
const struct ast_vbios_dclk_info *clk_info;
if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
else
clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
(clk_info->param3 & 0xc0) |
((clk_info->param3 & 0x3) << 4));
}
static void ast_set_color_reg(struct ast_device *ast,
const struct drm_format_info *format)
{
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
switch (format->cpp[0] * 8) {
case 8:
jregA0 = 0x70;
jregA3 = 0x01;
jregA8 = 0x00;
break;
case 15:
case 16:
jregA0 = 0x70;
jregA3 = 0x04;
jregA8 = 0x02;
break;
case 32:
jregA0 = 0x70;
jregA3 = 0x08;
jregA8 = 0x02;
break;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
}
static void ast_set_crtthd_reg(struct ast_device *ast)
{
/* Set Threshold */
if (IS_AST_GEN7(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
} else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
} else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
} else {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
}
}
static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
jreg &= ~0xC0;
if (vbios_mode->enh_table->flags & NVSync)
jreg |= 0x80;
if (vbios_mode->enh_table->flags & NHSync)
jreg |= 0x40;
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
static void ast_set_start_address_crt1(struct ast_device *ast,
unsigned int offset)
{
u32 addr;
addr = offset >> 2;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
}
static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
u8 vgair1;
do {
vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
} while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout));
}
/*
* Planes
*/
static int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
void __iomem *vaddr, u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type)
{
struct drm_plane *plane = &ast_plane->base;
ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, format_modifiers,
type, NULL);
}
/*
* Primary plane
*/
static const uint32_t ast_primary_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_C8,
};
static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc_state *new_crtc_state = NULL;
struct ast_crtc_state *new_ast_crtc_state;
int ret;
if (new_plane_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
if (ret) {
return ret;
} else if (!new_plane_state->visible) {
if (drm_WARN_ON(dev, new_plane_state->crtc)) /* cannot legally happen */
return -EINVAL;
else
return 0;
}
new_ast_crtc_state = to_ast_crtc_state(new_crtc_state);
new_ast_crtc_state->format = new_plane_state->fb->format;
return 0;
}
static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src,
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
}
static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct ast_device *ast = to_ast_device(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct ast_plane *ast_plane = to_ast_plane(plane);
struct drm_rect damage;
struct drm_atomic_helper_damage_iter iter;
if (!old_fb || (fb->format != old_fb->format)) {
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
ast_set_color_reg(ast, fb->format);
ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
}
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage);
}
/*
* Some BMCs stop scanning out the video signal after the driver
* reprogrammed the offset. This stalls display output for several
* seconds and makes the display unusable. Therefore only update
* the offset if it changes.
*/
if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
ast_set_offset_reg(ast, fb);
}
static void ast_primary_plane_helper_atomic_enable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_device *ast = to_ast_device(plane->dev);
struct ast_plane *ast_plane = to_ast_plane(plane);
/*
* Some BMCs stop scanning out the video signal after the driver
* reprogrammed the scanout address. This stalls display
* output for several seconds and makes the display unusable.
* Therefore only reprogram the address after enabling the plane.
*/
ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
}
static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_device *ast = to_ast_device(plane->dev);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
}
static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
.atomic_enable = ast_primary_plane_helper_atomic_enable,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
};
static const struct drm_plane_funcs ast_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_primary_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
void __iomem *vaddr = ast->vram;
u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
unsigned long size = ast->vram_fb_available - cursor_size;
int ret;
ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
if (ret) {
drm_err(dev, "ast_plane_init() failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);
return 0;
}
/*
* Cursor plane
*/
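/*
 * The hardware cursor is stored as a 16-bpp ARGB4444 image of up to
 * AST_MAX_HWC_WIDTH x AST_MAX_HWC_HEIGHT pixels. ast_update_cursor_image()
 * repacks the 32-bit ARGB source accordingly, accumulates a checksum over
 * the written words and stores the checksum, size and hotspot in the HWC
 * signature area that follows the image.
 */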
static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
{
union {
u32 ul;
u8 b[4];
} srcdata32[2], data32;
union {
u16 us;
u8 b[2];
} data16;
u32 csum = 0;
s32 alpha_dst_delta, last_alpha_dst_delta;
u8 __iomem *dstxor;
const u8 *srcxor;
int i, j;
u32 per_pixel_copy, two_pixel_copy;
alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
last_alpha_dst_delta = alpha_dst_delta - (width << 1);
srcxor = src;
dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
per_pixel_copy = width & 1;
two_pixel_copy = width >> 1;
for (j = 0; j < height; j++) {
for (i = 0; i < two_pixel_copy; i++) {
srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
writel(data32.ul, dstxor);
csum += data32.ul;
dstxor += 4;
srcxor += 8;
}
for (i = 0; i < per_pixel_copy; i++) {
srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
writew(data16.us, dstxor);
csum += (u32)data16.us;
dstxor += 2;
srcxor += 4;
}
dstxor += last_alpha_dst_delta;
}
/* write checksum + signature */
dst += AST_HWC_SIZE;
writel(csum, dst);
writel(width, dst + AST_HWC_SIGNATURE_SizeX);
writel(height, dst + AST_HWC_SIGNATURE_SizeY);
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
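/*
 * Program the cursor image's VRAM offset. The hardware takes the
 * address in units of 8 bytes, split across CRTC registers 0xc8
 * (bits 10:3), 0xc9 (bits 18:11) and 0xca (bits 26:19).
 */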
static void ast_set_cursor_base(struct ast_device *ast, u64 address)
{
u8 addr0 = (address >> 3) & 0xff;
u8 addr1 = (address >> 11) & 0xff;
u8 addr2 = (address >> 19) & 0xff;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
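/*
 * Position the cursor on screen. The x/y offsets select the first
 * visible column/line of the cursor image when the cursor extends
 * beyond the screen's top-left corner; see atomic_update below.
 */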
static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
u8 x_offset, u8 y_offset)
{
u8 x0 = (x & 0x00ff);
u8 x1 = (x & 0x0f00) >> 8;
u8 y0 = (y & 0x00ff);
u8 y1 = (y & 0x0700) >> 8;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}
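/*
 * VGACRCB enables the hardware cursor and selects its 16-bpp
 * (ARGB4444) format; all other bits in the register are preserved.
 */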
static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
{
static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
AST_IO_VGACRCB_HWC_ENABLED);
u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
if (enabled)
vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, mask, vgacrcb);
}
static const uint32_t ast_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_framebuffer *new_fb = new_plane_state->fb;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
if (new_plane_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
if (ret || !new_plane_state->visible)
return ret;
if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
return -EINVAL;
return 0;
}
static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_plane *ast_plane = to_ast_plane(plane);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct ast_device *ast = to_ast_device(plane->dev);
struct iosys_map src_map = shadow_plane_state->data[0];
struct drm_rect damage;
const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
u64 dst_off = ast_plane->offset;
u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
u8 x_offset, y_offset;
/*
* Do data transfer to hardware buffer and point the scanout
* engine to the offset.
*/
if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
ast_update_cursor_image(dst, src, fb->width, fb->height);
ast_set_cursor_base(ast, dst_off);
}
/*
* Update location in HWC signature and registers.
*/
writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
offset_x = AST_MAX_HWC_WIDTH - fb->width;
offset_y = AST_MAX_HWC_HEIGHT - fb->height;
if (plane_state->crtc_x < 0) {
x_offset = (-plane_state->crtc_x) + offset_x;
x = 0;
} else {
x_offset = offset_x;
x = plane_state->crtc_x;
}
if (plane_state->crtc_y < 0) {
y_offset = (-plane_state->crtc_y) + offset_y;
y = 0;
} else {
y_offset = offset_y;
y = plane_state->crtc_y;
}
ast_set_cursor_location(ast, x, y, x_offset, y_offset);
/* Dummy write to enable HWC and make the HW pick-up the changes. */
ast_set_cursor_enabled(ast, true);
}
static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_device *ast = to_ast_device(plane->dev);
ast_set_cursor_enabled(ast, false);
}
static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_cursor_plane_helper_atomic_check,
.atomic_update = ast_cursor_plane_helper_atomic_update,
.atomic_disable = ast_cursor_plane_helper_atomic_disable,
};
static const struct drm_plane_funcs ast_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_cursor_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
struct drm_plane *cursor_plane = &ast_cursor_plane->base;
size_t size;
void __iomem *vaddr;
u64 offset;
int ret;
/*
* Allocate backing storage for cursors. The BOs are permanently
* pinned to the top end of the VRAM.
*/
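	/*
	 * Resulting VRAM layout (a sketch; the cursor area is
	 * page-aligned):
	 *
	 *   0 .. vram_fb_available - size: primary-plane framebuffer
	 *   vram_fb_available - size ..  : HWC image + signature
	 */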
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
if (ast->vram_fb_available < size)
return -ENOMEM;
vaddr = ast->vram + ast->vram_fb_available - size;
offset = ast->vram_fb_available - size;
ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
0x01, &ast_cursor_plane_funcs,
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR);
if (ret) {
drm_err(dev, "ast_plane_init() failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(cursor_plane);
ast->vram_fb_available -= size;
return 0;
}
/*
* CRTC
*/
static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
struct ast_vbios_mode_info *vbios_mode_info;
/* TODO: Maybe control display signal generation with
* Sync Enable (bit CR17.7).
*/
switch (mode) {
case DRM_MODE_DPMS_ON:
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0);
if (ast->tx_chip_types & AST_TX_DP501_BIT)
ast_set_dp501_video_output(crtc->dev, 1);
if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON);
ast_wait_for_vretrace(ast);
ast_dp_set_on_off(crtc->dev, 1);
}
ast_state = to_ast_crtc_state(crtc->state);
format = ast_state->format;
if (format) {
vbios_mode_info = &ast_state->vbios_mode_info;
ast_set_color_reg(ast, format);
ast_set_vbios_color_reg(ast, format, vbios_mode_info);
if (crtc->state->gamma_lut)
ast_crtc_set_gamma(ast, format, crtc->state->gamma_lut->data);
else
ast_crtc_set_gamma_linear(ast, format);
}
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
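		/*
		 * The DPMS level doubles as the sync-off bit pattern
		 * that is written to CRB6 below.
		 */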
ch = mode;
if (ast->tx_chip_types & AST_TX_DP501_BIT)
ast_set_dp501_video_output(crtc->dev, 0);
if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ast_dp_set_on_off(crtc->dev, 0);
ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF);
}
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch);
break;
}
}
static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
enum drm_mode_status status;
	u32 jtemp;
if (ast->support_wide_screen) {
if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
return MODE_OK;
if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
return MODE_OK;
if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
return MODE_OK;
if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
return MODE_OK;
if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
return MODE_OK;
if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
return MODE_OK;
if ((ast->chip == AST2100) || // GEN2, but not AST1100 (?)
(ast->chip == AST2200) || // GEN3, but not AST2150 (?)
IS_AST_GEN4(ast) || IS_AST_GEN5(ast) ||
IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) {
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
return MODE_OK;
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
if (jtemp & 0x01)
return MODE_NOMODE;
else
return MODE_OK;
}
}
}
status = MODE_NOMODE;
switch (mode->hdisplay) {
case 640:
if (mode->vdisplay == 480)
status = MODE_OK;
break;
case 800:
if (mode->vdisplay == 600)
status = MODE_OK;
break;
case 1024:
if (mode->vdisplay == 768)
status = MODE_OK;
break;
case 1152:
if (mode->vdisplay == 864)
status = MODE_OK;
break;
case 1280:
if (mode->vdisplay == 1024)
status = MODE_OK;
break;
case 1600:
if (mode->vdisplay == 1200)
status = MODE_OK;
break;
default:
break;
}
return status;
}
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct drm_device *dev = crtc->dev;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
bool succ;
int ret;
if (!crtc_state->enable)
return 0;
ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
if (ret)
return ret;
ast_state = to_ast_crtc_state(crtc_state);
format = ast_state->format;
if (drm_WARN_ON_ONCE(dev, !format))
return -EINVAL; /* BUG: We didn't set format in primary check(). */
/*
* The gamma LUT has to be reloaded after changing the primary
* plane's color format.
*/
if (old_ast_crtc_state->format != format)
crtc_state->color_mgmt_changed = true;
if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
if (crtc_state->gamma_lut->length !=
AST_LUT_SIZE * sizeof(struct drm_color_lut)) {
drm_err(dev, "Wrong size for gamma_lut %zu\n",
crtc_state->gamma_lut->length);
return -EINVAL;
}
}
succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
&crtc_state->adjusted_mode,
&ast_state->vbios_mode_info);
if (!succ)
return -EINVAL;
return 0;
}
static void
ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
/*
* The gamma LUT has to be reloaded after changing the primary
* plane's color format.
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (crtc_state->gamma_lut)
ast_crtc_set_gamma(ast,
ast_crtc_state->format,
crtc_state->gamma_lut->data);
else
ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
}
	// Set ASPEED Display-Port
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
ast_dp_set_mode(crtc, vbios_mode_info);
}
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info =
&ast_crtc_state->vbios_mode_info;
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_crtthd_reg(ast);
ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
struct ast_device *ast = to_ast_device(dev);
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
/*
* HW cursors require the underlying primary plane and CRTC to
* display a valid mode and image. This is not the case during
* full modeset operations. So we temporarily disable any active
* plane, including the HW cursor. Each plane's atomic_update()
* helper will re-enable it if necessary.
*
* We only do this during *full* modesets. It does not affect
* simple pageflips on the planes.
*/
drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
/*
* Ensure that no scanout takes place before reprogramming mode
* and format registers.
*/
ast_wait_for_vretrace(ast);
}
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.mode_valid = ast_crtc_helper_mode_valid,
.atomic_check = ast_crtc_helper_atomic_check,
.atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
};
static void ast_crtc_reset(struct drm_crtc *crtc)
{
struct ast_crtc_state *ast_state =
kzalloc(sizeof(*ast_state), GFP_KERNEL);
if (crtc->state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
if (ast_state)
__drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *
ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
struct ast_crtc_state *new_ast_state, *ast_state;
struct drm_device *dev = crtc->dev;
if (drm_WARN_ON(dev, !crtc->state))
return NULL;
new_ast_state = kmalloc(sizeof(*new_ast_state), GFP_KERNEL);
if (!new_ast_state)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &new_ast_state->base);
ast_state = to_ast_crtc_state(crtc->state);
new_ast_state->format = ast_state->format;
memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info,
sizeof(new_ast_state->vbios_mode_info));
return &new_ast_state->base;
}
static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct ast_crtc_state *ast_state = to_ast_crtc_state(state);
__drm_atomic_helper_crtc_destroy_state(&ast_state->base);
kfree(ast_state);
}
static const struct drm_crtc_funcs ast_crtc_funcs = {
.reset = ast_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = ast_crtc_atomic_duplicate_state,
.atomic_destroy_state = ast_crtc_atomic_destroy_state,
};
static int ast_crtc_init(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
struct drm_crtc *crtc = &ast->crtc;
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane.base,
&ast->cursor_plane.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
drm_mode_crtc_set_gamma_size(crtc, AST_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, AST_LUT_SIZE);
drm_crtc_helper_add(crtc, &ast_crtc_helper_funcs);
return 0;
}
/*
* VGA Connector
*/
static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
{
struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
if (!ast_vga_connector->i2c)
goto err_drm_connector_update_edid_property;
/*
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
mutex_lock(&ast->ioregs_lock);
edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
mutex_unlock(&ast->ioregs_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
return count;
err_mutex_unlock:
mutex_unlock(&ast->ioregs_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
}
static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
.get_modes = ast_vga_connector_helper_get_modes,
};
static const struct drm_connector_funcs ast_vga_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int ast_vga_connector_init(struct drm_device *dev,
struct ast_vga_connector *ast_vga_connector)
{
struct drm_connector *connector = &ast_vga_connector->base;
int ret;
ast_vga_connector->i2c = ast_i2c_create(dev);
if (!ast_vga_connector->i2c)
drm_err(dev, "failed to add ddc bus for connector\n");
if (ast_vga_connector->i2c)
ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
&ast_vga_connector->i2c->adapter);
else
ret = drm_connector_init(dev, connector, &ast_vga_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
if (ret)
return ret;
drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
return 0;
}
static int ast_vga_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.vga.encoder;
struct ast_vga_connector *ast_vga_connector = &ast->output.vga.vga_connector;
struct drm_connector *connector = &ast_vga_connector->base;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = ast_vga_connector_init(dev, ast_vga_connector);
if (ret)
return ret;
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
return 0;
}
/*
* SIL164 Connector
*/
static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector)
{
struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
if (!ast_sil164_connector->i2c)
goto err_drm_connector_update_edid_property;
/*
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
mutex_lock(&ast->ioregs_lock);
edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
mutex_unlock(&ast->ioregs_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
return count;
err_mutex_unlock:
mutex_unlock(&ast->ioregs_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
}
static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
.get_modes = ast_sil164_connector_helper_get_modes,
};
static const struct drm_connector_funcs ast_sil164_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int ast_sil164_connector_init(struct drm_device *dev,
struct ast_sil164_connector *ast_sil164_connector)
{
struct drm_connector *connector = &ast_sil164_connector->base;
int ret;
ast_sil164_connector->i2c = ast_i2c_create(dev);
if (!ast_sil164_connector->i2c)
drm_err(dev, "failed to add ddc bus for connector\n");
if (ast_sil164_connector->i2c)
ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
DRM_MODE_CONNECTOR_DVII,
&ast_sil164_connector->i2c->adapter);
else
ret = drm_connector_init(dev, connector, &ast_sil164_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
if (ret)
return ret;
drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
return 0;
}
static int ast_sil164_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.sil164.encoder;
struct ast_sil164_connector *ast_sil164_connector = &ast->output.sil164.sil164_connector;
struct drm_connector *connector = &ast_sil164_connector->base;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = ast_sil164_connector_init(dev, ast_sil164_connector);
if (ret)
return ret;
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
return 0;
}
/*
* DP501 Connector
*/
static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector)
{
void *edid;
bool succ;
int count;
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid)
goto err_drm_connector_update_edid_property;
succ = ast_dp501_read_edid(connector->dev, edid);
if (!succ)
goto err_kfree;
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
return count;
err_kfree:
kfree(edid);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
}
static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
struct ast_device *ast = to_ast_device(connector->dev);
if (ast_dp501_is_connected(ast))
return connector_status_connected;
return connector_status_disconnected;
}
static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
.get_modes = ast_dp501_connector_helper_get_modes,
.detect_ctx = ast_dp501_connector_helper_detect_ctx,
};
static const struct drm_connector_funcs ast_dp501_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
{
int ret;
ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
static int ast_dp501_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.dp501.encoder;
struct drm_connector *connector = &ast->output.dp501.connector;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = ast_dp501_connector_init(dev, connector);
if (ret)
return ret;
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
return 0;
}
/*
* ASPEED Display-Port Connector
*/
static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
{
void *edid;
struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(dev);
int succ;
int count;
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid)
goto err_drm_connector_update_edid_property;
/*
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
mutex_lock(&ast->ioregs_lock);
succ = ast_astdp_read_edid(connector->dev, edid);
if (succ < 0)
goto err_mutex_unlock;
mutex_unlock(&ast->ioregs_lock);
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
return count;
err_mutex_unlock:
mutex_unlock(&ast->ioregs_lock);
kfree(edid);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
}
static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
struct ast_device *ast = to_ast_device(connector->dev);
if (ast_astdp_is_connected(ast))
return connector_status_connected;
return connector_status_disconnected;
}
static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
.get_modes = ast_astdp_connector_helper_get_modes,
.detect_ctx = ast_astdp_connector_helper_detect_ctx,
};
static const struct drm_connector_funcs ast_astdp_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector)
{
int ret;
ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
static int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.astdp.encoder;
struct drm_connector *connector = &ast->output.astdp.connector;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = ast_astdp_connector_init(dev, connector);
if (ret)
return ret;
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
return 0;
}
/*
* BMC virtual Connector
*/
static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
{
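	/*
	 * The BMC's virtual output has no EDID; advertise the standard
	 * modes up to 4096x4096 instead.
	 */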
return drm_add_modes_noedid(connector, 4096, 4096);
}
static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
.get_modes = ast_bmc_connector_helper_get_modes,
};
static const struct drm_connector_funcs ast_bmc_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int ast_bmc_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.bmc.encoder;
struct drm_connector *connector = &ast->output.bmc.connector;
int ret;
ret = drm_encoder_init(dev, encoder,
&ast_bmc_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, "ast_bmc");
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret)
return ret;
drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
return 0;
}
/*
* Mode config
*/
static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
{
struct ast_device *ast = to_ast_device(state->dev);
	/*
	 * Concurrent operations could possibly trigger a call to
	 * drm_connector_helper_funcs.get_modes by trying to read the
	 * display modes. Protect access to I/O registers by acquiring
	 * the I/O-register lock; it is dropped again after the commit
	 * tail has completed.
	 */
mutex_lock(&ast->ioregs_lock);
drm_atomic_helper_commit_tail_rpm(state);
mutex_unlock(&ast->ioregs_lock);
}
static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
.atomic_commit_tail = ast_mode_config_helper_atomic_commit_tail,
};
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
struct ast_device *ast = to_ast_device(dev);
unsigned long fbsize, fbpages, max_fbpages;
max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
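	/* Worst-case framebuffer size for this mode, at 4 bytes per pixel. */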
fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
if (fbpages > max_fbpages)
return MODE_MEM;
return MODE_OK;
}
static const struct drm_mode_config_funcs ast_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.mode_valid = ast_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
int ret;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
dev->mode_config.funcs = &ast_mode_config_funcs;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
if (ast->chip == AST2100 || // GEN2, but not AST1100 (?)
ast->chip == AST2200 || // GEN3, but not AST2150 (?)
IS_AST_GEN7(ast) ||
IS_AST_GEN6(ast) ||
IS_AST_GEN5(ast) ||
IS_AST_GEN4(ast)) {
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
} else {
dev->mode_config.max_width = 1600;
dev->mode_config.max_height = 1200;
}
dev->mode_config.helper_private = &ast_mode_config_helper_funcs;
ret = ast_primary_plane_init(ast);
if (ret)
return ret;
ret = ast_cursor_plane_init(ast);
if (ret)
return ret;
	ret = ast_crtc_init(dev);
	if (ret)
		return ret;
if (ast->tx_chip_types & AST_TX_NONE_BIT) {
ret = ast_vga_output_init(ast);
if (ret)
return ret;
}
if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
if (ret)
return ret;
}
if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
if (ret)
return ret;
}
if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ret = ast_astdp_output_init(ast);
if (ret)
return ret;
}
ret = ast_bmc_output_init(ast);
if (ret)
return ret;
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
return 0;
}
| linux-master | drivers/gpu/drm/ast/ast_mode.c |